ext (stringclasses, 9 values) | sha (stringlengths, 40-40) | content (stringlengths, 3-1.04M)
---|---|---|
py | b40822d35102c93ef2f99a64fda782fdf5977ddb | # coding: utf-8
"""
Quokka will try to read configurations from environment variables
so you don't need this local_settings.py file if you have env vars.
1. You can point to a settings file
export QUOKKA_SETTINGS='/path/to/settings.py'
2. You can set individual values
export QUOKKA_MONGODB_DB="quokka_db"
export QUOKKA_MONGODB_HOST='localhost'
export QUOKKA_MONGODB_PORT='$int 27017'
Or just fill your values in this file and rename it to 'local_settings.py'
"""
# MONGO
MONGODB_DB = "quokka_db"
MONGODB_HOST = 'localhost'
MONGODB_PORT = 27017
MONGODB_USERNAME = None
MONGODB_PASSWORD = None
# Debug and toolbar
DEBUG = True
DEBUG_TOOLBAR_ENABLED = False
# Logger
LOGGER_ENABLED = True
LOGGER_LEVEL = 'DEBUG'
LOGGER_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
LOGGER_DATE_FORMAT = '%d.%m %H:%M:%S'
"""
If you want to install a new theme you can use the quokkacms tool
$ pip install quokkacms
$ cd quokka
$ quokkacms install_theme material
The above commands will download the Material Design theme to your themes folder,
then just enable it.
DEFAULT_THEME = 'material'
"""
|
py | b408233c78e44f99be1ad348eed2a956a9b495f1 | # Copyright (c) 2021 Food-X Technologies
#
# This file is part of foodx_devops_tools.
#
# You should have received a copy of the MIT License along with
# foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.
from foodx_devops_tools.deploy_me_entry import flit_entry
class TestFlitEntry:
def test_clean(self, mocker):
mock_flow = mocker.patch("foodx_devops_tools.deploy_me_entry.deploy_me")
flit_entry()
mock_flow.assert_called_once_with()
|
py | b4082354e76ba1d02a251773a10914786ebffe50 | # -*- coding: UTF-8 -*-
import optparse
import bbt_searchfile
from common.logging import MyLogging
Usage = "main_analyze.py -d yyyy-mm-dd -p (path)"
Parser = optparse.OptionParser(usage=Usage)
mylog = MyLogging()
def usage():
# Parser.add_option("-d", "--date", dest="date", default="\d+", help=r'yyyy-mm-dd 可当做一个通配使用')
Parser.add_option("-y", "--year", dest="year", default="\d+", help=r'yyyy-mm-dd 可当做一个通配使用')
Parser.add_option("-m", "--month", dest="month", default="\d+", help=r'yyyy-mm-dd 可当做一个通配使用')
Parser.add_option("-d", "--day", dest="day", default="\d+", help=r'yyyy-mm-dd 可当做一个通配使用')
Parser.add_option("-l", "--log-path", dest="path", help=r"Log file dir.")
Parser.add_option("-a", "--app", dest="app", default="\d+", help=r"AppId 应用id.")
Parser.add_option("-p", "--platform", dest="platform", default="\d+", help=r"Log file dir.")
Parser.add_option("-t", "--channel-type", dest="channel_type", default="\d+", help=r"Log file dir.")
Parser.add_option("-c", "--channel", dest="channel", default="\d+", help=r"Log file dir.")
if __name__ == '__main__':
usage()
(option, args) = Parser.parse_args()
anal = bbt_searchfile.Analyze(option.year, option.month, option.day, option.path, option.platform, option.app,
option.channel_type, option.channel, mylog)
anal.run()
|
py | b4082610e5d57a9f5280477c4fba91ce33c9e023 | from ..base import ShopifyResource
from ..resources import Metafield
from six.moves import urllib
import base64
import re
class Image(ShopifyResource):
_prefix_source = "/admin/products/$product_id/"
@classmethod
def _prefix(cls, options={}):
product_id = options.get("product_id")
if product_id:
return "/admin/products/%s" % (product_id)
else:
return "/admin"
def __getattr__(self, name):
if name in ["pico", "icon", "thumb", "small", "compact", "medium", "large", "grande", "original"]:
return re.sub(r"/(.*)\.(\w{2,4})", r"/\1_%s.\2" % (name), self.src)
else:
return super(Image, self).__getattr__(name)
def attach_image(self, data, filename=None):
self.attributes["attachment"] = base64.b64encode(data).decode()
if filename:
self.attributes["filename"] = filename
def metafields(self):
if self.is_new():
return []
query_params = { 'metafield[owner_id]': self.id, 'metafield[owner_resource]': 'product_image' }
return Metafield.find(from_ = '/admin/metafields.json?%s' % urllib.parse.urlencode(query_params))
def save(self):
if 'product_id' not in self._prefix_options:
self._prefix_options['product_id'] = self.product_id
return super(ShopifyResource, self).save()
|
py | b408267710841bc8fe0566a9ef34169c08258e5c | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import story
from telemetry.internal.results import page_test_results
from telemetry.page import page as page_module
from telemetry.timeline import async_slice
from telemetry.timeline import model as model_module
from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf import timeline_based_measurement as tbm_module
class FakeSmoothMetric(timeline_based_metric.TimelineBasedMetric):
def AddResults(self, model, renderer_thread, interaction_records, results):
results.AddValue(scalar.ScalarValue(
results.current_page, 'FakeSmoothMetric', 'ms', 1,
improvement_direction=improvement_direction.DOWN))
results.AddValue(scalar.ScalarValue(
results.current_page, 'SmoothMetricRecords', 'count',
len(interaction_records),
improvement_direction=improvement_direction.DOWN))
class FakeLoadingMetric(timeline_based_metric.TimelineBasedMetric):
def AddResults(self, model, renderer_thread, interaction_records, results):
results.AddValue(scalar.ScalarValue(
results.current_page, 'FakeLoadingMetric', 'ms', 2,
improvement_direction=improvement_direction.DOWN))
results.AddValue(scalar.ScalarValue(
results.current_page, 'LoadingMetricRecords', 'count',
len(interaction_records),
improvement_direction=improvement_direction.DOWN))
class FakeStartupMetric(timeline_based_metric.TimelineBasedMetric):
def AddResults(self, model, renderer_thread, interaction_records, results):
pass
def AddWholeTraceResults(self, model, results):
results.AddValue(scalar.ScalarValue(
results.current_page, 'FakeStartupMetric', 'ms', 3,
improvement_direction=improvement_direction.DOWN))
class TimelineBasedMetricTestData(object):
def __init__(self, options):
self._model = model_module.TimelineModel()
renderer_process = self._model.GetOrCreateProcess(1)
self._renderer_thread = renderer_process.GetOrCreateThread(2)
self._renderer_thread.name = 'CrRendererMain'
self._foo_thread = renderer_process.GetOrCreateThread(3)
self._foo_thread.name = 'CrFoo'
self._results_wrapper = tbm_module._TBMResultWrapper()
self._results = page_test_results.PageTestResults()
self._results.telemetry_info.benchmark_name = 'benchmark'
self._results.telemetry_info.benchmark_start_epoch = 123
self._results.telemetry_info.benchmark_descriptions = 'foo'
self._story_set = None
self._threads_to_records_map = None
self._tbm_options = options
@property
def model(self):
return self._model
@property
def renderer_thread(self):
return self._renderer_thread
@property
def foo_thread(self):
return self._foo_thread
@property
def threads_to_records_map(self):
return self._threads_to_records_map
@property
def results(self):
return self._results
def AddInteraction(self, thread, marker='', ts=0, duration=5):
assert thread in (self._renderer_thread, self._foo_thread)
thread.async_slices.append(async_slice.AsyncSlice(
'category', marker, timestamp=ts, duration=duration,
start_thread=self._renderer_thread, end_thread=self._renderer_thread,
thread_start=ts, thread_duration=duration))
def FinalizeImport(self):
self._model.FinalizeImport()
self._threads_to_records_map = (
tbm_module._GetRendererThreadsToInteractionRecordsMap(self._model))
self._story_set = story.StorySet(base_dir=os.path.dirname(__file__))
self._story_set.AddStory(page_module.Page(
'http://www.bar.com/', self._story_set, self._story_set.base_dir,
name='http://www.bar.com/'))
self._results.WillRunPage(self._story_set.stories[0])
def AddResults(self):
all_metrics = self._tbm_options.GetLegacyTimelineBasedMetrics()
for thread, records in self._threads_to_records_map.iteritems():
# pylint: disable=protected-access
metric = tbm_module._TimelineBasedMetrics(
self._model, thread, records, self._results_wrapper, all_metrics)
metric.AddResults(self._results)
for metric in all_metrics:
metric.AddWholeTraceResults(self._model, self._results)
self._results.DidRunPage(self._story_set.stories[0])
class TimelineBasedMetricsTests(unittest.TestCase):
def setUp(self):
self._options = tbm_module.Options()
self._options.SetLegacyTimelineBasedMetrics(
(FakeSmoothMetric(), FakeLoadingMetric(), FakeStartupMetric()))
def testGetRendererThreadsToInteractionRecordsMap(self):
d = TimelineBasedMetricTestData(self._options)
# Insert 2 interaction records to renderer_thread and 1 to foo_thread
d.AddInteraction(d.renderer_thread, ts=0, duration=20,
marker='Interaction.LogicalName1')
d.AddInteraction(d.renderer_thread, ts=25, duration=5,
marker='Interaction.LogicalName2')
d.AddInteraction(d.foo_thread, ts=50, duration=15,
marker='Interaction.LogicalName3')
d.FinalizeImport()
self.assertEquals(2, len(d.threads_to_records_map))
# Assert the 2 interaction records of renderer_thread are in the map.
self.assertIn(d.renderer_thread, d.threads_to_records_map)
interactions = d.threads_to_records_map[d.renderer_thread]
self.assertEquals(2, len(interactions))
self.assertEquals(0, interactions[0].start)
self.assertEquals(20, interactions[0].end)
self.assertEquals(25, interactions[1].start)
self.assertEquals(30, interactions[1].end)
# Assert the 1 interaction record of foo_thread is in the map.
self.assertIn(d.foo_thread, d.threads_to_records_map)
interactions = d.threads_to_records_map[d.foo_thread]
self.assertEquals(1, len(interactions))
self.assertEquals(50, interactions[0].start)
self.assertEquals(65, interactions[0].end)
def testAddResults(self):
d = TimelineBasedMetricTestData(self._options)
d.AddInteraction(d.renderer_thread, ts=0, duration=20,
marker='Interaction.LogicalName1')
d.AddInteraction(d.foo_thread, ts=25, duration=5,
marker='Interaction.LogicalName2')
d.FinalizeImport()
d.AddResults()
self.assertEquals(1, len(d.results.FindAllPageSpecificValuesFromIRNamed(
'LogicalName1', 'FakeSmoothMetric')))
self.assertEquals(1, len(d.results.FindAllPageSpecificValuesFromIRNamed(
'LogicalName2', 'FakeLoadingMetric')))
self.assertEquals(1, len(d.results.FindAllPageSpecificValuesNamed(
'FakeStartupMetric')))
def testDuplicateInteractionsInDifferentThreads(self):
d = TimelineBasedMetricTestData(self._options)
d.AddInteraction(d.renderer_thread, ts=10, duration=5,
marker='Interaction.LogicalName/repeatable')
d.AddInteraction(d.foo_thread, ts=20, duration=5,
marker='Interaction.LogicalName')
self.assertRaises(tbm_module.InvalidInteractions, d.FinalizeImport)
def testDuplicateRepeatableInteractionsInDifferentThreads(self):
d = TimelineBasedMetricTestData(self._options)
d.AddInteraction(d.renderer_thread, ts=10, duration=5,
marker='Interaction.LogicalName/repeatable')
d.AddInteraction(d.foo_thread, ts=20, duration=5,
marker='Interaction.LogicalName/repeatable')
self.assertRaises(tbm_module.InvalidInteractions, d.FinalizeImport)
def testDuplicateUnrepeatableInteractionsInSameThread(self):
d = TimelineBasedMetricTestData(self._options)
d.AddInteraction(d.renderer_thread, ts=10, duration=5,
marker='Interaction.LogicalName')
d.AddInteraction(d.renderer_thread, ts=20, duration=5,
marker='Interaction.LogicalName')
d.FinalizeImport()
self.assertRaises(tbm_module.InvalidInteractions, d.AddResults)
def testDuplicateRepeatableInteractions(self):
d = TimelineBasedMetricTestData(self._options)
d.AddInteraction(d.renderer_thread, ts=10, duration=5,
marker='Interaction.LogicalName/repeatable')
d.AddInteraction(d.renderer_thread, ts=20, duration=5,
marker='Interaction.LogicalName/repeatable')
d.FinalizeImport()
d.AddResults()
self.assertEquals(1, len(d.results.pages_that_succeeded))
|
py | b4082677be8dcc7c36c5b387234d65cc268a1603 | import matplotlib as mpl
mpl.use('Agg')
import matplotlib.axes as axes
import matplotlib.pyplot as plt
import api
import random
import api.util as u
import copy
import math
from api import Fleet, Planet, Map
class State:
"""
Represents the state of the game at a given plie.
"""
# The map
__map = None # type: Map
# Whether each planet belongs to player 1, 2 or is neutral (player 0)
__owner = [] # type: list[int]
# The number of ships stationed at each planet
__garrisons = [] # type: list[int]
# All fleets in transit
__fleets = [] # type: list[Fleet]
# True if it's player 1's turn
__player1s_turn = None # type: bool
# If one of the players has lost the game by making an illegal move
# or not moving fast enough. None is nobody has revoked, otherwise the
# id of the player that revoked
__revoked = None # type: int, None
# How many turns have passed
__turn = 0 # type: int
def __init__(self,
map, # type: Map
garrisons, # type: list[int]
owner, # type: list[int]
start=1, # type: int
fleets=None # type: list[Fleet]
):
"""
:param map: The playing area
:param garrisons: A list of integers such that garrisons[i]
contains the number of ships stationed at planet map.planets()[i]
:param owner: A list of integers such that owners[i]
contains the owner (0, 1 or 2) of planet map.planets()[i]
:param start: Which player is set to make the next turn in this state (1 or 2)
:param fleets: A list of fleet objects representing the fleets in transit in this state
"""
self.__map = map
self.__owner = list(owner)
self.__garrisons = list(garrisons)
self.__player1s_turn = True if start == 1 else False
if not fleets is None:
self.__fleets = list(fleets)
@classmethod
def make(cls,
map, # type: Map
garrisons, # type: list[int]
player1s_planets, # type: list[int]
player2s_planets, # type: list[int]
start=1, # type: int
fleets=None # type: list[Fleet]
):
"""
A factory method allowing you to construct a state by specifying the planets owned by
each players in two separate lists.
:param map: The playing area
:param garrisons: A list of integers such that garrisons[i]
contains the number of ships stationed at planet map.planets()[i]
:param player1s_planets: A list containing the indices of the planets
belonging to player 1
:param player2s_planets: A list containing the indices of the planets
belonging to player 2
:param start: Which player is set to make the next move (1 or 2)
:param fleets: A list of fleet objects representing the fleets in transit in this state
:return:
"""
owner = [0] * map.size()
for i in player1s_planets:
owner[i] = 1
for i in player2s_planets:
owner[i] = 2
return cls(map, list(garrisons), owner, start, fleets)
def fleets(self):
# type: () -> list[Fleet]
"""
:return: A list of the fleet objects in this state
"""
return list(self.__fleets)
def next(self,
move # type: tuple[int, int]
):
# type: () -> State
"""
Compute the next state from this one, assuming that the player whose turn it is makes the given move.
:return: The state that would result from the given move.
:raises: RuntimeError if state is finished. Be sure to check state.finished() before calling this
method.
"""
if self.finished():
raise RuntimeError('Gamestate is finished. No next states exist.')
# Start with a copy of the current state
state = self.clone() # type: State
# Switch the player
state.__player1s_turn = not self.__player1s_turn
# Increment the turn number (we count the number of turns not of plies)
if self.whose_turn() == 2:
state.__turn += 1
# Check illegal moves (moving from other player's planet)
if move is not None and self.owner(self.planets()[move[0]]) != self.whose_turn():
state.__revoked = self.whose_turn()
return state
state.__fleets = []
# Execute the move
if move is not None:
source = self.planets()[move[0]]
target = self.planets()[move[1]]
if self.garrison(source) > 1: # If the source planet has 1 ship or fewer, no fleet is sent
half = float(self.__garrisons[source.id()]) * 0.5
fleetsize = int(math.floor(half)) # add half the ships to the fleet
state.__garrisons[source.id()] = self.__garrisons[source.id()] - fleetsize # leave the rest behind
fleet = Fleet(source, target, self.whose_turn(), fleetsize)
state.__fleets.append(fleet)
# Move the fleets, and handle attacks
for fleet in self.__fleets:
next = fleet.next() # New fleet object, one step closer to destination
if next is None: # fleet has arrived
# Reinforcements
if state.owner(state.planets()[fleet.target().id()]) == fleet.owner():
state.__garrisons[fleet.target().id()] += fleet.size()
# Attack
else:
# compute the ships remaining after attack: negative means attacker won
result = state.__garrisons[fleet.target().id()] - fleet.size()
# Planet is conquered, change owner
if result < 0:
state.__owner[fleet.target().id()] = fleet.owner()
state.__garrisons[fleet.target().id()] = - result
else:
state.__garrisons[fleet.target().id()] = result
else:
state.__fleets.append(next)
state.__player1s_turn = not self.__player1s_turn
# If player 2 has moved (end of the turn), increase the garrisons
if self.whose_turn() == 2:
for planet in self.planets():
if self.__turn % planet.turns_per_ship() == 0 \
and self.owner(planet) != 0 \
and self.__turn != 0:
state.__garrisons[planet.id()] += 1
return state
def turn_nr(self):
# type: () -> int
"""
:return: How many turns preceded this state.
"""
return self.__turn
def whose_turn(self):
# type: () -> int
"""
:return: The player who is set to make the next move.
"""
return 1 if self.__player1s_turn else 2
def planets(self,
owner_id = None # type: int
):
# type: () -> list[Planet]
"""
:param owner_id: Filter by owner. If given, only the planets belonging to
this owner are returned (0, 1 or 2)
:return: a list of planets. If no id is given, return all planets. With an
id (0, 1 or 2), all planets belonging to that player
"""
planets = self.__map.planets()
if owner_id is None:
return planets
return [p for p in planets if self.owner(p) == owner_id]
def finished(self):
# type: () -> bool
"""
:return: A boolean indicating whether the game is finished. The game
is finished if one of the players has zero ships left, or if a player
has revoked. A player revokes by playing an illegal move, or not
finishing on time (in a time controlled game).
"""
if self.__revoked is not None:
return True
for owner in [1, 2]:
# If no planets owned
if len(self.planets(owner)) == 0:
# check nr of fleets owned
fleets = 0
for fleet in self.__fleets:
if fleet.owner() == owner:
fleets += 1
if fleets == 0:
return True
return False
def revoked(self):
return self.__revoked
def winner(self):
"""
Who won the game (if it's finished).
:return: The (integer) id of the player who won if the game is finished (1 or 2). None
if the game is not finished.
"""
if not self.finished():
return None
if self.__revoked is not None:
return self.whose_turn()
if len(self.planets(1)) == 0:
return 2
assert(len(self.planets(2)) == 0)
return 1
def owner(self,
planet # type: Planet
):
# type: () -> int
"""
:param planet: The planet for which we want the owner (NB: the planet object, not the id)
:return: who owns the given planet in this gamestate.
"""
return self.__owner[planet.id()] # type: int
def garrison(self,
planet # type: Planet
):
"""
:param planet: The planet for which we want the number of stationed ships.
:return: How many ships there are at the given planet in this gamestate
"""
return self.__garrisons[planet.id()]
def clone(self):
# type: () -> State
"""
Creates a copy of this state object, where all the volatile
objects (fleets, owner array) are deep-copied. The map and planet are
references to the original objects.
"""
state = State(self.__map, list(self.__garrisons), list(self.__owner))
state.__revoked = self.__revoked
# Deep copy the fleets
fleets = [fleet.clone() for fleet in self.__fleets]
state.__fleets = fleets
state.__turn = self.__turn
return state
def moves(self):
# type: () -> list[tuple[int, int]]
"""
:return: A list of all the legal moves that can be made by the player whose turn it is.
"""
# get the planets
mine = self.planets(self.whose_turn())
all = self.planets()
# Generate all possible pairs with the first element from mine
# and the second from all.
moves = []
for m in mine:
if self.garrison(m) > 1:
for a in all:
if m.id() != a.id():
moves.append((m.id(),a.id()))
# None is also a legal move (do nothing)
moves.append(None)
return moves
def visualize(self):
# type: () -> Figure
"""
Visualize the gamestate.
:return: A matplotlib figure object. Save to a file with::
state.visualize().savefig('filename.png')
The format is automatically determined from the extension you choose.
"""
fig = plt.figure(figsize=(6,6))
cm = ['gray', 'blue', 'red']
ax = fig.add_subplot(111) # type: axes.Axes
ax.set_axis_bgcolor('#19102b')
# Plot the planets
xs = []
ys = []
sizes = []
labels = []
for planet in self.planets():
xs.append(planet.coords()[0])
ys.append(planet.coords()[1])
sizes.append(planet.size() * 500)
labels.append(self.garrison(planet))
ax.scatter(xs, ys, s=sizes, c=[cm[o] for o in self.__owner], zorder=10, linewidth=0,)
for x, y, label, size in zip(xs, ys, labels, sizes):
ax.annotate(
label,
xy=(x, y), xytext=(0, - 15 - size/50.0),
textcoords='offset points', ha='center', va='bottom',
zorder=30, color='white')
# Plot the fleets
for fleet in self.__fleets:
ax.plot(
[fleet.source().coords()[0],
fleet.target().coords()[0]],
[fleet.source().coords()[1],
fleet.target().coords()[1]], alpha=0.8, color=cm[fleet.owner()], linestyle='dotted')
turns_left = fleet.distance()
max_dist = api.util.distance(fleet.source(), fleet.target())
max_turns = max_dist / u.SPEED
ratio = 0.5 if max_turns == 0 else float(turns_left) / float(max_turns) # scale the distance travelled to target to the range (0, 1)
# Current location of the fleet
location = plus(mult(fleet.source().coords(), ratio), mult(fleet.target().coords(), 1.0 - ratio))
ax.scatter(location[0], location[1], c=cm[fleet.owner()], s=math.sqrt(fleet.size()) * 20, marker='s', linewidth=0, zorder=20)
ax.annotate(
fleet.size(),
xy=location, xytext=(0, - 20),
textcoords='offset points', ha='center', va='bottom',
zorder=30, color=cm[fleet.owner()])
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.get_xaxis().set_tick_params(which='both', top='off', bottom='off', labelbottom='off')
ax.get_yaxis().set_tick_params(which='both', left='off', right='off', labelleft='off')
# ax.set_xlim([0, 1])
# ax.set_ylim([0, 1])
ax.set_title('turn {}.{}: '.format(self.__turn, self.whose_turn()))
return fig
def __repr__(self):
# type: () -> str
"""
:return: A concise string representation of the state in one line
"""
res = 'turn {}.{}: '.format(self.__turn, self.whose_turn())
res += ' planets: '
for i, planet in enumerate(self.planets()):
res += ', ' if i !=0 else ''
res += '{}.o{}g{}'.format(planet.id(), self.owner(planet), self.garrison(planet))
if len(self.fleets()) == 0:
res += '\t no fleets'
else:
res += '\t fleets: '
for i, fleet in enumerate(self.fleets()):
res += ', ' if i !=0 else ''
res += '{}>{}.o{}s{}d{}'.format(
fleet.source().id(), fleet.target().id(),
fleet.owner(), fleet.size(), fleet.distance())
return res
@staticmethod
def generate(num_planets, id=None, symmetric=True):
# type: () -> (State, id)
"""
Generates a random start state: a random map, with home planets assigned
NB: This method returns a tuple (State, id), so call it like this::
state, id = State.generate(6)
:param num_planets: The number of planets in the map
:param id: Optional. The same id will always lead to the same map. If it is not
supplied, or None, a random map will be generated.
:return: A pair of a starting state and its id.
"""
if not symmetric:
return State.generate_asym(num_planets, id)
if id is None:
id = random.randint(0, 100000)
# Create an RNG with id as the seed
rng = random.Random(id)
planets = []
# Home planets
planets.append(Planet(0.0, 0.0, 1, 0))
planets.append(Planet(1.0, 1.0, 1, 1))
garrisons = [100, 100]
# Rest of the planets
for i in range(2, num_planets, 2):
x = round(rng.random(), 2)
y = round(rng.random(), 2)
size = 1.0 / rng.choice([1] + [3, 5, 7, 13, 17] * 3)
garrisons += ([rng.randint(1, 30)] * 2)
planets.append(Planet(x, y, size, i))
planets.append(Planet(1 - x, 1 - y, size, i + 1))
if num_planets % 2 != 0:
x = round(rng.random(), 2)
y = 1 - x
size = 1.0 / rng.choice([1] + [3, 5, 7, 13, 17] * 3)
garrisons.append(rng.randint(1, 30))
planets.append(Planet(x, y, size, num_planets - 1))
map = Map(planets)
state = State.make(map, garrisons, [0], [1])
return state, id
@staticmethod
def generate_asym(num_planets, id=None):
if id is None:
id = random.randint(0, 100000)
# Create an RNG with id as the seed
rng = random.Random(id)
planets = []
# Home planets
planets.append(Planet(0.0, 0.0, 1, 0))
planets.append(Planet(1.0, 1.0, 1, 1))
garrisons = [100, 100]
# Rest of the planets
for i in range(2, num_planets):
x = round(rng.random(), 2)
y = round(rng.random(), 2)
size = 1.0 / rng.choice([1] + [3, 5, 7, 13, 17] * 3)
garrisons.append(rng.randint(1, 30))
planets.append(Planet(x, y, size, i))
map = Map(planets)
state = State.make(map, garrisons, [0], [1])
return state, id
@staticmethod
def load(file, whose_turn=1):
# type: () -> State
"""
Loads a state from a file (or more accurately, a map, and garrison/ownership information).
Each line in the file describes a planet. The lines should be of the form
x, y, size, nr_ships, owner
At least one planet should be owned by player 1 and one by 2.
For instance:
0.0, 0.0, 10, 50, 1
1.0, 1.0, 10, 50, 2
0.5, 0.5, 1, 30, 0
"""
planets = []
garrisons = []
owners = []
with open(file, 'r') as f:
for line in f:
line = line.rstrip()
x, y, size, nr_ships, owner = line.split(',')
x = float(x)
y = float(y)
size = int(size)
nr_ships = int(nr_ships)
owner = int(owner)
planets.append(Planet(x, y, size))
garrisons.append(nr_ships)
owners.append(owner)
map = Map(planets)
state = State(map, garrisons, owners, whose_turn)
return state
def mult(
seq, # type: list[float]
scalar # type: float
):
# type: () -> list[float]
"""
Create a list by multiplying seq element-wise by scalar.
:param seq:
:param scalar:
:return: A list with the value seq[i] * scalar at index i
"""
return [value * scalar for value in seq]
def plus(pair1, pair2):
# type: () -> tuple
"""
Element-wise sum for pairs
:return: A pair; the element-wise sum of the given pairs
"""
return pair1[0] + pair2[0], pair1[1] + pair2[1]
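# --- Illustrative usage sketch (not part of the original module). It only
# exercises methods defined above (State.generate, moves, next, visualize) and
# assumes the `api` package imported at the top of this file is available;
# visualize() additionally relies on the older matplotlib API used above.
if __name__ == '__main__':
    demo_state, map_id = State.generate(6)       # random symmetric 6-planet map
    print('generated map id:', map_id)
    print(demo_state)                            # one-line summary via __repr__
    first_move = demo_state.moves()[0]           # any legal move (a pair of planet ids)
    print(demo_state.next(first_move))           # state resulting from that move
    demo_state.visualize().savefig('state.png')  # render the starting position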
|
py | b408284b040f4e39368d69e75856403d1f964959 | # -*- coding: utf-8 -*-
import sys
if sys.version_info.major == 2:
from fractions import gcd
else:
from math import gcd
import networkx as nx
"""Algorithms for directed acyclic graphs (DAGs)."""
# Copyright (C) 2006-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """\n""".join(['Aric Hagberg <[email protected]>',
'Dan Schult ([email protected])',
'Ben Edwards ([email protected])'])
__all__ = ['descendants',
'ancestors',
'topological_sort',
'topological_sort_recursive',
'is_directed_acyclic_graph',
'is_aperiodic']
def descendants(G, source):
"""Return all nodes reachable from `source` in G.
Parameters
----------
G : NetworkX DiGraph
source : node in G
Returns
-------
des : set()
The descendants of source in G
"""
if not G.has_node(source):
raise nx.NetworkXError("The node %s is not in the graph." % source)
des = set(nx.shortest_path_length(G, source=source).keys()) - set([source])
return des
def ancestors(G, source):
"""Return all nodes having a path to `source` in G.
Parameters
----------
G : NetworkX DiGraph
source : node in G
Returns
-------
ancestors : set()
The ancestors of source in G
"""
if not G.has_node(source):
raise nx.NetworkXError("The node %s is not in the graph." % source)
anc = set(nx.shortest_path_length(G, target=source).keys()) - set([source])
return anc
def is_directed_acyclic_graph(G):
"""Return True if the graph G is a directed acyclic graph (DAG) or
False if not.
Parameters
----------
G : NetworkX graph
A graph
Returns
-------
is_dag : bool
True if G is a DAG, false otherwise
"""
if not G.is_directed():
return False
try:
topological_sort(G)
return True
except nx.NetworkXUnfeasible:
return False
def topological_sort(G,nbunch=None):
"""Return a list of nodes in topological sort order.
A topological sort is a nonunique permutation of the nodes
such that an edge from u to v implies that u appears before v in the
topological sort order.
Parameters
----------
G : NetworkX digraph
A directed graph
nbunch : container of nodes (optional)
Explore graph in specified order given in nbunch
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the
graph G is undirected, a NetworkXError is raised.
NetworkXUnfeasible
If G is not a directed acyclic graph (DAG) no topological sort
exists and a NetworkXUnfeasible exception is raised.
Notes
-----
This algorithm is based on a description and proof in
The Algorithm Design Manual [1]_ .
See also
--------
is_directed_acyclic_graph
References
----------
.. [1] Skiena, S. S. The Algorithm Design Manual (Springer-Verlag, 1998).
http://www.amazon.com/exec/obidos/ASIN/0387948600/ref=ase_thealgorithmrepo/
"""
if not G.is_directed():
raise nx.NetworkXError(
"Topological sort not defined on undirected graphs.")
# nonrecursive version
seen = set()
order = []
explored = set()
if nbunch is None:
nbunch = G.nodes_iter()
for v in nbunch: # process all vertices in G
if v in explored:
continue
fringe = [v] # nodes yet to look at
while fringe:
w = fringe[-1] # depth first search
if w in explored: # already looked down this branch
fringe.pop()
continue
seen.add(w) # mark as seen
# Check successors for cycles and for new nodes
new_nodes = []
for n in G[w]:
if n not in explored:
if n in seen: #CYCLE !!
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
new_nodes.append(n)
if new_nodes: # Add new_nodes to fringe
fringe.extend(new_nodes)
else: # No new nodes so w is fully explored
explored.add(w)
order.append(w)
fringe.pop() # done considering this node
return list(reversed(order))
def topological_sort_recursive(G,nbunch=None):
"""Return a list of nodes in topological sort order.
A topological sort is a nonunique permutation of the nodes such
that an edge from u to v implies that u appears before v in the
topological sort order.
Parameters
----------
G : NetworkX digraph
nbunch : container of nodes (optional)
Explore graph in specified order given in nbunch
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the
graph G is undirected, a NetworkXError is raised.
NetworkXUnfeasible
If G is not a directed acyclic graph (DAG) no topological sort
exists and a NetworkXUnfeasible exception is raised.
Notes
-----
This is a recursive version of topological sort.
See also
--------
topological_sort
is_directed_acyclic_graph
"""
if not G.is_directed():
raise nx.NetworkXError(
"Topological sort not defined on undirected graphs.")
def _dfs(v):
ancestors.add(v)
for w in G[v]:
if w in ancestors:
raise nx.NetworkXUnfeasible("Graph contains a cycle.")
if w not in explored:
_dfs(w)
ancestors.remove(v)
explored.add(v)
order.append(v)
ancestors = set()
explored = set()
order = []
if nbunch is None:
nbunch = G.nodes_iter()
for v in nbunch:
if v not in explored:
_dfs(v)
return list(reversed(order))
def is_aperiodic(G):
"""Return True if G is aperiodic.
A directed graph is aperiodic if there is no integer k > 1 that
divides the length of every cycle in the graph.
Parameters
----------
G : NetworkX DiGraph
Graph
Returns
-------
aperiodic : boolean
True if the graph is aperiodic False otherwise
Raises
------
NetworkXError
If G is not directed
Notes
-----
This uses the method outlined in [1]_, which runs in O(m) time
given m edges in G. Note that a graph is not aperiodic if it is
acyclic, as every integer trivially divides length-0 cycles.
References
----------
.. [1] Jarvis, J. P.; Shier, D. R. (1996),
Graph-theoretic analysis of finite Markov chains,
in Shier, D. R.; Wallenius, K. T., Applied Mathematical Modeling:
A Multidisciplinary Approach, CRC Press.
"""
if not G.is_directed():
raise nx.NetworkXError("is_aperiodic not defined for undirected graphs")
s = next(G.nodes_iter())
levels = {s:0}
this_level = [s]
g = 0
l = 1
while this_level:
next_level = []
for u in this_level:
for v in G[u]:
if v in levels: # Non-Tree Edge
g = gcd(g, levels[u]-levels[v] + 1)
else: # Tree Edge
next_level.append(v)
levels[v] = l
this_level = next_level
l += 1
if len(levels)==len(G): #All nodes in tree
return g==1
else:
return g==1 and nx.is_aperiodic(G.subgraph(set(G)-set(levels)))
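# --- Illustrative usage sketch (not part of the original module). This copy of
# the module targets the old NetworkX 1.x API (it calls G.nodes_iter()), so the
# example assumes a 1.x installation.
if __name__ == '__main__':
    G = nx.DiGraph()
    G.add_edges_from([(1, 2), (1, 3), (2, 4), (3, 4)])
    print(is_directed_acyclic_graph(G))   # True
    print(topological_sort(G))            # a valid order, e.g. [1, 3, 2, 4]
    print(descendants(G, 1))              # {2, 3, 4}
    print(ancestors(G, 4))                # {1, 2, 3}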
|
py | b4082a58fb2283c4cbf31576a1eb1e6b23505d05 | from django.conf import settings
TASKS = getattr(settings, 'TEST_TASKS', (
'discover_jenkins.tasks.run_pylint.PyLintTask',
'discover_jenkins.tasks.with_coverage.CoverageTask',
))
OUTPUT_DIR = getattr(settings, 'TEST_OUTPUT_DIR', 'reports')
PYLINT_RCFILE = getattr(settings, 'TEST_PYLINT_RCFILE', 'pylint.rc')
PROJECT_APPS = getattr(settings, 'TEST_PROJECT_APPS', ())
COVERAGE_WITH_MIGRATIONS = getattr(settings, 'TEST_COVERAGE_WITH_MIGRATIONS', False)
COVERAGE_REPORT_HTML_DIR = getattr(settings, 'TEST_COVERAGE_REPORT_HTML_DIR', '')
COVERAGE_MEASURE_BRANCH = getattr(settings, 'TEST_COVERAGE_MEASURE_BRANCH', True)
COVERAGE_EXCLUDE_PATHS = getattr(settings, 'TEST_COVERAGE_EXCLUDE_PATHS', [])
COVERAGE_RCFILE = getattr(settings, 'TEST_COVERAGE_RCFILE', 'coverage.rc')
JSHINT_CHECKED_FILES = getattr(settings, 'TEST_JSHINT_CHECKED_FILES', None)
JSHINT_RCFILE = getattr(settings, 'TEST_JSHINT_RCFILE', None)
JSHINT_EXCLUDE = getattr(settings, 'TEST_JSHINT_EXCLUDE', [])
|
py | b4082a795b8b50320defcc45d400ab395b8e9bd5 | from . import BASIC, COMFORT, EXTENDED, NS_RAM
from .accounting import (AccountingAccount, ApplicableTradeTax,
BillingSpecifiedPeriod, TradeAllowanceCharge)
from .delivery import SupplyChainEvent
from .elements import Element
from .fields import (CurrencyField, Field, MultiField, QuantityField,
StringField)
from .note import IncludedNote
from .party import ShipToTradeParty, UltimateShipToTradeParty
from .product import TradeProduct
from .references import (LineAdditionalReferencedDocument,
LineBuyerOrderReferencedDocument,
LineContractReferencedDocument,
LineCustomerOrderReferencedDocument,
LineDeliveryNoteReferencedDocument,
LineDespatchAdviceReferencedDocument,
LineReceivingAdviceReferencedDocument)
class AllowanceCharge(TradeAllowanceCharge):
class Meta:
namespace = NS_RAM
tag = "AppliedTradeAllowanceCharge"
class GrossPrice(Element):
amount = CurrencyField(NS_RAM, "ChargeAmount", required=True, profile=COMFORT,
_d="Bruttopreis")
basis_quantity = QuantityField(NS_RAM, "BasisQuantity", required=False,
profile=COMFORT, _d="Preisbasismenge")
charge = MultiField(AllowanceCharge, required=False, profile=COMFORT)
class Meta:
namespace = NS_RAM
tag = "GrossPriceProductTradePrice"
class NetPrice(Element):
amount = CurrencyField(NS_RAM, "ChargeAmount", required=True, profile=COMFORT)
basis_quantity = QuantityField(NS_RAM, "BasisQuantity", required=False,
profile=COMFORT, _d="Preisbasismenge")
class Meta:
namespace = NS_RAM
tag = "NetPriceProductTradePrice"
class LineDocument(Element):
line_id = StringField(NS_RAM, "LineID")
notes = MultiField(IncludedNote)
class Meta:
namespace = NS_RAM
tag = "AssociatedDocumentLineDocument"
class LineAgreement(Element):
buyer_order = Field(LineBuyerOrderReferencedDocument, required=False, profile=EXTENDED)
contract = Field(LineContractReferencedDocument, required=False, profile=EXTENDED)
customer_order = Field(LineCustomerOrderReferencedDocument, required=False, profile=EXTENDED)
additional_references = MultiField(LineAdditionalReferencedDocument, required=False,
profile=COMFORT)
gross = Field(GrossPrice, required=False, profile=COMFORT)
net = Field(NetPrice)
class Meta:
namespace = NS_RAM
tag = "SpecifiedSupplyChainTradeAgreement"
class LineDelivery(Element):
billed_quantity = QuantityField(NS_RAM, "BilledQuantity", required=True,
profile=BASIC, _d="Menge, berechnet")
charge_free_quantity = QuantityField(NS_RAM, "ChargeFreeQuantity", required=False,
profile=EXTENDED, _d="Menge, ohne Berechnung")
package_quantity = QuantityField(NS_RAM, "PackageQuantity", required=False,
profile=EXTENDED, _d="Anzahl Packstücke")
ship_to = Field(ShipToTradeParty, required=False, profile=EXTENDED)
ultimate_ship_to = Field(UltimateShipToTradeParty, required=False, profile=EXTENDED)
event = Field(SupplyChainEvent, required=False, profile=EXTENDED,
_d="Detailinformationen zur tatsächlichen Lieferung")
despatch_advice = Field(LineDespatchAdviceReferencedDocument, required=False,
profile=EXTENDED)
receiving_advice = Field(LineReceivingAdviceReferencedDocument, required=False,
profile=EXTENDED)
delivery_note = Field(LineDeliveryNoteReferencedDocument, required=False,
profile=EXTENDED)
class Meta:
namespace = NS_RAM
tag = "SpecifiedSupplyChainTradeDelivery"
class LineSummation(Element):
total_amount = CurrencyField(NS_RAM, "LineTotalAmount", required=True,
profile=COMFORT)
total_allowance_charge = CurrencyField(NS_RAM, "TotalAllowanceChargeAmount",
required=False, profile=EXTENDED, _d="Gesamtbetrag der Zu- und Abschläge")
class Meta:
namespace = NS_RAM
tag = "SpecifiedTradeSettlementMonetarySummation"
class LineSettlement(Element):
trade_tax = Field(ApplicableTradeTax, required=False, profile=COMFORT)
period = Field(BillingSpecifiedPeriod, required=False, profile=EXTENDED)
accounting_account = Field(AccountingAccount, required=False, profile=EXTENDED,
_d="Kostenstelle")
monetary_summation = Field(LineSummation, required=False, profile=COMFORT)
class Meta:
namespace = NS_RAM
tag = "SpecifiedSupplyChainTradeSettlement"
class LineItem(Element):
document = Field(LineDocument, required=True)
agreement = Field(LineAgreement)
delivery = Field(LineDelivery)
settlement = Field(LineSettlement, required=True)
product = Field(TradeProduct)
class Meta:
namespace = NS_RAM
tag = "IncludedSupplyChainTradeLineItem"
|
py | b4082aa3ec953f487328d9097c5799a791fe51ab | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags for the consumers subcommand group."""
from googlecloudsdk.api_lib.endpoints import services_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util import completers
_SERVICES_LIST_COMMAND = ('endpoints services list --format=disable '
'--flatten=serviceName')
class ConsumerServiceCompleter(completers.ListCommandCompleter):
def __init__(self, **kwargs):
super(ConsumerServiceCompleter, self).__init__(
collection=services_util.SERVICES_COLLECTION,
list_command=_SERVICES_LIST_COMMAND,
flags=['enabled'],
**kwargs)
class ProducerServiceCompleter(completers.ListCommandCompleter):
def __init__(self, **kwargs):
super(ProducerServiceCompleter, self).__init__(
collection=services_util.SERVICES_COLLECTION,
list_command=_SERVICES_LIST_COMMAND,
flags=['produced'],
**kwargs)
def operation_flag(suffix='to act on'):
return base.Argument(
'operation',
help='The name of the operation {0}.'.format(suffix))
def producer_service_flag(suffix='to act on', flag_name='service'):
return base.Argument(
flag_name,
completer=ProducerServiceCompleter,
help='The name of the service {0}.'.format(suffix))
def consumer_service_flag(suffix='to act on', flag_name='service'):
return base.Argument(
flag_name,
completer=ConsumerServiceCompleter,
help='The name of the service {0}.'.format(suffix))
def available_service_flag(suffix='to act on', flag_name='service'):
# NOTE: Because listing available services often forces the tab completion
# code to timeout, this flag will not enable tab completion.
return base.Argument(
flag_name,
help='The name of the service {0}.'.format(suffix))
def key_flag(suffix='to act on'):
return base.Argument(
'--key',
help='The identifier of the key {0}.'.format(suffix))
|
py | b4082b0211c45eaaca1439207ad2f2adc831c7f1 | from pydantic import BaseModel
from decimal import Decimal
class Configuration(BaseModel):
LfServerId: int = None # Id of the Legacy Fleet server
adminId: int = 312381318891700224 # Scruffy_90
contributerVoiceCategoryName: str = None # The category from which to pull participants from all VCs.
serviceCharge: Decimal = 0.005 # Service charge for transactions in the MO.Trader
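# --- Illustrative usage sketch (not part of the original module). The ids and
# the overridden service charge below are placeholder values; pydantic coerces
# the string into a Decimal, and .dict() is the pydantic v1-style export.
if __name__ == "__main__":
    config = Configuration(LfServerId=123456789, serviceCharge="0.01")
    print(config.adminId)         # falls back to the default admin id
    print(config.serviceCharge)   # Decimal('0.01')
    print(config.dict())          # plain-dict view of all settings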
|
py | b4082b83f73e0e24eeae747fc22f25f889e47101 | import subprocess
from PyInstaller.__main__ import run as pack_exe
# pack the .exe file
pack_exe([
"--onefile",
"--windowed",
"--workpath=../build/work",
"--specpath=../build/work",
"--distpath=../build/out",
"--name=DBSpy",
"../src/dbspy/__main__.py"
])
# run the packed .exe file
subprocess.Popen("../build/out/DBSpy.exe")
|
py | b4082c0c6bbbb6fb7d830cc0ed3330572b27c83a | """ Unit test to check the correct behaviour of constraints"""
import unittest
import numpy as np
import keras
from keras.initializers import Constant
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
Input,
Dense,
Flatten,
Activation,
)
from keras.models import Model
from pooling.pooling_layers import (
OW1Pooling2D,
OW2Pooling2D,
OW3Pooling2D,
)
from pooling.ow_constraints import (
PosUnitModule,
)
class Test_PosUnitConstraint(unittest.TestCase):
""" Test OW1 Pooling"""
def setUp(self):
self.x_input = np.random.rand(4, 4, 2)
self.x_input = np.expand_dims(self.x_input, axis=0)
self.input_tensor = Input(shape=self.x_input.shape[1:])
self.y = np.array([0, 1]).reshape(1,2)
self.pool_size = (4, 4)
self.optimizer = keras.optimizers.Adam()
def test_ow1_constraint(self):
""" Test ow-pool with mean weights"""
constraint = PosUnitModule()
neg_ones_ini = -np.ones(self.pool_size[0] * self.pool_size[1])
w_initializer = Constant(value=neg_ones_ini)
x = OW1Pooling2D(pool_size=self.pool_size, name='ow', padding='same',
weights_constraint=constraint,
weights_initializer=w_initializer)(self.input_tensor)
x = Flatten()(x)
x = Activation('softmax')(x)
ow_model = Model(self.input_tensor, x)
ow_model.compile(optimizer=self.optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
ow_model.fit(self.x_input, self.y, epochs=1, verbose=0)
ow_layer = ow_model.layers[-3]
ow_weights = ow_layer.get_weights()[0]
np.testing.assert_array_almost_equal(np.sum(ow_weights),1)
self.assertFalse(np.sum(ow_weights<0))
def test_ow2_constraint(self):
""" Test ow-pool with mean weights"""
constraint = PosUnitModule(axis=1)
neg_ones_ini = -np.ones((self.x_input.shape[3],
self.pool_size[0] * self.pool_size[1]))
w_initializer = Constant(value=neg_ones_ini)
x = OW2Pooling2D(pool_size=self.pool_size, name='ow', padding='same',
weights_constraint=constraint,
weights_initializer=w_initializer)(self.input_tensor)
x = Flatten()(x)
x = Activation('softmax')(x)
ow_model = Model(self.input_tensor, x)
ow_model.compile(optimizer=self.optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
ow_model.fit(self.x_input, self.y, epochs=5, verbose=0)
ow_layer = ow_model.layers[-3]
ow_weights = ow_layer.get_weights()[0]
np.testing.assert_array_almost_equal(np.sum(ow_weights, axis=1), [1, 1])
self.assertFalse(np.sum(ow_weights<0))
def test_ow3_constraint(self):
""" Test ow-pool with mean weights"""
constraint = PosUnitModule(axis=3)
neg_ones_ini = -np.ones((1, 1, self.x_input.shape[3],
self.pool_size[0] * self.pool_size[1]))
w_initializer = Constant(value=neg_ones_ini)
x = OW3Pooling2D(pool_size=self.pool_size, name='ow', padding='same',
weights_constraint=constraint,
weights_initializer=w_initializer)(self.input_tensor)
x = Flatten()(x)
x = Activation('softmax')(x)
ow_model = Model(self.input_tensor, x)
ow_model.compile(optimizer=self.optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
ow_model.fit(self.x_input, self.y, epochs=5, verbose=0)
ow_layer = ow_model.layers[-3]
ow_weights = ow_layer.get_weights()[0]
np.testing.assert_array_almost_equal(np.sum(ow_weights, axis=3),
[[[1, 1]]], decimal=5)
self.assertFalse(np.sum(ow_weights<0))
|
py | b4082c1acbd83fffb2d8e48b0f4f6f73d46fe7dd | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the SWIG-wrapped events writer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
from tensorflow.core.framework import summary_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python import _pywrap_events_writer
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import tf_record
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class PywrapeventsWriterTest(test_util.TensorFlowTestCase):
def testWriteEvents(self):
file_prefix = os.path.join(self.get_temp_dir(), "events")
writer = _pywrap_events_writer.EventsWriter(compat.as_bytes(file_prefix))
filename = compat.as_text(writer.FileName())
event_written = event_pb2.Event(
wall_time=123.45,
step=67,
summary=summary_pb2.Summary(
value=[summary_pb2.Summary.Value(
tag="foo", simple_value=89.0)]))
writer.WriteEvent(event_written)
writer.Flush()
writer.Close()
with self.assertRaises(errors.NotFoundError):
for r in tf_record.tf_record_iterator(filename + "DOES_NOT_EXIST"):
self.assertTrue(False)
reader = tf_record.tf_record_iterator(filename)
event_read = event_pb2.Event()
event_read.ParseFromString(next(reader))
self.assertTrue(event_read.HasField("file_version"))
event_read.ParseFromString(next(reader))
# Second event
self.assertProtoEquals("""
wall_time: 123.45 step: 67
summary { value { tag: 'foo' simple_value: 89.0 } }
""", event_read)
with self.assertRaises(StopIteration):
next(reader)
def testWriteEventInvalidType(self):
class _Invalid(object):
def __str__(self):
return "Invalid"
with self.assertRaisesRegex(TypeError, "Invalid"):
_pywrap_events_writer.EventsWriter(b"foo").WriteEvent(_Invalid())
if __name__ == "__main__":
googletest.main()
|
py | b4082dcb8545c24e1ee10cd02f89470ab3b26d94 | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#import torch
import torch.nn as nn
import torchvision.models as models
class EmbeddingNet(nn.Module):
def __init__(self, backbone=None):
super(EmbeddingNet, self).__init__()
if backbone is None:
backbone = models.resnet50(num_classes=128)
self.backbone = backbone
def forward(self, x):
x = self.backbone(x)
x = nn.functional.normalize(x, dim=1)
return x
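# --- Illustrative usage sketch (not part of the original module). torch is
# imported locally because the module-level import is commented out above; the
# batch shape is the usual ImageNet-style (N, 3, 224, 224).
if __name__ == "__main__":
    import torch
    net = EmbeddingNet()                 # default ResNet-50 backbone, 128-d output
    batch = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        embeddings = net(batch)
    print(embeddings.shape)              # torch.Size([2, 128])
    print(embeddings.norm(dim=1))        # each row has (approximately) unit L2 norm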
|
py | b4082e711ea5fb5c569a280a47e5e8b8ea0237ee | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import django.db.models.deletion
from django.conf import settings
import common.models.base
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chul', '0001_auto_20160318_0338'),
]
operations = [
migrations.CreateModel(
name='CHUServiceLink',
fields=[
('id', models.UUIDField(default=uuid.uuid4, serialize=False, editable=False, primary_key=True)),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('deleted', models.BooleanField(default=False)),
('active', models.BooleanField(default=True, help_text=b'Indicates whether the record has been retired?')),
('search', models.CharField(max_length=255, null=True, editable=False, blank=True)),
('chu', models.ForeignKey(to='chul.CommunityHealthUnit', on_delete=django.db.models.deletion.PROTECT)),
('created_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
('service', models.ForeignKey(to='chul.CHUService', on_delete=django.db.models.deletion.PROTECT)),
('updated_by', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, default=common.models.base.get_default_system_user_id, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-updated', '-created'),
'default_permissions': ('add', 'change', 'delete', 'view'),
'abstract': False,
},
),
]
|
py | b4082f165343b52ede57e0c998bb053724c5a597 | import os
import sys
from collections import deque
from logging import getLogger
from tatau_core.models import TaskAssignment
from tatau_core.nn.tatau.model import Model
from tatau_core.nn.tatau.progress import TrainProgress
from tatau_core.utils import configure_logging
from tatau_core.utils.ipfs import IPFS, Downloader
from .session import Session, SessionValue
configure_logging()
logger = getLogger('tatau_core.trainer')
class TrainSession(Session):
train_history = SessionValue()
init_weights_path = SessionValue()
chunk_dirs = SessionValue()
train_weights_path = SessionValue()
def __init__(self, uuid=None):
super(TrainSession, self).__init__(module=__name__, uuid=uuid)
def process_assignment(self, assignment: TaskAssignment, *args, **kwargs):
logger.info('Train Task: {}'.format(assignment))
train_result = assignment.train_result
assert train_result
logger.info('Train data: {}'.format(assignment.train_data))
downloader = Downloader(assignment.task_declaration_id)
downloader.add_to_download_list(assignment.train_data.model_code_ipfs, 'model.py')
initial_weight_file_name = None
if assignment.train_data.weights_ipfs is not None:
initial_weight_file_name = 'initial_weight_{}'.format(assignment.train_data.current_iteration)
downloader.add_to_download_list(assignment.train_data.weights_ipfs, initial_weight_file_name)
else:
logger.info('Initial weights are not set')
batch_size = assignment.train_data.batch_size
epochs = assignment.train_data.epochs
chunk_dirs = deque()
for index, chunk_ipfs in enumerate(assignment.train_data.train_chunks_ipfs):
dir_name = 'chunk_{}'.format(index)
downloader.add_to_download_list(chunk_ipfs, dir_name)
chunk_dirs.append(downloader.resolve_path(dir_name))
downloader.download_all()
logger.info('Dataset downloaded')
self.model_path = downloader.resolve_path('model.py')
self.init_weights_path = None if initial_weight_file_name is None \
else downloader.resolve_path(initial_weight_file_name)
self.chunk_dirs = chunk_dirs
logger.info('Start training')
self._run(batch_size, epochs, assignment.train_data.current_iteration)
train_result.train_history = self.train_history
train_result.loss = train_result.train_history['loss'][-1]
train_result.accuracy = train_result.train_history['acc'][-1]
ipfs = IPFS()
ipfs_file = ipfs.add_file(self.train_weights_path)
logger.info('Result weights_ipfs are uploaded')
train_result.weights_ipfs = ipfs_file.multihash
def main(self):
logger.info('Start training')
batch_size = int(sys.argv[2])
nb_epochs = int(sys.argv[3])
current_iteration = int(sys.argv[4])
model = Model.load_model(path=self.model_path)
init_weights_path = self.init_weights_path
if init_weights_path is not None:
model.load_weights(init_weights_path)
else:
logger.info('Initial weights are not set')
progress = TrainProgress()
train_history = model.train(
chunk_dirs=self.chunk_dirs,
batch_size=batch_size, nb_epochs=nb_epochs,
train_progress=progress, current_iteration=current_iteration
)
train_weights_path = os.path.join(self.base_dir, 'result_weights')
model.save_weights(train_weights_path)
self.train_weights_path = train_weights_path
self.train_history = train_history
if __name__ == '__main__':
logger.info("Start trainer")
TrainSession.run()
|
py | b4083037bcf5ecea4e5504c5f28b73cde6c05a32 | """Dialog agent interface and classes."""
from abc import ABC, abstractmethod
from convlab2.nlu import NLU
from convlab2.dst import DST
from convlab2.policy import Policy
from convlab2.nlg import NLG
from copy import deepcopy
class Agent(ABC):
"""Interface for dialog agent classes."""
@abstractmethod
def __init__(self, name: str):
self.name = name
@abstractmethod
def response(self, observation):
"""Generate agent response given user input.
The data type of input and response can be either str or list of tuples, depending on the form of the agent.
Example:
If the agent is a pipeline agent with NLU, DST and Policy, then type(input) == str and
type(response) == list of tuples.
Args:
observation (str or list of tuples):
The input to the agent.
Returns:
response (str or list of tuples):
The response generated by the agent.
"""
pass
@abstractmethod
def init_session(self, **kwargs):
"""Reset the class variables to prepare for a new session."""
pass
class PipelineAgent(Agent):
"""Pipeline dialog agent base class, including NLU, DST, Policy and NLG.
The combination modes of pipeline agent modules are flexible. The only thing you have to make sure is that
the APIs of the agents match.
Example:
If agent A is (nlu, tracker, policy), then the agent B should be like (tracker, policy, nlg) to ensure API
matching.
The valid module combinations are as follows:
===== ===== ====== === == ===
NLU DST Policy NLG In Out
===== ===== ====== === == ===
\+ \+ \+ \+ nl nl
o \+ \+ \+ da nl
o \+ \+ o da da
\+ \+ \+ o nl da
o o \+ o da da
===== ===== ====== === == ===
"""
def __init__(self, nlu: NLU, dst: DST, policy: Policy, nlg: NLG, name: str):
"""The constructor of PipelineAgent class.
Here are some special combination cases:
1. If you use word-level DST (such as Neural Belief Tracker), you should set the nlu_model parameter \
to None. The agent will combine the modules automatically.
2. If you want to aggregate DST and Policy as a single module, set tracker to None.
Args:
nlu (NLU):
The natural language understanding module of the agent.
dst (DST):
The dialog state tracker of the agent.
policy (Policy):
The dialog policy module of the agent.
nlg (NLG):
The natural language generation module of the agent.
"""
super(PipelineAgent, self).__init__(name=name)
assert self.name in ['user', 'sys']
self.opponent_name = 'user' if self.name == 'sys' else 'sys'
self.nlu = nlu
self.dst = dst
self.policy = policy
self.nlg = nlg
self.init_session()
self.history = []
def state_replace(self, agent_state):
"""
this interface is reserved to replace all internal states of the agent
the code snippet example below is for the scenario when the agent state only depends on self.history and self.dst.state
"""
self.history = deepcopy(agent_state['history'])
self.dst.state = deepcopy(agent_state['dst_state'])
def state_return(self):
"""
this interface is reserved to return all internal states of the agent
the code snippet example below is for the scenario when the agent state only depends on self.history and self.dst.state
"""
agent_state = {}
agent_state['history'] = deepcopy(self.history)
agent_state['dst_state'] = deepcopy(self.dst.state)
return agent_state
def response(self, observation):
"""Generate agent response using the agent modules."""
# Note: If you modify the logic of this function, please ensure that it is consistent with deploy.server.ServerCtrl._turn()
if self.dst is not None:
self.dst.state['history'].append([self.opponent_name, observation]) # [['sys', sys_utt], ['user', user_utt],...]
self.history.append([self.opponent_name, observation])
# get dialog act
if self.nlu is not None:
self.input_action = self.nlu.predict(observation, context=[x[1] for x in self.history[:-1]])
else:
self.input_action = observation
self.input_action = deepcopy(self.input_action) # get rid of reference problem
# get state
if self.dst is not None:
if self.name == 'sys':
self.dst.state['user_action'] = self.input_action
else:
self.dst.state['system_action'] = self.input_action
state = self.dst.update(self.input_action)
else:
state = self.input_action
state = deepcopy(state) # get rid of reference problem
# get action
self.output_action = deepcopy(self.policy.predict(state)) # get rid of reference problem
# get model response
if self.nlg is not None:
model_response = self.nlg.generate(self.output_action)
else:
model_response = self.output_action
# print(model_response)
if self.dst is not None:
self.dst.state['history'].append([self.name, model_response])
if self.name == 'sys':
self.dst.state['system_action'] = self.output_action
else:
self.dst.state['user_action'] = self.output_action
self.history.append([self.name, model_response])
return model_response
def is_terminated(self):
if hasattr(self.policy, 'is_terminated'):
return self.policy.is_terminated()
return None
def get_reward(self):
if hasattr(self.policy, 'get_reward'):
return self.policy.get_reward()
return None
def init_session(self, **kwargs):
"""Init the attributes of DST and Policy module."""
if self.nlu is not None:
self.nlu.init_session()
if self.dst is not None:
self.dst.init_session()
if self.name == 'sys':
self.dst.state['history'].append([self.name, 'null'])
if self.policy is not None:
self.policy.init_session(**kwargs)
if self.nlg is not None:
self.nlg.init_session()
self.history = []
def get_in_da(self):
return self.input_action
def get_out_da(self):
return self.output_action
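# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# The concrete NLU/DST/Policy/NLG instances below are placeholders; substitute any
# implementations whose interfaces match the combination table in the class docstring.
#
#   sys_agent = PipelineAgent(nlu=my_nlu, dst=my_dst, policy=my_policy, nlg=my_nlg, name='sys')
#   sys_agent.init_session()
#   reply = sys_agent.response('I am looking for a cheap hotel in the north.')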
|
py | b40830deb23708b8429be20afcc2c138510b7742 | # Generated by Django 2.1.7 on 2019-05-13 11:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0012_auto_20190513_1047'),
]
operations = [
migrations.AlterField(
model_name='leavemsg',
name='unique_id',
field=models.CharField(default='leavemsg_uuid=c1d06ebe-bb2b-4436-a9f2-ca29b505698b', max_length=128, unique=True, verbose_name='唯一标识符'),
),
migrations.AlterField(
model_name='user',
name='unique_id',
field=models.CharField(default='user_uuid=b42143d2-02ed-4fea-b134-ccfea1329225', max_length=128, unique=True, verbose_name='唯一标识符'),
),
]
|
py | b40831d75f01be3ff459517fc64fb7155568699e | # coding=utf-8
"""
Unit testing the variety of column customizations available
"""
import pytest
import tableformatter as tf
# Make the test results reproducible regardless of what color libraries are installed
tf.TableColors.set_color_library('none')
tf.set_default_grid(tf.AlternatingRowGrid('', ''))
class MyRowObject(object):
"""Simple object to demonstrate using a list of objects with TableFormatter"""
def __init__(self, field1: str, field2: str, field3: int, field4: int):
self.field1 = field1
self.field2 = field2
self._field3 = field3
self.field4 = field4
def get_field3(self):
"""Demonstrates accessing object functions"""
return self._field3
def multiply(row_obj: MyRowObject):
"""Demonstrates an object formatter function"""
return str(row_obj.get_field3() * row_obj.field4)
def int2word(num, separator="-"):
"""Demonstrates a field formatter function
From: https://codereview.stackexchange.com/questions/156590/create-the-english-word-for-a-number
"""
ones_and_teens = {0: "Zero", 1: 'One', 2: 'Two', 3: 'Three',
4: 'Four', 5: 'Five', 6: 'Six', 7: 'Seven',
8: 'Eight', 9: 'Nine', 10: 'Ten', 11: 'Eleven',
12: 'Twelve', 13: 'Thirteen', 14: 'Fourteen',
15: 'Fifteen', 16: 'Sixteen', 17: 'Seventeen',
18: 'Eighteen', 19: 'Nineteen'}
twenty2ninety = {2: 'Twenty', 3: 'Thirty', 4: 'Forty', 5: 'Fifty',
6: 'Sixty', 7: 'Seventy', 8: 'Eighty', 9: 'Ninety', 0: ""}
if 0 <= num <= 19:
return ones_and_teens[num]
elif 20 <= num <= 99:
tens, below_ten = divmod(num, 10)
if below_ten > 0:
words = twenty2ninety[tens] + separator + \
ones_and_teens[below_ten].lower()
else:
words = twenty2ninety[tens]
return words
elif 100 <= num <= 999:
hundreds, below_hundred = divmod(num, 100)
tens, below_ten = divmod(below_hundred, 10)
if below_hundred == 0:
words = ones_and_teens[hundreds] + separator + "hundred"
elif below_ten == 0:
words = ones_and_teens[hundreds] + separator + \
"hundred" + separator + twenty2ninety[tens].lower()
else:
if tens > 0:
words = ones_and_teens[hundreds] + separator + "hundred" + separator + twenty2ninety[
tens].lower() + separator + ones_and_teens[below_ten].lower()
else:
words = ones_and_teens[
hundreds] + separator + "hundred" + separator + ones_and_teens[below_ten].lower()
return words
else:
print("num out of range")
@pytest.fixture
def obj_rows():
rows = [MyRowObject('Longer text that will trigger the column wrapping', 'A2', 5, 56),
MyRowObject('B1', 'B2\nB2\nB2', 23, 8),
MyRowObject('C1', 'C2', 4, 9),
MyRowObject('D1', 'D2', 7, 5)]
return rows
def test_wrapped_object_formatter(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1'),
tf.Column('Second', attrib='field2'),
tf.Column('Num 1', width=3, attrib='get_field3'),
tf.Column('Num 2', attrib='field4'),
tf.Column('Multiplied', obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════╤═════╤═══════╤════════════╗
║ │ │ Num │ │ ║
║ First │ Second │ 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════╪═════╪═══════╪════════════╣
║ Longer text that │ A2 │ 5 │ 56 │ 280 ║
║ will trigger the │ │ │ │ ║
║ column wrapping │ │ │ │ ║
║ B1 │ B2 │ 23 │ 8 │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════╧═════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_wrapped_indent_center_header(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.WRAP_WITH_INDENT),
tf.Column('Second', attrib='field2'),
tf.Column('Num 1', width=3, attrib='get_field3', header_halign=tf.ColumnAlignment.AlignCenter),
tf.Column('Num 2', attrib='field4'),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════╤═════╤═══════╤════════════╗
║ │ │ Num │ │ ║
║ First │ Second │ 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════╪═════╪═══════╪════════════╣
║ Longer text that │ A2 │ 5 │ 56 │ 280 ║
║ » will trigger the │ │ │ │ ║
║ » column wrapping │ │ │ │ ║
║ B1 │ B2 │ 23 │ 8 │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════╧═════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_wrapped_custom_indent_header_right_header_top(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.WRAP_WITH_INDENT,
wrap_prefix='>>> '),
tf.Column('Second', attrib='field2', cell_halign=tf.ColumnAlignment.AlignCenter),
tf.Column('Num 1', width=3, attrib='get_field3', header_halign=tf.ColumnAlignment.AlignRight),
tf.Column('Num 2', attrib='field4', header_valign=tf.ColumnAlignment.AlignTop),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════╤═════╤═══════╤════════════╗
║ │ │ Num │ Num 2 │ ║
║ First │ Second │ 1 │ │ Multiplied ║
╠══════════════════════╪════════╪═════╪═══════╪════════════╣
║ Longer text that │ A2 │ 5 │ 56 │ 280 ║
║ >>> will trigger the │ │ │ │ ║
║ >>> column wrapping │ │ │ │ ║
║ B1 │ B2 │ 23 │ 8 │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════╧═════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_truncate_end_custom_padding(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.TRUNCATE_END),
tf.Column('Second', attrib='field2', cell_padding=3),
tf.Column('Num 1', width=3, attrib='get_field3'),
tf.Column('Num 2', attrib='field4'),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════════╤═════╤═══════╤════════════╗
║ │ │ Num │ │ ║
║ First │ Second │ 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════════╪═════╪═══════╪════════════╣
║ Longer text that wi… │ A2 │ 5 │ 56 │ 280 ║
║ B1 │ B2 │ 23 │ 8 │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════════╧═════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_truncate_front_custom_padding_cell_align_right(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.TRUNCATE_FRONT),
tf.Column('Second', attrib='field2', cell_padding=5, cell_halign=tf.ColumnAlignment.AlignRight),
tf.Column('Num 1', attrib='get_field3'),
tf.Column('Num 2', attrib='field4'),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════════════╤═══════╤═══════╤════════════╗
║ First │ Second │ Num 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════════════╪═══════╪═══════╪════════════╣
║ …the column wrapping │ A2 │ 5 │ 56 │ 280 ║
║ B1 │ B2 │ 23 │ 8 │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════════════╧═══════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_truncate_middle_cell_align_bottom(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.TRUNCATE_MIDDLE),
tf.Column('Second', attrib='field2'),
tf.Column('Num 1', attrib='get_field3'),
tf.Column('Num 2', attrib='field4', cell_valign=tf.ColumnAlignment.AlignBottom),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════╤═══════╤═══════╤════════════╗
║ First │ Second │ Num 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════╪═══════╪═══════╪════════════╣
║ Longer t … wrapping │ A2 │ 5 │ 56 │ 280 ║
║ B1 │ B2 │ 23 │ │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ 8 │ ║
║ C1 │ C2 │ 4 │ 9 │ 36 ║
║ D1 │ D2 │ 7 │ 5 │ 35 ║
╚══════════════════════╧════════╧═══════╧═══════╧════════════╝
'''.lstrip('\n')
assert table == expected
def test_truncate_hard_field_formatter(obj_rows):
columns = (tf.Column('First', width=20, attrib='field1', wrap_mode=tf.WrapMode.TRUNCATE_HARD),
tf.Column('Second', attrib='field2'),
tf.Column('Num 1', attrib='get_field3'),
tf.Column('Num 2', attrib='field4', formatter=int2word),
tf.Column('Multiplied', attrib=None, obj_formatter=multiply))
table = tf.generate_table(obj_rows, columns)
expected = '''
╔══════════════════════╤════════╤═══════╤═══════════╤════════════╗
║ First │ Second │ Num 1 │ Num 2 │ Multiplied ║
╠══════════════════════╪════════╪═══════╪═══════════╪════════════╣
║ Longer text that wil │ A2 │ 5 │ Fifty-six │ 280 ║
║ B1 │ B2 │ 23 │ Eight │ 184 ║
║ │ B2 │ │ │ ║
║ │ B2 │ │ │ ║
║ C1 │ C2 │ 4 │ Nine │ 36 ║
║ D1 │ D2 │ 7 │ Five │ 35 ║
╚══════════════════════╧════════╧═══════╧═══════════╧════════════╝
'''.lstrip('\n')
assert table == expected
|
py | b4083324ca7ccfc99687a1778cabdb71ede0119a | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from sochi_backend.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
|
py | b408341c55e25ee869f0831de037044192ce85ba | from abc import ABCMeta
import numpy as np
from deslib.base import BaseDS
from deslib.util.aggregation import (weighted_majority_voting_rule,
majority_voting_rule,
aggregate_proba_ensemble_weighted)
class BaseDES(BaseDS):
"""Base class for a Dynamic Ensemble Selection (DES).
All dynamic ensemble selection techniques should inherit from this class.
Warning: This class should not be instantiated directly, use
derived classes instead.
"""
__metaclass__ = ABCMeta
def __init__(self, pool_classifiers=None, k=7, DFP=False, with_IH=False,
safe_k=None, IH_rate=0.30, mode='selection',
needs_proba=False, random_state=None,
knn_classifier='knn', knne=False, DSEL_perc=0.5):
super(BaseDES, self).__init__(pool_classifiers=pool_classifiers,
k=k,
DFP=DFP,
with_IH=with_IH,
safe_k=safe_k,
IH_rate=IH_rate,
needs_proba=needs_proba,
random_state=random_state,
knn_classifier=knn_classifier,
knne=knne,
DSEL_perc=DSEL_perc)
self.mode = mode
def estimate_competence(self, query, neighbors, distances=None,
predictions=None):
"""Estimate the competence of each base classifier :math:`c_{i}`
the classification of the query sample x.
Returns an array containing the level of competence estimated
for each base classifier. The size of the vector is equals to
the size of the generated_pool of classifiers.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors according for each test sample
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors according for each test sample
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifiers for the test examples.
Returns
-------
competences : array of shape (n_samples, n_classifiers)
Competence level estimated for each base classifier and test
example.
"""
pass
def estimate_competence_from_proba(self, query, neighbors, probabilities,
distances=None):
""" estimate the competence of each base classifier :math:`c_{i}`
the classification of the query sample x, for methods that require
probabilities.
Returns an array containing the level of competence estimated
for each base classifier. The size of the vector is equals to
the size of the generated_pool of classifiers.
Parameters
----------
query : array of shape (n_samples, n_features)
The query sample.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors according for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors according for each test
sample.
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all samples.
Returns
-------
competences : array = [n_samples, n_classifiers]
Competence level estimated for each base classifier and test
example.
"""
pass
def select(self, competences):
"""Select the most competent classifiers to compose an ensemble for
the classification of the query sample X.
Parameters
----------
competences : array of shape (n_samples, n_classifiers)
Estimated competence level of each base classifier for each test
example.
Returns
-------
selected_classifiers : array of shape (n_samples, n_classifiers)
Boolean matrix containing True if the base classifier is selected.
False otherwise.
"""
pass
def classify_with_ds(self, query, predictions, probabilities=None,
neighbors=None, distances=None, DFP_mask=None):
"""Predicts the label of the corresponding query sample.
If self.mode == "selection", the selected ensemble is combined using
the majority voting rule
If self.mode == "weighting", all base classifiers are used for
classification, however their influence in the final decision are
weighted according to their estimated competence level. The weighted
majority voting scheme is used to combine the decisions of the
base classifiers.
If self.mode == "hybrid", A hybrid Dynamic selection and weighting
approach is used. First an ensemble with the competent base classifiers
are selected. Then, their decisions are aggregated using the weighted
majority voting rule according to its competence level estimates.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifier for all test examples.
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all test
examples. (For methods that always require probabilities from
the base classifiers).
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors according for each test sample.
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors according for each test
sample.
DFP_mask : array of shape (n_samples, n_classifiers)
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_label : array of shape (n_samples)
Predicted class label for each test example.
"""
if query.ndim < 2:
query = query.reshape(1, -1)
if predictions.ndim < 2:
predictions = predictions.reshape(1, -1)
if query.shape[0] != predictions.shape[0]:
raise ValueError(
'The arrays query and predictions must have the same number'
' of samples. query.shape is {} '
'and predictions.shape is {}'.format(query.shape,
predictions.shape))
if self.needs_proba:
competences = self.estimate_competence_from_proba(
query,
neighbors=neighbors,
distances=distances,
probabilities=probabilities)
else:
competences = self.estimate_competence(query,
neighbors=neighbors,
distances=distances,
predictions=predictions)
if self.DFP:
competences = competences * DFP_mask
if self.mode == "selection":
# The selected_classifiers matrix is used as a mask to remove
# the predictions of certain base classifiers.
selected_classifiers = self.select(competences)
votes = np.ma.MaskedArray(predictions, ~selected_classifiers)
predicted_label = majority_voting_rule(votes)
elif self.mode == "weighting":
votes = np.atleast_2d(predictions)
predicted_label = weighted_majority_voting_rule(votes, competences,
np.arange(
self.n_classes_
))
else:
selected_classifiers = self.select(competences)
votes = np.ma.MaskedArray(predictions, ~selected_classifiers)
predicted_label = weighted_majority_voting_rule(votes, competences,
np.arange(
self.n_classes_
))
return predicted_label
def predict_proba_with_ds(self, query, predictions, probabilities,
neighbors=None, distances=None, DFP_mask=None):
"""Predicts the posterior probabilities of the corresponding query.
If self.mode == "selection", the selected ensemble is used to estimate
the probabilities. The average rule is used
to give probabilities estimates.
If self.mode == "weighting", all base classifiers are used for
estimating the probabilities, however their influence in the final
decision are weighted according to their estimated competence level.
A weighted average method is used to give the probabilities estimates.
If self.mode == "Hybrid", A hybrid Dynamic selection and weighting
approach is used. First an ensemble with the competent base classifiers
are selected. Then, their decisions are aggregated using a weighted
average rule to give the probabilities estimates.
Parameters
----------
query : array of shape (n_samples, n_features)
The test examples.
predictions : array of shape (n_samples, n_classifiers)
Predictions of the base classifier for all test examples.
probabilities : array of shape (n_samples, n_classifiers, n_classes)
Probabilities estimates of each base classifier for all samples.
neighbors : array of shape (n_samples, n_neighbors)
Indices of the k nearest neighbors according for each test sample
distances : array of shape (n_samples, n_neighbors)
Distances of the k nearest neighbors according for each test sample
DFP_mask : array of shape (n_samples, n_classifiers)
Mask containing 1 for the selected base classifier and 0 otherwise.
Returns
-------
predicted_proba : array = [n_samples, n_classes]
The probability estimates for all test examples.
"""
if query.shape[0] != probabilities.shape[0]:
raise ValueError(
'The arrays query and predictions must have the same number'
' of samples. query.shape is {} '
'and predictions.shape is {}'.format(query.shape,
predictions.shape))
if self.needs_proba:
competences = self.estimate_competence_from_proba(
query,
neighbors=neighbors,
distances=distances,
probabilities=probabilities)
else:
competences = self.estimate_competence(query,
neighbors=neighbors,
distances=distances,
predictions=predictions)
if self.DFP:
competences = competences * DFP_mask
if self.mode == "selection":
selected_classifiers = self.select(competences)
# Broadcast the selected classifiers mask
# to cover the last axis (n_classes):
selected_classifiers = np.expand_dims(selected_classifiers, axis=2)
selected_classifiers = np.broadcast_to(selected_classifiers,
probabilities.shape)
masked_proba = np.ma.MaskedArray(probabilities,
~selected_classifiers)
predicted_proba = np.mean(masked_proba, axis=1)
elif self.mode == "weighting":
predicted_proba = aggregate_proba_ensemble_weighted(probabilities,
competences)
else:
selected_classifiers = self.select(competences)
# Broadcast the selected classifiers mask
# to cover the last axis (n_classes):
selected_classifiers = np.expand_dims(selected_classifiers, axis=2)
selected_classifiers = np.broadcast_to(selected_classifiers,
probabilities.shape)
masked_proba = np.ma.MaskedArray(probabilities,
~selected_classifiers)
predicted_proba = aggregate_proba_ensemble_weighted(masked_proba,
competences)
return predicted_proba
def _validate_parameters(self):
super(BaseDES, self)._validate_parameters()
if not isinstance(self.mode, str):
raise TypeError(
'Parameter "mode" should be a string.'
' Currently "mode" = {}' .format(type(self.mode)))
if self.mode not in ['selection', 'hybrid', 'weighting']:
raise ValueError(
'Invalid value for parameter "mode".'
' "mode" should be one of these options '
'{selection, hybrid, weighting}')
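# Hedged usage sketch (illustration only; BaseDES itself is abstract). A concrete
# DES technique from deslib, e.g. KNORA-Eliminate, would typically be used as:
#
#   from deslib.des.knora_e import KNORAE
#   des = KNORAE(pool_classifiers)
#   des.fit(X_dsel, y_dsel)          # dynamic selection dataset
#   y_pred = des.predict(X_test)
#
# The variable names above are placeholders for the caller's data and ensemble.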
|
py | b40836ee64639849fd1bdd86b94ba5d45c99fa1e | import pytest
from unittestmock import UnitTestMock
import numpy as np
from cykhash import isin_int64, Int64Set_from, Int64Set_from_buffer
from cykhash import isin_int32, Int32Set_from, Int32Set_from_buffer
from cykhash import isin_float64, Float64Set_from, Float64Set_from_buffer
from cykhash import isin_float32, Float32Set_from, Float32Set_from_buffer
from cykhash import isin_pyobject, PyObjectSet_from, PyObjectSet_from_buffer
ISIN={'int32': isin_int32, 'int64': isin_int64, 'float64' : isin_float64, 'float32' : isin_float32}
FROM_SET={'int32': Int32Set_from, 'int64': Int64Set_from, 'float64' : Float64Set_from, 'float32' : Float32Set_from, 'pyobject' : PyObjectSet_from}
BUFFER_SIZE = {'int32': 'i', 'int64': 'q', 'float64' : 'd', 'float32' : 'f'}
FROM_BUFFER_SET={'int32': Int32Set_from_buffer, 'int64': Int64Set_from_buffer, 'float64' : Float64Set_from_buffer, 'float32' : Float32Set_from_buffer, 'pyobject' : PyObjectSet_from_buffer}
@pytest.mark.parametrize(
"value_type",
['int64', 'int32', 'float64', 'float32', 'pyobject']
)
class TestInt64Set_from(UnitTestMock):
def test_create(self, value_type):
lst=[6,7,8]
s=FROM_SET[value_type](list(lst))
self.assertEqual(len(s), len(lst))
for x in lst:
self.assertTrue(x in s)
import array
@pytest.mark.parametrize(
"value_type",
['int64', 'int32', 'float64', 'float32']
)
class TestBuffer(UnitTestMock):
def test_isin(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=array.array(BUFFER_SIZE[value_type], range(0,7))
result=array.array('B', [False]*7)
ISIN[value_type](a,s,result)
expected=array.array('B', [False, False, True, False, True, False, True])
self.assertTrue(expected==result)
def test_isin_result_shorter(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=array.array(BUFFER_SIZE[value_type], range(0,7))
result=array.array('B', [False]*6)
with pytest.raises(ValueError) as context:
ISIN[value_type](a,s,result)
self.assertEqual("Different sizes for query(7) and result(6)", context.value.args[0])
def test_isin_result_longer(self, value_type):
s=FROM_SET[value_type]([2,4,6])
a=array.array(BUFFER_SIZE[value_type], range(0,7))
result=array.array('B', [False]*8)
with pytest.raises(ValueError) as context:
ISIN[value_type](a,s,result)
self.assertEqual("Different sizes for query(7) and result(8)", context.value.args[0])
def test_isin_db_none(self, value_type):
a=array.array(BUFFER_SIZE[value_type], range(0,7))
result=array.array('B', [True]*7)
ISIN[value_type](a,None,result)
expected=array.array('B', [False, False, False, False, False, False, False])
self.assertTrue(expected==result)
def test_isin_nones(self, value_type):
s=FROM_SET[value_type]([2,4,6])
ISIN[value_type](None,s,None)
self.assertTrue(True)
def test_from_buffer(self, value_type):
a=array.array(BUFFER_SIZE[value_type], [6,7,8])
s=FROM_BUFFER_SET[value_type](a)
self.assertEqual(len(s), len(a))
for x in a:
self.assertTrue(x in s)
class TestBufferPyObject(UnitTestMock):
def test_pyobject_isin(self):
s=PyObjectSet_from([2,4,6])
a=np.array(range(0,7), dtype=np.object)
result=array.array('B', [False]*7)
isin_pyobject(a,s,result)
expected=array.array('B', [False, False, True, False, True, False, True])
self.assertTrue(expected==result)
def test_pyobject_from_buffer(self):
a=np.array([6,7,8], dtype=np.object)
s=PyObjectSet_from_buffer(a)
self.assertEqual(len(s), len(a))
for x in a:
self.assertTrue(x in s)
def test_isin_result_shorter(self):
s=PyObjectSet_from([2,4,6])
a=np.array(range(0,7), dtype=np.object)
result=array.array('B', [False]*6)
with pytest.raises(ValueError) as context:
isin_pyobject(a,s,result)
self.assertEqual("Different sizes for query(7) and result(6)", context.value.args[0])
def test_isin_result_longer(self):
s=PyObjectSet_from([2,4,6])
a=np.array(range(0,7), dtype=np.object)
result=array.array('B', [False]*8)
with pytest.raises(ValueError) as context:
isin_pyobject(a,s,result)
self.assertEqual("Different sizes for query(7) and result(8)", context.value.args[0])
def test_isin_db_none(self):
a=np.array(range(0,7), dtype=np.object)
result=array.array('B', [True]*7)
isin_pyobject(a,None,result)
expected=array.array('B', [False, False, False, False, False, False, False])
self.assertTrue(expected==result)
def test_isin_nones(self):
s=PyObjectSet_from([2,4,6])
isin_pyobject(None,s,None)
self.assertTrue(True)
|
py | b408370d7906cbbaee2edee40cc4e0d0b3f656ed | import sys
turn = 'X'
boardDict = {
'A1': ' ', 'A2': ' ', 'A3': ' ',
'B1': ' ', 'B2': ' ', 'B3': ' ',
'C1': ' ', 'C2': ' ', 'C3': ' '
}
def updateScanners():
global columnOneScan
global columnTwoScan
global columnThreeScan
global rowOneScan
global rowTwoScan
global rowThreeScan
global diagonalOneScan
global diagonalTwoScan
columnOneScan = [boardDict['A1'], boardDict['B1'], boardDict['C1']]
columnTwoScan = [boardDict['A2'], boardDict['B2'], boardDict['C2']]
columnThreeScan = [boardDict['A3'], boardDict['B3'], boardDict['C3']]
rowOneScan = [boardDict['A1'], boardDict['A2'], boardDict['A3']]
rowTwoScan = [boardDict['B1'], boardDict['B2'], boardDict['B3']]
rowThreeScan = [boardDict['C1'], boardDict['C2'], boardDict['C3']]
diagonalOneScan = [boardDict['A1'], boardDict['B2'], boardDict['C3']]
diagonalTwoScan = [boardDict['A3'], boardDict['B2'], boardDict['C1']]
def boardPrint(board):
print(' 1 2 3')
print('A |' + board['A1'] + '|' + board['A2'] + '|' + board['A3'] + '|')
print(' -------')
print('B |' + board['B1'] + '|' + board['B2'] + '|' + board['B3'] + '|')
print(' -------')
print('C |' + board['C1'] + '|' + board['C2'] + '|' + board['C3'] + '|')
def checkRow(row):
win = True
# Comparing each element with first item
for point in row:
if turn != point:
win = False
break
return win
def validPoint(pnt):
valid = False
valids = ['A1','A2','A3','B1','B2','B3','C1','C2','C3']
for val in valids:
if pnt == val:
if boardDict[pnt] == ' ':
valid = True
break
return valid
boardPrint(boardDict)
while True:
while True:
point = str(input("Choose your point, " + turn + " | Format (A1,B2,C3): "))
if validPoint(point):
boardDict[point] = str(turn)
boardPrint(boardDict)
updateScanners()
else:
print("That point is already taken and/or is not valid.")
continue
if checkRow(columnOneScan):
print(turn + ' wins!')
break
elif checkRow(columnTwoScan):
print(turn + ' wins!')
break
elif checkRow(columnThreeScan):
print(turn + ' wins!')
break
elif checkRow(rowOneScan):
print(turn + ' wins!')
break
elif checkRow(rowTwoScan):
print(turn + ' wins!')
break
elif checkRow(rowThreeScan):
print(turn + ' wins!')
break
elif checkRow(diagonalOneScan):
print(turn + ' wins!')
break
elif checkRow(diagonalTwoScan):
print(turn + ' wins!')
break
elif ' ' not in boardDict.values():
print("It's a draw!")
break
if turn == 'X':
turn = 'O'
else:
turn = 'X'
playAgain = input("Want to play again? | Respond with Y/N: ")
if playAgain.lower() == 'y':
boardDict = {
'A1': ' ', 'A2': ' ', 'A3': ' ',
'B1': ' ', 'B2': ' ', 'B3': ' ',
'C1': ' ', 'C2': ' ', 'C3': ' '
}
continue
else:
sys.exit()
|
py | b408371042d6611023226821bdedf19b1ecf0298 | from django.core.management.base import BaseCommand
from pinax.stripe.actions import plans
class Command(BaseCommand):
help = 'Sync Stripe plans to database.'
def handle(self, *args, **options):
plans.sync_plans()
|
py | b4083861825a14495996bf44b8a271a9eb4a5a1d | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import sys
class Window(QWidget):
def __init__(self):
QWidget.__init__(self)
layout = QGridLayout()
self.setLayout(layout)
# create menu
menubar = QMenuBar()
layout.addWidget(menubar, 0, 0)
actionFile = menubar.addMenu("File")
actionFile.addAction("New")
actionFile.addAction("Open")
actionFile.addAction("Save")
actionFile.addSeparator()
actionFile.addAction("Quit")
menubar.addMenu("Edit")
menubar.addMenu("View")
menubar.addMenu("Help")
# add textbox
tbox = QPlainTextEdit()
layout.addWidget(tbox, 1, 0)
app = QApplication(sys.argv)
screen = Window()
screen.show()
sys.exit(app.exec_()) |
py | b40838834c90240cbc32577a6de21a97a746d926 | # -*- coding: utf-8 -*-
u"""zgoubi input file parser.
:copyright: Copyright (c) 2018 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkcollections
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo.template.line_parser import LineParser
import copy
import re
_COMMAND_INDEX_POS = 110
_CHANGREF_MAP = {
'XS': 'XCE',
'YS': 'YCE',
'ZR': 'ALE',
}
_IGNORE_ELEMENTS = [
'faisceau',
'images',
]
#TODO(pjm): remove when we have updated to latest zgoubi
_NEW_PARTICLE_TYPES = {
'POSITRON': {
'M': 0.5109989461,
'Q': 1.602176487e-19,
'G': 1.159652181e-3,
'Tau': 1e99,
},
}
def parse_file(zgoubi_text, max_id=0):
parser = LineParser(max_id)
lines = zgoubi_text.replace('\r', '').split('\n')
elements = []
# skip first documentation line
title = lines.pop(0)
parser.increment_line_number()
unhandled_elements = {}
current_command = None
for line in lines:
parser.increment_line_number()
line = re.sub(r'\!.*$', '', line)
line = re.sub(r'^\s+', '', line)
line = re.sub(r'\s+$', '', line)
if not line:
continue
keyword = _parse_keyword(line)
if keyword:
if current_command:
_add_command(parser, current_command, elements, unhandled_elements)
if keyword == 'END' or keyword == 'FIN':
current_command = None
break
line = _strip_command_index(line)
current_command = [line.split()]
current_command[0][0] = keyword
else:
line = line.lstrip()
current_command.append(line.split())
assert current_command is None, 'missing END element'
return title, elements, sorted(unhandled_elements.keys())
def _add_command(parser, command, elements, unhandled_elements):
command_type = command[0][0]
if command_type.lower() in _IGNORE_ELEMENTS:
return
method = '_zgoubi_{}'.format(command_type).lower()
if method not in globals():
unhandled_elements[command_type] = True
# replace the element with a zero length drift
command = [['DRIFT', 'DUMMY {}'.format(command_type)], ['0']]
method = '_zgoubi_drift'
el = globals()[method](command)
if el:
if type(el) == list:
elements += el
else:
elements.append(el)
def _parse_command(command, command_def):
res = _parse_command_header(command)
for i in range(len(command_def)):
_parse_command_line(res, command[i + 1], command_def[i])
return res
def _parse_command_header(command):
res = _parse_command_line(pkcollections.Dict({}), command[0], 'type *name *label2')
for f in ('name', 'label2'):
# don't parse line numbers into name or label2
if f in res and re.search(r'^\d+$', res[f]):
del res[f]
return res
def _parse_command_line(element, line, line_def):
for k in line_def.split(' '):
if k[0] == '*':
k = k[1:]
if not len(line):
break
element[k] = line.pop(0)
return element
def _parse_keyword(line):
m = re.match(r"\s*'(\w+)'", line)
if m:
return m.group(1).upper()
return None
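# Example (illustrative input): _parse_keyword("'DRIFT' DR1") returns 'DRIFT',
# while a plain data line with no quoted keyword returns None.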
def _strip_command_index(line):
# strip the command index if present
if len(line) >= _COMMAND_INDEX_POS:
line = re.sub(r'\s+\d+\s*$', '', line)
return line
def _zgoubi_autoref(command):
i = command[1][0]
assert i == '4', '{}: only AUTOREF 4 is supported for now'.format(i)
return _parse_command(command, [
'I',
'XCE YCE ALE',
])
def _zgoubi_bend(command):
res = _parse_command(command, [
'IL',
'l Sk B1',
'X_E LAM_E W_E',
'NCE C_0 C_1 C_2 C_3 C_4 C_5',
'X_S LAM_S W_S',
'NCS CS_0 CS_1 CS_2 CS_3 CS_4 CS_5',
'XPAS',
'KPOS XCE YCE ALE',
])
assert res['KPOS'] in ('1', '2', '3'), '{}: BEND KPOS not yet supported'.format(res['KPOS'])
return res
def _zgoubi_cavite(command):
iopt = re.sub(r'\..*$', '', command[1][0])
command[1][0] = iopt
if iopt == '0' or iopt == '1':
return _parse_command(command, [
'IOPT',
'L h',
'V',
])
if iopt == '2' or iopt == '3':
return _parse_command(command, [
'IOPT',
'L h',
'V sig_s',
])
if iopt == '7':
return _parse_command(command, [
'IOPT',
'L f_RF',
'V sig_s',
])
if iopt == '10':
return _parse_command(command, [
'IOPT',
'l f_RF *ID',
'V sig_s IOP',
])
assert False, 'unsupported CAVITE: {}'.format(iopt)
def _zgoubi_changref(command):
if re.search(r'^(X|Y|Z)', command[1][0]):
# convert new format CHANGREF to a series of old format elements
el = _parse_command_header(command)
el['XCE'] = el['YCE'] = el['ALE'] = 0
res = []
for i in range(int(len(command[1]) / 2)):
name = command[1][i * 2]
value = float(command[1][i * 2 + 1])
if value == 0:
continue
if name in _CHANGREF_MAP:
el2 = el.copy()
el2[_CHANGREF_MAP[name]] = value
res.append(el2)
else:
pkdlog('zgoubi CHANGREF skipping: {}={}', name, value)
return res
res = _parse_command(command, [
'XCE YCE ALE',
])
return res
def _zgoubi_drift(command):
return _parse_command(command, [
'l',
])
def _zgoubi_esl(command):
res = _zgoubi_drift(command)
res['type'] = 'DRIFT'
return res
def _zgoubi_marker(command):
res = _parse_command_header(command)
res['plt'] = '0'
return res
def _zgoubi_multipol(command):
res = _parse_command(command, [
'IL',
'l R_0 B_1 B_2 B_3 B_4 B_5 B_6 B_7 B_8 B_9 B_10',
'X_E LAM_E E_2 E_3 E_4 E_5 E_6 E_7 E_8 E_9 E_10',
'NCE C_0 C_1 C_2 C_3 C_4 C_5',
'X_S LAM_S S_2 S_3 S_4 S_5 S_6 S_7 S_8 S_9 S_10',
'NCS CS_0 CS_1 CS_2 CS_3 CS_4 CS_5',
'R_1 R_2 R_3 R_4 R_5 R_6 R_7 R_8 R_9 R_10',
'XPAS',
'KPOS XCE YCE ALE',
])
assert res['KPOS'] in ('1', '2', '3'), '{}: MULTIPOL KPOS not yet supported'.format(res['KPOS'])
return res
def _zgoubi_objet(command):
res = _parse_command(command, [
'rigidity',
'KOBJ'
])
kobj = res['KOBJ']
del res['KOBJ']
if 'name' in res:
del res['name']
res['type'] = 'bunch'
if kobj == '2' or kobj == '2.1':
coordinates = []
for i in range(4, len(command) - 1):
coord = _parse_command_line({}, command[i], 'Y T Z P X D')
for k in coord:
coord[k] = float(coord[k])
if kobj == '2':
if k in ('Y', 'Z', 'S'):
coord[k] *= 1e-2
elif k in ('T', 'P'):
coord[k] *= 1e-3
coordinates.append(coord)
res.particleCount2 = len(coordinates)
res.method = 'OBJET2.1'
res.coordinates = coordinates
return res
def _zgoubi_mcobjet(command):
kobj = command[2][0]
assert kobj == '3', '{}: only MCOBJET 3 is supported for now'.format(kobj)
res = _parse_command(command, [
'rigidity',
'KOBJ',
'particleCount',
'KY KT KZ KP KX KD',
'Y0 T0 Z0 P0 X0 D0',
'alpha_Y beta_Y emit_Y n_cutoff_Y *n_cutoff2_Y *DY *DT',
'alpha_Z beta_Z emit_Z n_cutoff_Z *n_cutoff2_Z *DZ *DP',
'alpha_X beta_X emit_X n_cutoff_X *n_cutoff2_X',
# 'IR1 IR2 IR3',
])
if 'n_cutoff2_Y' in res and float(res['n_cutoff_Y']) >= 0:
res['DT'] = res['DY']
res['DY'] = res['n_cutoff2_Y']
if 'n_cutoff2_Z' in res and float(res['n_cutoff_Z']) >= 0:
res['DP'] = res['DZ']
res['DZ'] = res['n_cutoff2_Z']
del res['KOBJ']
if 'name' in res:
del res['name']
res['type'] = 'bunch'
return res
def _zgoubi_particul(command):
if re.search(r'^[\-\.0-9]+', command[1][0]):
res = _parse_command(command, [
'M Q G Tau',
])
res['particleType'] = 'Other'
else:
res = _parse_command(command, [
'particleType',
])
if res['particleType'] in _NEW_PARTICLE_TYPES:
res.update(_NEW_PARTICLE_TYPES[res['particleType']])
res['particleType'] = 'Other'
if 'name' in res:
del res['name']
res['type'] = 'particle'
return res
def _zgoubi_quadrupo(command):
return _parse_command(command, [
'IL',
'l R_0 B_0',
'X_E LAM_E',
'NCE C_0 C_1 C_2 C_3 C_4 C_5',
'X_S LAM_S',
'NCS CS_0 CS_1 CS_2 CS_3 CS_4 CS_5',
'XPAS',
'KPOS XCE YCE ALE',
])
def _zgoubi_scaling(command):
command2 = copy.deepcopy(command)
pattern = [
'IOPT NFAM',
]
res = _parse_command(command, pattern)
for idx in range(1, int(res['NFAM']) + 1):
pattern.append('NAMEF{}'.format(idx))
pattern.append('ignore')
pattern.append('SCL{}'.format(idx))
pattern.append('ignore')
res = _parse_command(command2, pattern)
del res['NFAM']
del res['ignore']
return res
def _zgoubi_sextupol(command):
return _zgoubi_quadrupo(command)
def _zgoubi_ymy(command):
return _parse_command_header(command)
|
py | b4083a2096c1001c3be35588ca6b94c5786eb0ef | """
Various accuracy metrics:
* :func:`accuracy`
* :func:`average_accuracy`
* :func:`mean_average_accuracy`
"""
import numpy as np
from catalyst.utils import get_activation_fn
def accuracy(
outputs,
targets,
topk=(1, ),
threshold: float = None,
activation: str = None
):
"""
Computes the accuracy.
It can be used either for:
- multi-class task:
    - you can use topk.
    - threshold and activation are not required.
    - targets is a tensor: batch_size
    - outputs is a tensor: batch_size x num_classes
    - computes the accuracy@k for the specified values of k.
- OR multi-label task, in this case:
    - you must specify threshold and activation
    - topk will not be used
      (because there is no method to apply top-k in
      multi-label classification).
    - outputs, targets are tensors with shape: batch_size x num_classes
    - targets is a tensor with binary vectors
"""
activation_fn = get_activation_fn(activation)
outputs = activation_fn(outputs)
if threshold:
outputs = (outputs > threshold).long()
# multi-label classification
if len(targets.shape) > 1 and targets.size(1) > 1:
res = (targets.long() == outputs.long()).sum().float() / np.prod(
targets.shape
)
return [res]
max_k = max(topk)
batch_size = targets.size(0)
if len(outputs.shape) == 1 or outputs.shape[1] == 1:
pred = outputs.t()
else:
_, pred = outputs.topk(max_k, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.long().view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(1.0 / batch_size))
return res
def average_accuracy(outputs, targets, k=10):
"""
Computes the average accuracy at k.
This function computes the average
accuracy at k between two lists of items.
Args:
outputs (list): A list of predicted elements
targets (list): A list of elements that are to be predicted
k (int, optional): The maximum number of predicted elements
Returns:
double: The average accuracy at k over the input lists
"""
if len(outputs) > k:
outputs = outputs[:k]
score = 0.0
num_hits = 0.0
for i, predict in enumerate(outputs):
if predict in targets and predict not in outputs[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
if not targets:
return 0.0
return score / min(len(targets), k)
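# Worked example (values chosen for illustration):
#   average_accuracy([1, 2, 3], [2], k=3) == 0.5
# The single relevant item 2 is ranked second, so the precision at its rank is 1/2.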
def mean_average_accuracy(outputs, targets, topk=(1, )):
"""
Computes the mean average accuracy at k.
This function computes the mean average accuracy at k between two lists
of lists of items.
Args:
outputs (list): A list of lists of predicted elements
targets (list): A list of lists of elements that are to be predicted
topk (int, optional): The maximum number of predicted elements
Returns:
double: The mean average accuracy at k over the input lists
"""
max_k = max(topk)
_, pred = outputs.topk(max_k, 1, True, True)
targets = targets.data.cpu().numpy().tolist()
actual_list = []
for a in targets:
actual_list.append([a])
targets = actual_list
pred = pred.tolist()
res = []
for k in topk:
ap = np.mean(
[average_accuracy(p, a, k) for a, p in zip(targets, pred)]
)
res.append(ap)
return res
__all__ = ["accuracy", "average_accuracy", "mean_average_accuracy"]
|
py | b4083a279a05d2d0f6c7feac8a4c0ec37bef8a48 | #!/usr/bin/env python3
# ----- ---- --- -- -
# Copyright 2019 Oneiro NA, Inc. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.apache.org/licenses/LICENSE-2.0.txt
# - -- --- ---- -----
import re
from base64 import b64decode
from pathlib import Path
from pprint import pformat
from urllib.parse import urljoin
import requests
import msgpack
SERVICES = "https://s3.us-east-2.amazonaws.com/ndau-json/services.json"
TXS = re.compile(r"TxID\((?P<txid>\d+)\):\s*&(?P<name>\w+)\{\},")
# TX names
tx_file = Path(__file__).parent / Path(
"../vendor/github.com/ndau/ndau/pkg/ndau/transactions.go"
)
TX_NAMES = {}
with tx_file.open() as f:
for line in f:
m = TXS.search(line)
if m:
txid, name = m.group("txid", "name")
TX_NAMES[int(txid)] = name
class Transaction:
def __init__(self, b64_data):
data = b64decode(b64_data)
self.raw = msgpack.loads(data)
self.tx = self.raw[b"Transactable"]
try:
self.name = TX_NAMES[self.raw[b"TransactableID"]]
except KeyError:
self.name = "unknown"
def __str__(self):
return f"{self.name}: {pformat(self.tx)}".strip()
def get_net_url(netname):
netname = netname.lower()
nets = {"local", "dev", "test", "main"}
if netname.endswith("net"):
netname = netname[:-3]
if netname not in nets:
return netname
if netname == "local":
return "http://localhost:3030"
netname += "net"
resp = requests.get(SERVICES)
resp.raise_for_status()
return "https://" + resp.json()["networks"][netname]["nodes"][f"{netname}-0"]["api"]
def get_txs(url, block):
resp = requests.get(urljoin(url, f"/block/height/{block}"))
resp.raise_for_status()
return [Transaction(data) for data in resp.json()["block"]["data"]["txs"]]
def main(args):
url = get_net_url(args.net)
for tx in get_txs(url, args.block):
print(tx)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("block", type=int, help="block number from which to get txs")
parser.add_argument("-n", "--net", default="main", help="net name or ndauapi URL")
args = parser.parse_args()
main(args)
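# Example invocation (hedged; use whatever filename this module is saved under, and
# note that resolving --net requires access to the services.json endpoint above):
#   python <this_script>.py 12345 --net main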
|
py | b4083d9df64925e12634d56c6043cbf442af60d6 | #!/usr/bin/env python
# coding: utf-8
import torch.nn as nn
import numpy as np
from opts import parser
FLAGS = parser.parse_args()
def init_kernel(m):
if isinstance(m, nn.Conv2d):
# Initialize kernels of Conv2d layers as kaiming normal
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
# Initialize biases of Conv2d layers at 0
nn.init.zeros_(m.bias)
class resnet8(nn.Module):
"""
Define model architecture.
# Arguments
img_channels: Number of channels in target image
img_width: Target image width.
img_height: Target image height.
output_dim: Dimension of model output.
"""
def __init__(self, img_channels, output_dim):
super(resnet8, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=img_channels,out_channels=32,
kernel_size=[5,5], stride=[2,2], padding=[5//2,5//2]),
nn.MaxPool2d(kernel_size=2))
self.residual_block_1a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
padding=[3//2,3//2]))
self.parallel_conv_1 = nn.Conv2d(in_channels=32,out_channels=32,
kernel_size=[1,1], stride=[2,2],
padding=[1//2,1//2])
self.residual_block_2a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=64, kernel_size=[3,3],
padding=[3//2,3//2]))
self.parallel_conv_2 = nn.Conv2d(in_channels=32,out_channels=64,
kernel_size=[1,1], stride=[2,2],
padding=[1//2,1//2])
self.residual_block_3a = nn.Sequential(
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=128, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(in_channels=128,out_channels=128, kernel_size=[3,3],
padding=[3//2,3//2]))
self.parallel_conv_3 = nn.Conv2d(in_channels=64,out_channels=128,
kernel_size=[1,1], stride=[2,2],
padding=[1//2,1//2])
self.output_dim = output_dim
self.last_block = nn.Sequential(
nn.ReLU(),
nn.Dropout2d(),
nn.Linear(6272,self.output_dim))
# Initialize layers exactly as in Keras
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
# Initialize BatchNorm2d layers with weight 1 and bias 0
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.residual_block_1a.apply(init_kernel)
self.residual_block_2a.apply(init_kernel)
self.residual_block_3a.apply(init_kernel)
def forward(self, x):
x1 = self.layer1(x)
# First residual block
x2 = self.residual_block_1a(x1)
x1 = self.parallel_conv_1(x1)
x3 = x1.add(x2)
# Second residual block
x4 = self.residual_block_2a(x3)
x3 = self.parallel_conv_2(x3)
x5 = x3.add(x4)
# Third residual block
x6 = self.residual_block_3a(x5)
x5 = self.parallel_conv_3(x5)
x7 = x5.add(x6)
out = x7.view(x7.size(0), -1) # Flatten
out = self.last_block(out)
return out
class resnet8_MCDO(nn.Module):
"""
Define model architecture.
# Arguments
img_channels: Number of channels in target image
img_width: Target image width.
img_height: Target image height.
output_dim: Dimension of model output.
Dropout is here applied after every convolutional layer,
not only after inner-product ones. Dropout will be enabled at test
time. As mentioned by Gal, place Dropout after conv layers and before
MaxPool.
"""
def __init__(self, img_channels, in_height, in_width, output_dim):
super(resnet8_MCDO, self).__init__()
p = FLAGS.dropout
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=img_channels,out_channels=32,
kernel_size=[5,5], stride=[2,2], padding=[5//2,5//2]),
nn.Dropout2d(p=p),
nn.MaxPool2d(kernel_size=2))
self.residual_block_1a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_1 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.residual_block_2a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=64, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_2 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.residual_block_3a = nn.Sequential(
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=128, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(in_channels=128,out_channels=128, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_3 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=128, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.output_dim = output_dim
self.last_block = nn.Sequential(
nn.ReLU(),
nn.Linear(6272,self.output_dim))
# Initialize layers exactly as in Keras
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
# Initialize BatchNorm2d layers with weight 1 and bias 0
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.residual_block_1a.apply(init_kernel)
self.residual_block_2a.apply(init_kernel)
self.residual_block_3a.apply(init_kernel)
def forward(self, x):
x1 = self.layer1(x)
# First residual block
x2 = self.residual_block_1a(x1)
x1 = self.parallel_conv_1(x1)
x3 = x1.add(x2)
# Second residual block
x4 = self.residual_block_2a(x3)
x3 = self.parallel_conv_2(x3)
x5 = x3.add(x4)
# Third residual block
x6 = self.residual_block_3a(x5)
x5 = self.parallel_conv_3(x5)
x7 = x5.add(x6)
out = x7.view(x7.size(0), -1) # Flatten
# Single output head here; the additional log-variance head exists only in resnet8_MCDO_ale
out = self.last_block(out)
return out
class resnet8_MCDO_ale(nn.Module):
"""
Define model architecture.
# Arguments
img_channels: Number of channels in target image
img_width: Target image width.
img_height: Target image height.
output_dim: Dimension of model output.
Dropout is here applied after every convolutional layer,
not only after inner-product ones. Dropout will be enabled at test
time. As mentioned by Gal, place Dropout after conv layers and before
MaxPool.
"""
def __init__(self, img_channels, in_height, in_width, output_dim):
super(resnet8_MCDO_ale, self).__init__()
p = FLAGS.dropout
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=img_channels,out_channels=32,
kernel_size=[5,5], stride=[2,2], padding=[5//2,5//2]),
nn.Dropout2d(p=p),
nn.MaxPool2d(kernel_size=2))
self.residual_block_1a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_1 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=32, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.residual_block_2a = nn.Sequential(
nn.BatchNorm2d(32),
nn.ReLU(),
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=64, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_2 = nn.Sequential(
nn.Conv2d(in_channels=32,out_channels=64, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.residual_block_3a = nn.Sequential(
nn.BatchNorm2d(64),
nn.ReLU(),
nn.Conv2d(in_channels=64,out_channels=128, kernel_size=[3,3],
stride=[2,2], padding=[3//2,3//2]),
nn.Dropout2d(p=p),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Conv2d(in_channels=128,out_channels=128, kernel_size=[3,3],
padding=[3//2,3//2]),
nn.Dropout2d(p=p))
self.parallel_conv_3 = nn.Sequential(
nn.Conv2d(in_channels=64,out_channels=128, kernel_size=[1,1],
stride=[2,2], padding=[1//2,1//2]),
nn.Dropout2d(p=p))
self.output_dim = output_dim
self.last_block_mean = nn.Sequential(
nn.ReLU(),
nn.Linear(6272,self.output_dim))
self.last_block_var = nn.Sequential(
nn.ReLU(),
nn.Linear(6272,self.output_dim))
# Initialize layers exactly as in Keras
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
# Initialize BatchNorm2d layers with weight 1 and bias 0
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
self.residual_block_1a.apply(init_kernel)
self.residual_block_2a.apply(init_kernel)
self.residual_block_3a.apply(init_kernel)
def forward(self, x):
x1 = self.layer1(x)
# First residual block
x2 = self.residual_block_1a(x1)
x1 = self.parallel_conv_1(x1)
x3 = x1.add(x2)
# Second residual block
x4 = self.residual_block_2a(x3)
x3 = self.parallel_conv_2(x3)
x5 = x3.add(x4)
# Third residual block
x6 = self.residual_block_3a(x5)
x5 = self.parallel_conv_3(x5)
x7 = x5.add(x6)
out = x7.view(x7.size(0), -1) # Flatten
# We model the network to learn also log var
out_mean = self.last_block_mean(out)
out_log_var = self.last_block_var(out)
out = {'mean': out_mean,
'log_var': out_log_var}
return out
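# Hedged usage sketch (illustration only). A 224x224 input is assumed because the
# stem (stride-2 conv plus max pool) and the three stride-2 residual stages reduce it
# to 7x7x128 = 6272 flattened features, matching the Linear(6272, output_dim) heads above:
#
#   model = resnet8(img_channels=3, output_dim=1)
#   dummy = torch.zeros(1, 3, 224, 224)
#   steering = model(dummy)  # shape: (1, 1)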
|
py | b4083dff454fcb26af112b5495a3a22e1b32c197 | import itertools
__all__ = ['apply_mask']
def apply_mask(data, mask):
"""
Apply masking to websocket message.
"""
if len(mask) != 4:
raise ValueError("mask must contain 4 bytes")
return bytes(b ^ m for b, m in zip(data, itertools.cycle(mask)))
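# Hedged example (illustrative values): XOR-masking twice with the same 4-byte mask
# restores the original payload.
#
#   masked = apply_mask(b"Hello", b"\x01\x02\x03\x04")
#   assert apply_mask(masked, b"\x01\x02\x03\x04") == b"Hello"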
|
py | b4083ed9ced7c044153f5a81e03043fb5c3a301d | import numpy as np
import nnfs
nnfs.init()
layer_outputs = [[4.8, 1.21, 2.385],
[8.9, -1.81, 0.2],
[1.41, 1.051, 0.026]]
# E = math.e # 2.71828182846
exp_values = np.exp(layer_outputs)
print(np.sum(layer_outputs, axis=1, keepdims=True))
norm_values = exp_values / np.sum(exp_values, axis=1, keepdims=True)
print(norm_values)
# print(sum(norm_values))
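# Each row of norm_values sums to 1: this is a batched softmax,
# softmax(x)_i = exp(x_i) / sum_j exp(x_j), computed per sample via axis=1 with keepdims.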
|
py | b4084001603337c63dfefd052eab763b31c6523e | import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from apple.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from apple.consensus.block_record import BlockRecord
from apple.consensus.blockchain_interface import BlockchainInterface
from apple.consensus.constants import ConsensusConstants
from apple.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from apple.consensus.find_fork_point import find_fork_point_in_chain
from apple.consensus.full_block_to_block_record import block_to_block_record
from apple.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from apple.types.blockchain_format.sized_bytes import bytes32
from apple.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from apple.types.coin_solution import CoinSolution
from apple.types.header_block import HeaderBlock
from apple.types.unfinished_header_block import UnfinishedHeaderBlock
from apple.util.errors import Err, ValidationError
from apple.util.ints import uint32, uint64
from apple.util.streamable import recurse_jsonify
from apple.wallet.block_record import HeaderBlockRecord
from apple.wallet.wallet_block_store import WalletBlockStore
from apple.wallet.wallet_coin_store import WalletCoinStore
from apple.wallet.wallet_pool_store import WalletPoolStore
from apple.wallet.wallet_transaction_store import WalletTransactionStore
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
"""
When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
"""
NEW_PEAK = 1 # Added to the peak of the blockchain
ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain)
INVALID_BLOCK = 3 # Block was not added because it was invalid
ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain
DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain
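# Hedged usage sketch (illustration only): callers typically branch on the first
# element of the tuple returned by WalletBlockchain.receive_block below, e.g.
#
#   result, error, fork_height = await blockchain.receive_block(header_block_record)
#   if result == ReceiveBlockResult.NEW_PEAK:
#       ...  # peak advanced; fork_height indicates where any reorg started
#   elif result == ReceiveBlockResult.INVALID_BLOCK:
#       ...  # `error` carries the validation Err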
class WalletBlockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
# All blocks in peak path are guaranteed to be included, can include orphan blocks
__block_records: Dict[bytes32, BlockRecord]
# Defines the path from genesis to the peak, no orphan blocks
__height_to_hash: Dict[uint32, bytes32]
# all hashes of blocks in block_record by height, used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
# (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
__sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
# Stores
coin_store: WalletCoinStore
tx_store: WalletTransactionStore
pool_store: WalletPoolStore
block_store: WalletBlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
new_transaction_block_callback: Any
reorg_rollback: Any
wallet_state_manager_lock: asyncio.Lock
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
log: logging.Logger
@staticmethod
async def create(
block_store: WalletBlockStore,
coin_store: WalletCoinStore,
tx_store: WalletTransactionStore,
pool_store: WalletPoolStore,
consensus_constants: ConsensusConstants,
new_transaction_block_callback: Callable, # f(removals: List[Coin], additions: List[Coin], height: uint32)
reorg_rollback: Callable,
lock: asyncio.Lock,
):
"""
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block given in override_constants, or as a fallback,
in the consensus constants config.
"""
self = WalletBlockchain()
self.lock = asyncio.Lock()
self.coin_store = coin_store
self.tx_store = tx_store
self.pool_store = pool_store
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - 2, 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self.block_store = block_store
self._shut_down = False
self.new_transaction_block_callback = new_transaction_block_callback
self.reorg_rollback = reorg_rollback
self.log = logging.getLogger(__name__)
self.wallet_state_manager_lock = lock
await self._load_chain_from_store()
return self
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self) -> None:
"""
Initializes the state of the Blockchain class from the database.
"""
height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_heights_dicts()
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
blocks, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block_record in blocks.values():
self.add_block_record(block_record)
if len(blocks) == 0:
assert peak is None
self._peak_height = None
return None
assert peak is not None
self._peak_height = self.block_record(peak).height
assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def receive_block(
self,
header_block_record: HeaderBlockRecord,
pre_validation_result: Optional[PreValidationResult] = None,
trusted: bool = False,
fork_point_with_peak: Optional[uint32] = None,
        additional_coin_spends: Optional[List[CoinSolution]] = None,
) -> Tuple[ReceiveBlockResult, Optional[Err], Optional[uint32]]:
"""
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a header if block is added to head. Returns an error if the block is
invalid. Also returns the fork height, in the case of a new peak.
"""
if additional_coin_spends is None:
additional_coin_spends = []
block = header_block_record.header
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None
if not self.contains_block(block.prev_header_hash) and not genesis:
return (
ReceiveBlockResult.DISCONNECTED_BLOCK,
Err.INVALID_PREV_BLOCK_HASH,
None,
)
if block.height == 0:
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(block.finished_sub_slots) > 0, prev_b, self
)
if trusted is False and pre_validation_result is None:
required_iters, error = validate_finished_header_block(
self.constants, self, block, False, difficulty, sub_slot_iters
)
elif trusted:
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_filter,
)
required_iters, val_error = validate_unfinished_header_block(
self.constants, self, unfinished_header_block, False, difficulty, sub_slot_iters, False, True
)
error = ValidationError(Err(val_error)) if val_error is not None else None
else:
assert pre_validation_result is not None
required_iters = pre_validation_result.required_iters
error = (
ValidationError(Err(pre_validation_result.error)) if pre_validation_result.error is not None else None
)
if error is not None:
return ReceiveBlockResult.INVALID_BLOCK, error.code, None
assert required_iters is not None
block_record = block_to_block_record(
self.constants,
self,
required_iters,
None,
block,
)
heights_changed: Set[Tuple[uint32, Optional[bytes32]]] = set()
# Always add the block to the database
async with self.wallet_state_manager_lock:
async with self.block_store.db_wrapper.lock:
try:
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_block_record(header_block_record, block_record, additional_coin_spends)
self.add_block_record(block_record)
self.clean_block_record(block_record.height - self.constants.BLOCKS_CACHE_SIZE)
fork_height, records_to_add = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, additional_coin_spends, heights_changed
)
for record in records_to_add:
if record.sub_epoch_summary_included is not None:
self.__sub_epoch_summaries[record.height] = record.sub_epoch_summary_included
await self.block_store.db_wrapper.commit_transaction()
except BaseException as e:
self.log.error(f"Error during db transaction: {e}")
if self.block_store.db_wrapper.db._connection is not None:
await self.block_store.db_wrapper.rollback_transaction()
self.remove_block_record(block_record.header_hash)
self.block_store.rollback_cache_block(block_record.header_hash)
await self.coin_store.rebuild_wallet_cache()
await self.tx_store.rebuild_tx_cache()
await self.pool_store.rebuild_cache()
for height, replaced in heights_changed:
# If it was replaced change back to the previous value otherwise pop the change
if replaced is not None:
self.__height_to_hash[height] = replaced
else:
self.__height_to_hash.pop(height)
raise
if fork_height is not None:
self.log.info(f"💰 Updated wallet peak to height {block_record.height}, weight {block_record.weight}, ")
return ReceiveBlockResult.NEW_PEAK, None, fork_height
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
additional_coin_spends_from_wallet: Optional[List[CoinSolution]],
heights_changed: Set[Tuple[uint32, Optional[bytes32]]],
) -> Tuple[Optional[uint32], List[BlockRecord]]:
"""
When a new block is added, this is called, to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
"""
peak = self.get_peak()
if genesis:
if peak is None:
block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(
block_record.header_hash
)
assert block is not None
replaced = None
if uint32(0) in self.__height_to_hash:
                    replaced = self.__height_to_hash[uint32(0)]
self.__height_to_hash[uint32(0)] = block.header_hash
heights_changed.add((uint32(0), replaced))
assert len(block.additions) == 0 and len(block.removals) == 0
await self.new_transaction_block_callback(block.removals, block.additions, block_record, [])
self._peak_height = uint32(0)
return uint32(0), [block_record]
return None, []
assert peak is not None
if block_record.weight > peak.weight:
# Find the fork. if the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if fork_point_with_peak is not None:
fork_h: int = fork_point_with_peak
else:
fork_h = find_fork_point_in_chain(self, block_record, peak)
# Rollback to fork
self.log.debug(f"fork_h: {fork_h}, SB: {block_record.height}, peak: {peak.height}")
if block_record.prev_hash != peak.header_hash:
await self.reorg_rollback(fork_h)
# Rollback sub_epoch_summaries
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if ses_included_height > fork_h:
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
del self.__sub_epoch_summaries[height]
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[HeaderBlockRecord, BlockRecord, List[CoinSolution]]] = []
curr = block_record.header_hash
while fork_h < 0 or curr != self.height_to_hash(uint32(fork_h)):
fetched_header_block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
if curr == block_record.header_hash:
additional_coin_spends = additional_coin_spends_from_wallet
else:
additional_coin_spends = await self.block_store.get_additional_coin_spends(curr)
if additional_coin_spends is None:
additional_coin_spends = []
assert fetched_header_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_header_block, fetched_block_record, additional_coin_spends))
if fetched_header_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add: List[BlockRecord] = []
for fetched_header_block, fetched_block_record, additional_coin_spends in reversed(blocks_to_add):
replaced = None
if fetched_block_record.height in self.__height_to_hash:
replaced = self.__height_to_hash[fetched_block_record.height]
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
heights_changed.add((fetched_block_record.height, replaced))
records_to_add.append(fetched_block_record)
if fetched_block_record.is_transaction_block:
await self.new_transaction_block_callback(
fetched_header_block.removals,
fetched_header_block.additions,
fetched_block_record,
additional_coin_spends,
)
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
self._peak_height = block_record.height
if fork_h < 0:
return None, records_to_add
return uint32(fork_h), records_to_add
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, []
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def pre_validate_blocks_multiprocessing(
self, blocks: List[HeaderBlock], batch_size: int = 4
) -> Optional[List[PreValidationResult]]:
return await pre_validate_blocks_multiprocessing(
self.constants, self.constants_json, self, blocks, self.pool, True, {}, None, batch_size
)
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32, check_db=False) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self.__sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
return self.__height_to_hash[height]
def contains_height(self, height: uint32) -> bool:
return height in self.__height_to_hash
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork point - BLOCKS_CACHE_SIZE up to and including the fork_point.
Args:
fork_point: the last block height to load in the cache
"""
if self._peak_height is None:
return None
blocks = await self.block_store.get_block_records_in_range(
fork_point - self.constants.BLOCKS_CACHE_SIZE, self._peak_height
)
for block_record in blocks.values():
self.add_block_record(block_record)
def clean_block_record(self, height: int):
"""
Clears all block records in the cache which have block_record < height.
Args:
height: Minimum height that we need to keep in the cache
"""
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash]
del self.__heights_in_cache[uint32(height)] # remove height from heights in cache
if height == 0:
break
height -= 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
Cleans the cache so that we only maintain relevant blocks.
This removes block records that have height < peak - BLOCKS_CACHE_SIZE.
These blocks are necessary for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
peak = self.get_peak()
assert peak is not None
if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
return await self.block_store.get_header_blocks_in_range(start, stop)
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
|
py | b40840524d3228d86af60719ac36be58a936c83a |
# parameters.py
"""
Exp 53 - {'Initial_genes': '500', 'Host_mutation_rate': '0.03', 'TE_progeny': '0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3', 'TE_Insertion_Distribution': 'Triangle( pmax=0, pzero=3.0/3.0 )', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '14', 'Gene_Insertion_Distribution': 'Triangle( pzero=1.0/3.0, pmax=1 )', 'mutation_effect': '0.10', 'TE_death_rate': '0.0005'}
"""
from TEUtil import *;
# note that "#" indicates a comment
# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
"SPLAT": False,
"SPLAT FITNESS": False,
"INITIALIZATION": False,
"GENERATION": True,
"HOST EXTINCTION": True,
"TE EXTINCTION": True,
"TRIAL NO": True,
"GENE INIT": False,
"TE INIT": False,
};
TE_Insertion_Distribution = Triangle( pmax=0, pzero=3.0/3.0 );
Gene_Insertion_Distribution = Triangle( pzero=1.0/3.0, pmax=1 );
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability
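# Illustrative sketch only (not used by the simulation): the real Triangle
# class comes from TEUtil, but a Triangle(pmax, pzero)-style draw can be built
# with inverse-transform sampling. The helper below is a hypothetical stand-in
# showing the shape of such a distribution (density highest at pmax, falling
# linearly to zero at pzero).
import random as _random_example
def _example_triangle_sample(pmax, pzero):
    u = _random_example.random()
    return pmax + (pzero - pmax) * (1.0 - (1.0 - u) ** 0.5)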
Gene_length = 1000; # use 1000?
TE_length = 1000; # use 1000?
TE_death_rate = 0.0005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.00, 0, 0.55, 1, 0.30, 2, 0.15, 3 );
Initial_genes = 500;
Append_gene = True; # True: when the initialization routine tries to place
# a gene inside another gene, it instead appends it
# at the end of the original gene (use this with small
# amounts of Junk_BP).
# False: when the initialization routine tries to place
# a gene inside another gene, try to place it somewhere
# else again (don't use this option with small amounts
# of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 14 * MILLION;
Host_start_fitness = 1.0;
Host_mutation_rate = 0.03;
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
0.30, lambda fit: fit - random.random()*0.10,
0.15, lambda fit: fit,
0.15, lambda fit: fit + random.random()*0.10
);
# what happens when a TE hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
0.20, lambda fit: fit - random.random()*0.10,
0.30, lambda fit: fit,
0.20, lambda fit: fit + random.random()*0.10
);
Carrying_capacity = 30;
Host_reproduction_rate = 1; # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual
Maximum_generations = 1500;
Terminate_no_TEs = True; # end simulation if there are no TEs left
# seed = 0;
seed = None; # if seed = None, the random number generator's initial state is
# set "randomly"
save_frequency = 50; # Frequency with which to save state of experiment
saved = None; # if saved = None then we start a new simulation from scratch
# if saved = string, then we open that file and resume a simulation
|
py | b40840cd20501de85f1b2d51bfaecadf8246c0da | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('videos', '0007_category_videos'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='videos',
),
migrations.AddField(
model_name='video',
name='category',
field=models.ForeignKey(to='videos.Category', null=True),
preserve_default=True,
),
]
|
py | b408422dc6bf4990794795c3c31d089a22261f63 | # -*- coding: utf-8 -*-
import logging
import urllib
from json import dumps, loads
import requests
from flask import Blueprint, current_app, request
from flask_restful import Resource, Api
from jwcrypto import jwk
from DetailedHTTPException import error_handler
from helpers_srv import Helpers, CR_tool, Sequences, api_logging
from signed_requests.signed_request_auth import SignedRequest
debug_log = logging.getLogger("debug")
logger = logging.getLogger("sequence")
api_Sink_blueprint = Blueprint("api_Sink_blueprint", __name__)
api = Api()
api.init_app(api_Sink_blueprint)
sq = Sequences("Service_Components Mgmnt (Sink)")
# import xmltodict
# @api.representation('application/xml')
# def output_xml(data, code, headers=None):
# if isinstance(data, dict):
# xm = {"response": data}
# resp = make_response(xmltodict.unparse(xm, pretty=True), code)
# resp.headers.extend(headers)
# return resp
class Status(Resource):
@error_handler
@api_logging
def get(self):
status = {"status": "running", "service_mode": "Sink"}
return status
class DebugDataFlow(Resource):
def __init__(self):
super(DebugDataFlow, self).__init__()
self.service_url = current_app.config["SERVICE_URL"]
self.own_url = current_app.config["SINK_URL"]
self.operator_url = current_app.config["OPERATOR_URL"]
self.helpers = Helpers(current_app.config)
@error_handler
@api_logging
def get(self, rs_id):
debug_log.info("Got rs_id {} to DebugDataFlow endpoint".format(rs_id))
records = self.helpers.query_db_multiple("select rs_id, cr_id, slr_id, surrogate_id from cr_storage", ())
#rs_id =
debug_log.info("DB query resulted in following results:\n{}".format(records))
for record in records:
rs_id = record[0]
cr_id = record[1]
tool = CR_tool()
tool.cr = self.helpers.get_cr_json(cr_id)
role = tool.get_role()
debug_log.info("Found role {}".format(role))
if role == "Sink":
if record[0] == rs_id:
surrogate_id = record[3]
payload = {"user_id": surrogate_id,
"cr_id": cr_id,
"rs_id": urllib.quote_plus(rs_id)}
                    # TODO get the url from config
debug_log.info(dumps(payload, indent=2))
req = requests.post(self.own_url+"/api/1.3/sink_flow/dc", json=payload)
return req.content
class DataFlow(Resource):
def __init__(self):
super(DataFlow, self).__init__()
self.service_url = current_app.config["SERVICE_URL"]
self.operator_url = current_app.config["OPERATOR_URL"]
self.helpers = Helpers(current_app.config)
@error_handler
@api_logging
def post(self): # TODO Make this a GET, is this valid anymore?
def renew_token(operator_url, record_id):
sq.task("Renewing Auth Token.")
token = requests.get(
"{}/api/1.3/cr/auth_token/{}".format(operator_url, record_id)) # TODO Get api path from some config?
debug_log.info("{}, {}, {}, {}".format(token.url, token.reason, token.status_code, token.text))
store_dict = {cr_id: dumps(loads(token.text.encode()))}
self.helpers.storeToken(store_dict)
def fetch_data_request_urls():
params = request.json
debug_log.info(params)
debug_log.info(request.json)
user_id = params["user_id"]
cr_id = params["cr_id"]
rs_id = params["rs_id"]
sq.task("Get data_set_id from POST json")
data_set_id = request.args.get("dataset_id", None)
debug_log.info("data_set_id is ({}), cr_id is ({}), user_id ({}) and rs_id ({})"
.format(data_set_id, cr_id, user_id, rs_id))
sq.task("Create request")
req = {"we want": "data"}
sq.task("Validate CR")
cr = self.helpers.validate_cr(cr_id, surrogate_id=user_id)
sq.task("Validate Request from UI")
distribution_urls = self.helpers.validate_request_from_ui(cr, data_set_id, rs_id)
# Fetch data request urls
# Data request urls fetched.
debug_log.info("Data request urls fetched.")
return cr_id, cr, distribution_urls
cr_id, cr, distribution_urls = fetch_data_request_urls()
sq.task("Validate Authorisation Token")
surrogate_id = cr["cr"]["common_part"]["surrogate_id"]
our_key = self.helpers.get_key()
our_key_pub = our_key["pub"]
tries = 3 # TODO: Get this from config
while True:
try:
aud = self.helpers.validate_authorization_token(cr_id, surrogate_id, our_key_pub)
break
except ValueError as e:
debug_log.exception(e)
renew_token(self.operator_url, cr_id)
if tries == 0:
raise EnvironmentError("Auth token validation failed and retry counter exceeded.")
tries -= 1
except TypeError as e:
debug_log.exception(e)
raise EnvironmentError("Token used too soon, halting.")
# Most verifying and checking below is done in the validate_authorization_token function by jwcrypto
# Fetch Authorisation Token related to CR from data storage by rs_id (cr_id?)
# Check Integrity ( Signed by operator, Operator's public key can be found from SLR)
# Check "Issued" timestamp
# Check "Not Before" timestamp
# Check "Not After" timestamp
# Check that "sub" contains correct public key(Our key.)
# OPT: Token expired
# Get new Authorization token, start again from validation. # TODO: Make these steps work as functions that call the next step.
# Check URL patterns in "aud" field
# Check that fetched distribution urls can be found from "aud" field
# Token validated
debug_log.info("Auth Token Validated.")
# With these two steps Sink has verified that it's allowed to make request.
# Construct request
sq.task("Construct request")
# Select request URL from "aud" field
# Add Authorisation Token to request
# Request constructed.
# Sign request
# Fetch private key pair of public key specified in Authorisation Token's "sub" field.
# Sign with fetched private key
sq.task("Fetch key used to sign request")
our_key_full = jwk.JWK()
our_key_full.import_key(**our_key["key"])
# Add signature to request
# Request signed.
# Request created.
sq.send_to("Service_Components Mgmnt (Source)", "Data Request (PoP stuff)")
# Make Data Request
data = []
for url in distribution_urls:
req = requests.get(url,
auth=SignedRequest(token=aud, sign_method=True, sign_path=True, key=our_key_full, protected=dumps(our_key["prot"])))
if req.ok:
data.append(loads(req.content))
debug_log.info("Made data request and received following data from Source: \n{}"
.format(dumps(loads(req.content), indent=2)))
return {"response_data": data}
api.add_resource(Status, '/init')
api.add_resource(DataFlow, '/dc')
api.add_resource(DebugDataFlow, '/debug_dc/<string:rs_id>')
#api.add_resource(DataFlow, '/user/<string:user_id>/consentRecord/<string:cr_id>/resourceSet/<string:rs_id>')
#"http://service_components:7000/api/1.3/sink_flow/user/95479a08-80cc-4359-ba28-b8ca23ff5572_53af88dc-33de-44be-bc30-e0826db9bd6c/consentRecord/cd431509-777a-4285-8211-95c5ac577537/resourceSet/http%3A%2F%2Fservice_components%3A7000%7C%7C9aebb487-0c83-4139-b12c-d7fcea93a3ad" |
py | b40842a89f86f5bd30fd5feee4ea4f149e8153cf | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Author : Heethesh Vhavle
Email : [email protected]
Version : 1.0.0
Date : Apr 07, 2019
'''
# Handle paths and OpenCV import
from init_paths import *
# External modules
import PIL.Image
# ROS modules
import rospy
from cv_bridge import CvBridge, CvBridgeError
from diagnostic_msgs.msg import DiagnosticStatus
from tf.transformations import (translation_matrix,
quaternion_matrix,
concatenate_matrices,
euler_from_quaternion)
# Global variables
CV_BRIDGE = CvBridge()
########################### Functions ###########################
class FPSLogger:
def __init__(self, name):
self.name = name
self.reset()
def reset(self):
self.fps = None
self.last = None
self.total_time = 0
self.total_frames = 0
def lap(self):
self.last = time.time()
def tick(self, count=1):
self.total_time += time.time() - self.last
self.total_frames += count
self.fps = self.total_frames / self.total_time
def log(self, tick=False):
if tick: self.tick()
print('\033[94m %s FPS:\033[00m \033[93m%.1f\033[00m' % (self.name, self.fps))
def get_log(self, tick=False):
if tick: self.tick()
return '\033[94m %s FPS:\033[00m \033[93m%.1f\033[00m' % (self.name, self.fps)
def pil_image(img):
return PIL.Image.fromarray(img)
def cv_image(img):
return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
def message_to_cv2(msg, coding='bgr8'):
# Read image using CV bridge
try:
img = CV_BRIDGE.imgmsg_to_cv2(msg, coding)
return img
except CvBridgeError as e:
print(e)
rospy.logerr(e)
return None
def cv2_to_message(img, pub, coding='bgr8'):
# Publish image using CV bridge
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
try:
pub.publish(CV_BRIDGE.cv2_to_imgmsg(img, coding))
except CvBridgeError as e:
print(e)
rospy.logerr(e)
def increase_brightness(img, value=30):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
lim = 255 - value
v[v > lim] = 255
v[v <= lim] += value
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
def get_bbox_vertices(pose, dims, scale=None):
'''
Returns:
vertices - 8 * [x, y, z, 1] ndarray
'''
if scale is not None: dims = [scale.x, scale.y, scale.z]
dx, dy, dz = dims[0] / 2.0, dims[1] / 2.0, dims[2]
vertices = [[dx, dy, 0, 1],
[dx, dy, dz, 1],
[dx, -dy, 0, 1],
[dx, -dy, dz, 1],
[-dx, dy, 0, 1],
[-dx, dy, dz, 1],
[-dx, -dy, 0, 1],
[-dx, -dy, dz, 1]]
vertices = np.matmul(pose, np.asarray(vertices).T).T
return vertices
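# Usage sketch (hypothetical values): corners of a 2 m x 2 m x 3 m box with an
# identity pose. Relies on `np` being in scope via init_paths, like the rest of
# this module.
def _bbox_vertices_example():
    verts = get_bbox_vertices(np.eye(4), [2.0, 2.0, 3.0])
    # verts is an (8, 4) array of homogeneous [x, y, z, 1] corner coordinates
    return verts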
def position_to_numpy(position):
return np.asarray([position.x, position.y, position.z])
def orientation_to_numpy(orientation):
return np.asarray([orientation.x, orientation.y, orientation.z, orientation.w])
def numpy_to_position(numpy_pos, position):
position.position.x = numpy_pos[0]
position.position.y = numpy_pos[1]
position.position.z = numpy_pos[2]
return position
def orientation_to_rpy(orientation):
    return euler_from_quaternion(orientation_to_numpy(orientation))
def quaternion_to_rpy(quaternion):
return euler_from_quaternion(quaternion)
def pose_to_transformation(pose=None, position=None, orientation=None):
if position is None:
position = position_to_numpy(pose.position)
if orientation is None:
orientation = orientation_to_numpy(pose.orientation)
return concatenate_matrices(translation_matrix(position), quaternion_matrix(orientation))
def make_diagnostics_status(name, pipeline, fps, level=DiagnosticStatus.OK):
msg = DiagnosticStatus()
    msg.level = level
msg.name = name
msg.message = fps
msg.hardware_id = pipeline
return msg
|
py | b4084377db0247a5c4b82a3df54faee9588a3df2 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def cps_85(path):
"""Data from the 1985 Current Population Survey (CPS85)
The Current Population Survey (CPS) is used to supplement census
information between census years. These data consist of a random sample
of persons from the CPS85, with information on wages and other
characteristics of the workers, including sex, number of years of
education, years of work experience, occupational status, region of
residence and union membership.
A data frame with 534 observations on the following variables.
- `wage` wage (US dollars per hour)
- `educ` number of years of education
- `race` a factor with levels `NW` (nonwhite) or `W` (white)
- `sex` a factor with levels `F` `M`
- `hispanic` a factor with levels `Hisp` `NH`
- `south` a factor with levels `NS` `S`
- `married` a factor with levels `Married` `Single`
- `exper` number of years of work experience (inferred from `age`
and `educ`)
- `union` a factor with levels `Not` `Union`
- `age` age in years
- `sector` a factor with levels `clerical` `const` `manag`
`manuf` `other` `prof` `sales` `service`
Data are from http://lib.stat.cmu.edu/DASL.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `cps_85.csv`.
Returns:
Tuple of np.ndarray `x_train` with 534 rows and 11 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'cps_85.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/mosaicData/CPS85.csv'
maybe_download_and_extract(path, url,
save_file_name='cps_85.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
|
py | b40843f130f2f063c8878448313396141879d58c | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
"""This module provides a model for a monitoring station, and tools
for manipulating/modifying station data
"""
class MonitoringStation:
"""This class represents a river level monitoring station"""
def __init__(self, station_id, measure_id, label, coord, typical_range,
river, town, threat='Unavailable'):
self.station_id = station_id
self.measure_id = measure_id
# Handle case of erroneous data where data system returns
# '[label, label]' rather than 'label'
self.name = label
if isinstance(label, list):
self.name = label[0]
self.coord = coord
self.typical_range = typical_range
self.river = river
self.town = town
self.latest_level = None
self.threat = threat
def __repr__(self):
d = "Station name: {}\n".format(self.name)
d += " id: {}\n".format(self.station_id)
d += " measure id: {}\n".format(self.measure_id)
d += " coordinate: {}\n".format(self.coord)
d += " town: {}\n".format(self.town)
d += " river: {}\n".format(self.river)
d += " typical range: {}".format(self.typical_range)
return d
def typical_range_consistent(self):
if not self.typical_range:
return False
else:
low, high = self.typical_range
return low < high
def relative_water_level(self):
if self.typical_range_consistent():
try:
relative_level = (self.latest_level - self.typical_range[0]) / (
self.typical_range[1] - self.typical_range[0])
            except TypeError:  # latest_level may be None
relative_level = None
else:
relative_level = None
return relative_level
def set_threat(self, threat):
self.threat = threat
def inconsistent_typical_range_stations(stations):
inconsistent_stations = list()
for station in stations:
if not station.typical_range_consistent():
inconsistent_stations.append(station)
return inconsistent_stations
|
py | b40846086fa04ae02e9726b2cd0f941235eb06e2 | import numpy as np
import pdb
class Resampling:
"""
References: Thrun, Sebastian, Wolfram Burgard, and Dieter Fox. Probabilistic robotics. MIT press, 2005.
[Chapter 4.3]
"""
def __init__(self):
"""
TODO : Initialize resampling process parameters here
"""
def multinomial_sampler(self, X_bar):
"""
param[in] X_bar : [num_particles x 4] sized array containing [x, y, theta, wt] values for all particles
param[out] X_bar_resampled : [num_particles x 4] sized array containing [x, y, theta, wt] values for resampled set of particles
"""
"""
TODO : Add your code here
"""
return X_bar_resampled
def low_variance_sampler(self, X_bar):
"""
param[in] X_bar : [num_particles x 4] sized array containing [x, y, theta, wt] values for all particles
param[out] X_bar_resampled : [num_particles x 4] sized array containing [x, y, theta, wt] values for resampled set of particles
"""
"""
TODO : Add your code here
"""
return X_bar_resampled
if __name__ == "__main__":
pass |
py | b4084645001a2af9ccd8854fbab4d0750d1b2377 | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import unittest
from mixbox.exceptions import ignored
def _divide_by_zero():
return 1/0
class TestContextManagers(unittest.TestCase):
def test_without_context_manager(self):
self.assertRaises(ZeroDivisionError, _divide_by_zero)
def test_with_context_manager(self):
with ignored(ZeroDivisionError):
_divide_by_zero()
# This should never be reached, since the context manager should
# exit after the error above.
raise AssertionError()
|
py | b40846f15b9969ba636e5e764b5d5fa85bd3bb08 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='block-pack']/ul[@class='block-top']/li[6]/h1",
'price' : "//table[@class='product_details']//tr[3]/td/span | //table[@class='product_details']//tr[4]/td/span",
'category' : "//div[@class='block-pack']/ul[@class='block-top']/li/a",
'description' : "//div[@class='block-ct']/div[@class='text-info']",
'images' : "//div[@class='product_thumbnail']/a/div[@class='zoomPad']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'dientuchinhhang.com'
allowed_domains = ['dientuchinhhang.com']
start_urls = ['http://dientuchinhhang.com']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(), 'parse_item'),
Rule(LinkExtractor(), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
|
py | b4084779127cc058dfdd2f16af35c0c774022251 | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Particle in a box with two finite-potential external walls and a barrier between them
# ### Authors:
# * [Vinícius Wilian D. Cruzeiro](https://scholar.google.com/citations?user=iAK04WMAAAAJ). E-mail: [email protected]
# * Xiang Gao. E-mail: [email protected]
# * [Valeria D. Kleiman](http://kleiman.chem.ufl.edu/). E-mail: [email protected]
#
# Department of Chemistry
#
# Physical Chemistry Division
#
# University of Florida
#
# P.O. Box 117200
#
# Gainesville, FL 32611-7200
#
# United States
# **Instructions:**
# * The reader should follow this notebook in the order that it is presented, executing code cells in consecutive order.
# * In order to execute a cell you may click on the cell and click the *PLAY* button, press *Shift+Enter*, or go to *Cell-->Run cells*. The user may also execute all cells at once by clicking on *Cell --> Run All* in the toolbar above.
# * **Important:** Some cells **are only going to execute after the user enters input values in the corresponding boxes**.
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is hidden by default for easier reading. As you scroll through the notebook,
code cells appear as empty cells with a blue right-side edge.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# ### Libraries used in this notebook:
# In the next cell we import the libraries used in this notebook and call some important functions.
import matplotlib as mpl # matplotlib library for plotting and visualization
import matplotlib.pylab as plt # matplotlib library for plotting and visualization
import numpy as np #numpy library for numerical manipulation, especially suited for data arrays
from IPython.display import HTML # Importing function necessary to display the animation videos on the screen
# The next cell suppresses any warnings displayed by IPython. This cell is optional.
import warnings
warnings.filterwarnings('ignore')
# Executing the next cell prints the versions of Python, IPython, and the libraries used in this notebook. Please check that the versions are up to date so the program runs smoothly.
import sys # checking the version of Python
import IPython # checking the version of IPython
print("Python version = {}".format(sys.version))
print("IPython version = {}".format(IPython.__version__))
print("Matplotlib version = {}".format(plt.__version__))
print("Numpy version = {}".format(np.__version__))
# ### Special calls:
# The next cell configures `matplotlib` to show figures embedded within the cells it was executed from, instead of opening a new window for each figure.
# %matplotlib inline
# ## Approaching the problem
# We can extend the calculations for the particle in a finite potential box (presented in a previous notebook) and apply them to a particle contained within two boxes separated by a barrier, all with independent finite potentials. In this problem, not only the height of the potentials but also the distance between them is critical to understanding the behavior of the particle.
#
# This is the potential $V(x)$ we are going to consider:
from IPython.display import Image
Image(filename='particle_in_two_finite_boxes_diagram.png')
# Or mathematically:
#
# $V(x) =
# \left\{
# \begin{array}{lll}
# I: & V_o & \; \mbox{if } \; x \leq -\left(\frac{d}{2}+ L \right) \\
# II: & 0 & \; \mbox{if}\; -\left(\frac{d}{2} +L\right) \leq x \leq -\frac{d}{2} \\
# III: & V_1 & \; \mbox{if }\; -\frac{d}{2} \leq x \leq \frac{d}{2} \\
# IV: & 0 & \; \mbox{if } \;\frac{d}{2} \leq x \leq \frac{d}{2}+L \\
# V: & V_o & \; \mbox{if } \; x \geq \frac{d}{2}+ L
# \end{array}
# \right.$
#
# For simplicity, **we will only consider the case in which $V_1 \geq V_o$**.
# As it was the case for a single box, the interesting situation is when $E \leq V_o$ and we look for the bound states. For energies larger than $V_o$ and $V_1$ the particle will be free.
#
# Since the potential is even $[V(-x) = V(x)]$, the wavefunctions can be chosen to be even [$\psi(-x) = \psi(x)$] or odd $[\psi(-x) = -\psi(x)]$. Applying the constraint that the wavefunctions must remain finite leads to the following solutions of the Schrödinger equation for $E \leq V_o$ (bound states):
#
# **Solutions: $\psi(x) = \pm \psi(-x)$**
# $$\begin{array} {lll}
# \mbox{For } I: & \psi(x) = \pm\; De^{\alpha_o x} \\
# \mbox{For } II: & \psi(x) = \pm \left[B\cos(kx) + C\sin(kx)\right] \\
# \mbox{For } III: & \psi(x) = A\left(e^{\alpha_1 x}\pm e^{-\alpha_1 x}\right) \\
# \mbox{For } IV: & \psi(x) = B\cos(kx) + C\sin(kx) \\
# \mbox{For } V: & \psi(x) = De^{-\alpha_o x} \\
# \end{array} \tag{1}$$
#
# where the $\pm$ corresponds to the even or odd solutions, respectively. As before, the wavefunctions and their first derivatives must be continuous, so the pieces must match at the boundaries between regions.
#
# After imposing the continuity and boundary conditions we reach the following relation for the allowed energy values:
#
# **Even solutions:**
# $$\frac{\alpha_1\tanh\left(\alpha_1\frac{d}{2}\right)+k\tan\left(k\frac{d}{2}\right)}{k-\alpha_1\tanh\left(\alpha_1\frac{d}{2}\right)\tan\left(k\frac{d}{2}\right)}=\frac{-\alpha_o+k\tan\left[k\left(\frac{d}{2}+L\right)\right]}{k+\alpha_o\tan\left[k\left(\frac{d}{2}+L\right)\right]} \tag{2}$$
#
#
#
# **Odd solutions:**
#
# $$\frac{k\tanh\left(\alpha_1\frac{d}{2}\right)-\alpha_1\tan\left(k\frac{d}{2}\right)}{\alpha_1+k\tanh\left(\alpha_1\frac{d}{2}\right)\tan\left(k\frac{d}{2}\right)}=\frac{k+\alpha_o\tan\left[k\left(\frac{d}{2}+L\right)\right]}{-\alpha_o+k\tan\left[k\left(\frac{d}{2}+L\right)\right]} \tag{3}$$
#
# where:
#
# $$\alpha_o = \frac{\sqrt{2m(V_o-E)}}{\hbar} \hspace 3cm \alpha_1 = \frac{\sqrt{2m(V_1-E)}}{\hbar} \hspace 3cm k = \frac{\sqrt{2mE}}{\hbar}$$
#
# The allowed solutions will be those corresponding to energies that obey equations 2 and 3.
# Once we find the allowed energies, we can go back and obtain the wavefunctions with the proper coefficients.
# ## Finding the allowed Energies graphically
# +
# Reading the input variables from the user and making sure V1 > Vo
Vo = abs(float(input("Enter the value for Vo (in eV) = ")))
L = abs(float(input("Enter the value for L (box size, in Angstroms) = ")))
V1 = 0.9*Vo
while(V1<Vo):
V1 = float(input("Enter the value for V1 (V1 >= Vo, in eV) = "))
if(V1<Vo):
print ("V1 cannot be higher than Vo. Please enter the value for V1 again.\n")
d = abs(float(input("Enter the value for d (barrier size, in Angstroms) = ")))
val = np.sqrt(2.0*9.10938356e-31*1.60217662e-19)*1e-10/(1.05457180013e-34)
# val = sqrt(2*m_e [kg] * 1 eV [J]) * 1e-10 [m/Angstrom] / hbar [J*s] ~ 0.5123, so that sqrt(E in eV)*val gives k in 1/Angstrom
# Defining functions that come from the energy expression
def f0(E):
var = -np.sqrt(Vo-E)+np.sqrt(E)*np.tan(np.sqrt(E)*val*(d/2.0+L))
var = var/(np.sqrt(E)+np.sqrt(Vo-E)*np.tan(np.sqrt(E)*val*(d/2.0+L)))
return var
def f1(E):
var = np.sqrt(V1-E)*np.tanh(d*np.sqrt(V1-E)*val/2.0)+np.sqrt(E)*np.tan(d*np.sqrt(E)*val/2.0)
var = var/(np.sqrt(E)-np.sqrt(V1-E)*np.tanh(d*np.sqrt(V1-E)*val/2.0)*np.tan(d*np.sqrt(E)*val/2.0))
return var
def f2(E):
var = np.sqrt(E)+np.sqrt(Vo-E)*np.tan(np.sqrt(E)*val*(d/2.0+L))
var = var/(np.sqrt(E)*np.tan(np.sqrt(E)*val*(d/2.0+L))-np.sqrt(Vo-E))
return var
def f3(E):
var = np.sqrt(E)*np.tanh(d*np.sqrt(V1-E)*val/2.0)-np.sqrt(V1-E)*np.tan(d*np.sqrt(E)*val/2.0)
var = var/(np.sqrt(V1-E)+np.sqrt(E)*np.tanh(d*np.sqrt(V1-E)*val/2.0)*np.tan(d*np.sqrt(E)*val/2.0))
return var
# Defining maximum values to be displayed on the y axes
maxval = 0.0
for n in range(10):
if (abs(f1(n*Vo/10.0))>maxval and abs(f1(n*Vo/10.0))!=float('inf')): maxval = abs(f1(n*Vo/10.0))
if (np.sqrt(Vo)*1.9>maxval):
ymax1 = 1.9*np.sqrt(Vo)
else:
ymax1 = maxval
maxval = 0.0
for n in range(10):
if (abs(f3(n*Vo/10.0))>maxval and abs(f3(n*Vo/10.0))!=float('inf')): maxval = abs(f3(n*Vo/10.0))
if (np.sqrt(Vo)*1.9>maxval):
ymax2 = 1.9*np.sqrt(Vo)
else:
ymax2 = maxval
# Generating the wavefunction graph
plt.rcParams.update({'font.size': 18, 'font.family': 'STIXGeneral', 'mathtext.fontset': 'stix'})
fig, axes = plt.subplots(1, 2, figsize=(13,4))
axes[0].axis([0.0,Vo,-ymax1,ymax1])
axes[0].set_xlabel(r'$E$ (eV)')
axes[0].set_ylabel(r'')
axes[0].set_title('Even solutions')
axes[1].axis([0.0,Vo,-ymax2,ymax2])
axes[1].set_xlabel(r'$E$ (eV)')
axes[1].set_ylabel(r'')
axes[1].set_title('Odd solutions')
E_even = np.linspace(0.0, Vo, 100000)
E_odd = np.linspace(0.0, Vo, 100000)
# Removing discontinuity points
for n in range(100000):
if abs(np.sqrt(E_even[n])+np.sqrt(Vo-E_even[n])*np.tan(np.sqrt(E_even[n])*val*(d/2.0+L)))<0.01: E_even[n] = np.nan
if abs(np.sqrt(E_even[n])-np.sqrt(V1-E_even[n])*np.tanh(d*np.sqrt(V1-E_even[n])*val/2.0)*np.tan(d*np.sqrt(E_even[n])*val/2.0))<0.01: E_even[n] = np.nan
if abs(np.sqrt(E_odd[n])*np.tan(np.sqrt(E_odd[n])*val*(d/2.0+L))-np.sqrt(Vo-E_odd[n]))<0.01: E_odd[n] = np.nan
if abs(np.sqrt(V1-E_odd[n])+np.sqrt(E_odd[n])*np.tanh(d*np.sqrt(V1-E_odd[n])*val/2.0)*np.tan(d*np.sqrt(E_odd[n])*val/2.0))<0.01: E_odd[n] = np.nan
# Plotting the curves and setting the labels
axes[0].plot(E_even, f0(E_even), label=r"$\frac{-\alpha_o+k\tan\left[k\left(\frac{d}{2}+L\right)\right]}{k+\alpha_o\tan\left[k\left(\frac{d}{2}+L\right)\right]}$", color="blue")
axes[0].plot(E_even, f1(E_even), label=r"$\frac{\alpha_1\tanh\left(\alpha_1\frac{d}{2}\right)+k\tan\left(k\frac{d}{2}\right)}{k-\alpha_1\tanh\left(\alpha_1\frac{d}{2}\right)\tan\left(k\frac{d}{2}\right)}$", color="red")
axes[1].plot(E_odd, f2(E_odd), label=r"$\frac{k+\alpha_o\tan\left[k\left(\frac{d}{2}+L\right)\right]}{-\alpha_o+k\tan\left[k\left(\frac{d}{2}+L\right)\right]}$", color="blue")
axes[1].plot(E_odd, f3(E_odd), label=r"$\frac{k\tanh\left(\alpha_1\frac{d}{2}\right)-\alpha_1\tan\left(k\frac{d}{2}\right)}{\alpha_1+k\tanh\left(\alpha_1\frac{d}{2}\right)\tan\left(k\frac{d}{2}\right)}$", color="red")
# Chosing the positions of the legends
axes[0].legend(bbox_to_anchor=(0.05, -0.2), loc=2, borderaxespad=0.0)
axes[1].legend(bbox_to_anchor=(0.05, -0.2), loc=2, borderaxespad=0.0)
# Show the plots on the screen once the code reaches this point
plt.show()
# -
# The allowed values are then the values of $E$ at which the two curves cross.
# Here are some question to think about:
# * **Q1:** How many bound states are possible?
# * **Q2:** How many bound states are even and how many are odd?
# * **Q3:** Is the ground state described by an even or an odd wavefunction?
# * **Q4:** Can you accurately read the allowed values of energy from these graphs?
#
# The allowed values of $E$ can be found numerically, yielding:
print ("The allowed bounded energies are:")
# We want to find the values of E in which f_even and f_odd are zero
f_even = lambda E : f0(E)-f1(E)
f_odd = lambda E : f2(E)-f3(E)
E_old = 0.0
f_even_old = f_even(0.0)
f_odd_old = f_odd(0.0)
n_even = 1
n_odd = 1
E_vals = np.zeros(999)
n = 1
# Here we loop from E = 0 to E = Vo seeking roots
for E in np.linspace(0.0, Vo, 200000):
f_even_now = f_even(E)
# If the difference is zero or if it changes sign then we might have passed through a root
if (f_even_now == 0.0 or f_even_now/f_even_old < 0.0):
# If the old values of f are not close to zero, this means we didn't pass through a root but
# through a discontinuity point
if (abs(f_even_now)<1.0 and abs(f_even_old)<1.0):
E_vals[n-1] = (E+E_old)/2.0
print (" State #%3d (Even wavefunction): %9.4f eV, %13.6g J" % (n,E_vals[n-1],E_vals[n-1]*1.60217662e-19))
n += 1
n_even += 1
f_odd_now = f_odd(E)
# If the difference is zero or if it changes sign then we might have passed through a root
if (f_odd_now == 0.0 or f_odd_now/f_odd_old < 0.0) and (E>0.0):
# If the old values of f are not close to zero, this means we didn't pass through a root but
# through a discontinuity point
if (abs(f_odd_now)<1.0 and abs(f_odd_old)<1.0):
E_vals[n-1] = (E+E_old)/2.0
print (" State #%3d (Odd wavefunction): %9.4f eV, %13.6g J" % (n,E_vals[n-1],E_vals[n-1]*1.60217662e-19))
n += 1
n_odd += 1
E_old = E
f_even_old = f_even_now
f_odd_old = f_odd_now
nstates = n-1
print ("\nTHERE ARE %3d POSSIBLE BOUNDED ENERGIES" % nstates)
# Since the lowest energy corresponds to the even solution of the wavefunction, and the states are labeled by their quantum number $(n=1,2...n_{max})$, the even solutions will correspond to odd quantum numbers and the odd solutions will correspond to even quantum numbers.
# **Plotting an Energy Diagram helps us to see the energy separation between the states.**
# Generating the energy diagram
fig, ax = plt.subplots(figsize=(8,14))
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
ax.spines['bottom'].set_color('none')
ax.axes.get_xaxis().set_visible(False)
ax.spines['top'].set_color('none')
ax.axis([0.0,20.0,0.0,1.1*Vo])
ax.set_ylabel(r'$E_n$ (eV)')
for n in range(1,nstates+1):
str1="$n = "+str(n)+r"$, $E_{"+str(n)+r"} = %.3f$ eV"%(E_vals[n-1])
if(n%2==1):
ax.text(0.5, E_vals[n-1]-0.005*Vo, str1, fontsize=16, color="#ff4d4d")
ax.hlines(E_vals[n-1], 7.2, 18.3, linewidth=1.8, linestyle='--', color="#ff4d4d")
else:
ax.text(18.5, E_vals[n-1]-0.005*Vo, str1, fontsize=16, color="#800000")
ax.hlines(E_vals[n-1], 7.2, 18.3, linewidth=1.8, linestyle='--', color="#800000")
str1="$V_o = %.3f$ eV"%(Vo)
#ax.text(18.5, Vo-0.01*Vo, str1, fontsize=16, color="blue")
ax.text(10, Vo+0.01*Vo, str1, fontsize=16, color="blue")
ax.hlines(Vo, 7.2, 18.3, linewidth=1.8, linestyle='-', color="blue")
ax.hlines(0.0, 0.0, 20.0, linewidth=1.8, linestyle='-', color="black")
plt.title("Energy Levels", fontsize=30)
plt.show()
# We can now plug the values of $E$ back into the wavefunction expressions and plot the **wavefunctions** and the corresponding **probability densities**.
# +
print ("\nThe Wavefunctions are:")
# Drawing the backgroung graph
fig, axes = plt.subplots(1, 2, figsize=(19,9))
axes[0].spines['right'].set_color('none')
axes[0].xaxis.tick_bottom()
axes[0].spines['left'].set_color('none')
axes[0].axes.get_yaxis().set_visible(False)
axes[0].spines['top'].set_color('none')
if (V1 > 1.4*Vo):
Ymax=1.4*Vo
else:
Ymax=1.1*V1
axes[0].axis([-1.5*L-d/2.0,1.5*L+d/2.0,0.0,Ymax])
axes[0].set_xlabel(r'$X$ (Angstroms)')
str1="$V_o = %.2f$ eV"%(Vo)
str2="$V_1 = %.2f$ eV"%(V1)
axes[0].text(1.05*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
axes[0].text(-1.5*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
if(d>0.0): axes[0].text(d/2, 1.02*Vo, str2, fontsize=24, color="blue")
# Defining the maximum amplitude of the wavefunction
if ((E_vals[1]-E_vals[0])/(E_vals[2]-E_vals[0]) < 0.2):
amp = np.sqrt((E_vals[2]-E_vals[0])/1.5)
else:
amp = np.sqrt((E_vals[1]-E_vals[0])/1.5)
# Plotting the energy levels
for n in range(1,nstates+1):
# Odd solution
if (n%2==0): axes[0].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#800000")
# Even solution
if (n%2==1): axes[0].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#ff4d4d")
axes[0].margins(0.00)
axes[0].vlines(-L-d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
if(d>0.0):
axes[0].vlines(-d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[0].vlines(d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[0].vlines(L+d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
axes[0].hlines(Vo, -1.5*L-d/2.0, -L-d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(0.0, -L-d/2.0, -d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(V1, -d/2.0, d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(0.0, d/2.0, L+d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(Vo, L+d/2.0, 1.5*L+d/2.0, linewidth=4.8, color="blue")
axes[0].set_title('Even Wavefunctions', fontsize=30)
# Defining the X ranges
X_lef2 = np.linspace(-1.5*L-d/2.0, -L-d/2.0, 900,endpoint=True)
X_lef1 = np.linspace(-L-d/2.0, -d/2.0, 900,endpoint=True)
X_mid = np.linspace(-d/2.0, d/2.0, 900,endpoint=True)
X_rig1 = np.linspace(d/2.0, L+d/2.0, 900,endpoint=True)
X_rig2 = np.linspace(L+d/2.0, 1.5*L+d/2.0, 900,endpoint=True)
# Plotting the wavefunctions
for n in range(1,nstates+1):
k = np.sqrt(E_vals[n-1])*val
a0 = np.sqrt(Vo-E_vals[n-1])*val
a1 = np.sqrt(V1-E_vals[n-1])*val
str1="$n = "+str(n)+r"$, $E_{"+str(n)+r"} = %.3f$ eV"%(E_vals[n-1])
# Even solution wavefunctions
if (n%2==1):
B = amp/np.sqrt(f1(E_vals[n-1])*f1(E_vals[n-1])+1.0)
C = f1(E_vals[n-1])*B
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
axes[0].plot(X_lef2, E_vals[n-1]+D*np.exp(a0*X_lef2), color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_lef1, E_vals[n-1]+B*np.cos(k*X_lef1)-C*np.sin(k*X_lef1), color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_mid, E_vals[n-1]+A*(np.exp(-a1*X_mid)+np.exp(a1*X_mid)), color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_rig1, E_vals[n-1]+B*np.cos(k*X_rig1)+C*np.sin(k*X_rig1), color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_rig2, E_vals[n-1]+D*np.exp(-a0*X_rig2), color="#ff4d4d", label="", linewidth=2.8)
axes[0].text(1.25*(L+d/2.0), E_vals[n-1]+0.01*Vo, str1, fontsize=16, color="#ff4d4d")
# Drawing the backgroung graph
axes[1].spines['right'].set_color('none')
axes[1].xaxis.tick_bottom()
axes[1].spines['left'].set_color('none')
axes[1].axes.get_yaxis().set_visible(False)
axes[1].spines['top'].set_color('none')
axes[1].axis([-1.5*L-d/2.0,1.5*L+d/2.0,0.0,Ymax])
axes[1].set_xlabel(r'$X$ (Angstroms)')
str1="$V_o = %.3f$ eV"%(Vo)
str11="$V_1= %.3f$ eV"% (V1)
axes[1].text(1.05*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
axes[1].text(-1.5*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
if(d>0.0): axes[1].text(d/2, 1.02*Vo, str2, fontsize=24, color="blue")
# Defining the maximum amplitude of the wavefunction
if ((E_vals[1]-E_vals[0])/(E_vals[2]-E_vals[0]) < 0.2):
amp = np.sqrt((E_vals[2]-E_vals[0])/1.5)
else:
amp = np.sqrt((E_vals[1]-E_vals[0])/1.5)
# Plotting the energy levels
for n in range(1,nstates+1):
# Odd solution
if (n%2==0): axes[1].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#800000")
# Even solution
if (n%2==1): axes[1].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#ff4d4d")
ax.margins(0.00)
axes[1].vlines(-L-d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
if(d>0.0):
axes[1].vlines(-d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[1].vlines(d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[1].vlines(L+d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
axes[1].hlines(Vo, -1.5*L-d/2.0, -L-d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(0.0, -L-d/2.0, -d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(V1, -d/2.0, d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(0.0, d/2.0, L+d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(Vo, L+d/2.0, 1.5*L+d/2.0, linewidth=4.8, color="blue")
axes[1].set_title('Odd Wavefunctions', fontsize=30)
# Defining the X ranges
X_lef2 = np.linspace(-1.5*L-d/2.0, -L-d/2.0, 900,endpoint=True)
X_lef1 = np.linspace(-L-d/2.0, -d/2.0, 900,endpoint=True)
X_mid = np.linspace(-d/2.0, d/2.0, 900,endpoint=True)
X_rig1 = np.linspace(d/2.0, L+d/2.0, 900,endpoint=True)
X_rig2 = np.linspace(L+d/2.0, 1.5*L+d/2.0, 900,endpoint=True)
# Plotting the wavefunctions
for n in range(1,nstates+1):
k = np.sqrt(E_vals[n-1])*val
a0 = np.sqrt(Vo-E_vals[n-1])*val
a1 = np.sqrt(V1-E_vals[n-1])*val
str1="$n = "+str(n)+r"$, $E_{"+str(n)+r"} = %.3f$ eV"%(E_vals[n-1])
# Odd solution
if (n%2==0):
C = amp/np.sqrt(f3(E_vals[n-1])*f3(E_vals[n-1])+1.0)
B = f3(E_vals[n-1])*C
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(-np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
axes[1].plot(X_lef2, E_vals[n-1]-D*np.exp(a0*X_lef2), color="#800000", label="", linewidth=2.8)
axes[1].plot(X_lef1, E_vals[n-1]-B*np.cos(k*X_lef1)+C*np.sin(k*X_lef1), color="#800000", label="", linewidth=2.8)
axes[1].plot(X_mid, E_vals[n-1]+A*(-np.exp(-a1*X_mid)+np.exp(a1*X_mid)), color="#800000", label="", linewidth=2.8)
axes[1].plot(X_rig1, E_vals[n-1]+B*np.cos(k*X_rig1)+C*np.sin(k*X_rig1), color="#800000", label="", linewidth=2.8)
axes[1].plot(X_rig2, E_vals[n-1]+D*np.exp(-a0*X_rig2), color="#800000", label="", linewidth=2.8)
axes[1].text(1.25*(L+d/2.0), E_vals[n-1]+0.01*Vo, str1, fontsize=16, color="#800000")
plt.show()
# -
# The even wavefunctions maintain the number of nodes of the corresponding isolated boxes, while the odd solutions have an additional node at the center of the barrier.
#
# +
print ("\nThe Probability Densities are:")
# Drawing the background graph
fig, axes = plt.subplots(1, 2, figsize=(19,9))
axes[0].spines['right'].set_color('none')
axes[0].xaxis.tick_bottom()
axes[0].spines['left'].set_color('none')
axes[0].axes.get_yaxis().set_visible(False)
axes[0].spines['top'].set_color('none')
if (V1 > 1.4*Vo):
Ymax=1.4*Vo
else:
Ymax=1.1*V1
axes[0].axis([-1.5*L-d/2.0,1.5*L+d/2.0,0.0,Ymax])
axes[0].set_xlabel(r'$X$ (Angstroms)')
str1="$V_o = %.2f$ eV"%(Vo)
str2="$V_1 = %.2f$ eV"%(V1)
axes[0].text(1.05*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
axes[0].text(-1.5*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
if(d>0.0): axes[0].text(d/2, 1.02*Vo, str2, fontsize=24, color="blue")
# Defining the maximum amplitude of the wavefunction
if ((E_vals[1]-E_vals[0])/(E_vals[2]-E_vals[0]) < 0.2):
amp = np.sqrt((E_vals[2]-E_vals[0])/1.5)
else:
amp = np.sqrt((E_vals[1]-E_vals[0])/1.5)
# Plotting the energy levels
for n in range(1,nstates+1):
# Odd solution
if (n%2==0): axes[0].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#800000")
# Even solution
if (n%2==1): axes[0].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#ff4d4d")
axes[0].margins(0.00)
axes[0].vlines(-L-d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
if(d>0.0):
axes[0].vlines(-d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[0].vlines(d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[0].vlines(L+d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
axes[0].hlines(Vo, -1.5*L-d/2.0, -L-d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(0.0, -L-d/2.0, -d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(V1, -d/2.0, d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(0.0, d/2.0, L+d/2.0, linewidth=4.8, color="blue")
axes[0].hlines(Vo, L+d/2.0, 1.5*L+d/2.0, linewidth=4.8, color="blue")
axes[0].set_title('Probability Density for Even Wavefunctions', fontsize=30)
# Defining the X ranges
X_lef2 = np.linspace(-1.5*L-d/2.0, -L-d/2.0, 900,endpoint=True)
X_lef1 = np.linspace(-L-d/2.0, -d/2.0, 900,endpoint=True)
X_mid = np.linspace(-d/2.0, d/2.0, 900,endpoint=True)
X_rig1 = np.linspace(d/2.0, L+d/2.0, 900,endpoint=True)
X_rig2 = np.linspace(L+d/2.0, 1.5*L+d/2.0, 900,endpoint=True)
# Plotting the probability densities
for n in range(1,nstates+1):
k = np.sqrt(E_vals[n-1])*val
a0 = np.sqrt(Vo-E_vals[n-1])*val
a1 = np.sqrt(V1-E_vals[n-1])*val
str1="$n = "+str(n)+r"$, $E_{"+str(n)+r"} = %.3f$ eV"%(E_vals[n-1])
# Even solution wavefunctions
if (n%2==1):
B = amp/np.sqrt(f1(E_vals[n-1])*f1(E_vals[n-1])+1.0)
C = f1(E_vals[n-1])*B
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
axes[0].plot(X_lef2, E_vals[n-1]+(D*np.exp(a0*X_lef2))**2, color="#ff4d4d", label="", linewidth=2.8)
axes[0].fill_between(X_lef2, E_vals[n-1], E_vals[n-1]+(D*np.exp(a0*X_lef2))**2, color="#3dbb2a")
axes[0].plot(X_lef1, E_vals[n-1]+(B*np.cos(k*X_lef1)-C*np.sin(k*X_lef1))**2, color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_mid, E_vals[n-1]+(A*(np.exp(-a1*X_mid)+np.exp(a1*X_mid)))**2, color="#ff4d4d", label="", linewidth=2.8)
if(d>0.0): axes[0].fill_between(X_mid, E_vals[n-1], E_vals[n-1]+(A*(np.exp(-a1*X_mid)+np.exp(a1*X_mid)))**2, color="purple")
axes[0].plot(X_rig1, E_vals[n-1]+(B*np.cos(k*X_rig1)+C*np.sin(k*X_rig1))**2, color="#ff4d4d", label="", linewidth=2.8)
axes[0].plot(X_rig2, E_vals[n-1]+(D*np.exp(-a0*X_rig2))**2, color="#ff4d4d", label="", linewidth=2.8)
axes[0].fill_between(X_rig2, E_vals[n-1], E_vals[n-1]+(D*np.exp(-a0*X_rig2))**2, color="#3dbb2a")
axes[0].text(1.25*(L+d/2.0), E_vals[n-1]+0.01*Vo, str1, fontsize=16, color="#ff4d4d")
# Drawing the background graph
axes[1].spines['right'].set_color('none')
axes[1].xaxis.tick_bottom()
axes[1].spines['left'].set_color('none')
axes[1].axes.get_yaxis().set_visible(False)
axes[1].spines['top'].set_color('none')
axes[1].axis([-1.5*L-d/2.0,1.5*L+d/2.0,0.0,Ymax])
axes[1].set_xlabel(r'$X$ (Angstroms)')
str1="$V_o = %.3f$ eV"%(Vo)
str11="$V_1= %.3f$ eV"% (V1)
axes[1].text(1.05*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
axes[1].text(-1.5*(L+d/2.0), 1.02*Vo, str1, fontsize=24, color="blue")
if(d>0.0): axes[1].text(d/2, 1.02*Vo, str2, fontsize=24, color="blue")
# Defining the maximum amplitude of the wavefunction
if ((E_vals[1]-E_vals[0])/(E_vals[2]-E_vals[0]) < 0.2):
amp = np.sqrt((E_vals[2]-E_vals[0])/1.5)
else:
amp = np.sqrt((E_vals[1]-E_vals[0])/1.5)
# Plotting the energy levels
for n in range(1,nstates+1):
# Odd solution
if (n%2==0): axes[1].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#800000")
# Even solution
if (n%2==1): axes[1].hlines(E_vals[n-1], -1.5*L-d/2.0, 1.5*L+d/2.0, linewidth=1.8, linestyle='--', color="#ff4d4d")
axes[1].margins(0.00)
axes[1].vlines(-L-d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
if(d>0.0):
axes[1].vlines(-d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[1].vlines(d/2.0, 0.0, V1, linewidth=4.8, color="blue")
axes[1].vlines(L+d/2.0, 0.0, Vo, linewidth=4.8, color="blue")
axes[1].hlines(Vo, -1.5*L-d/2.0, -L-d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(0.0, -L-d/2.0, -d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(V1, -d/2.0, d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(0.0, d/2.0, L+d/2.0, linewidth=4.8, color="blue")
axes[1].hlines(Vo, L+d/2.0, 1.5*L+d/2.0, linewidth=4.8, color="blue")
axes[1].set_title('Probability Density for Odd Wavefunctions', fontsize=30)
# Defining the X ranges
X_lef2 = np.linspace(-1.5*L-d/2.0, -L-d/2.0, 900,endpoint=True)
X_lef1 = np.linspace(-L-d/2.0, -d/2.0, 900,endpoint=True)
X_mid = np.linspace(-d/2.0, d/2.0, 900,endpoint=True)
X_rig1 = np.linspace(d/2.0, L+d/2.0, 900,endpoint=True)
X_rig2 = np.linspace(L+d/2.0, 1.5*L+d/2.0, 900,endpoint=True)
# Plotting the wavefunctions
for n in range(1,nstates+1):
k = np.sqrt(E_vals[n-1])*val
a0 = np.sqrt(Vo-E_vals[n-1])*val
a1 = np.sqrt(V1-E_vals[n-1])*val
str1="$n = "+str(n)+r"$, $E_{"+str(n)+r"} = %.3f$ eV"%(E_vals[n-1])
# Odd solution
if (n%2==0):
C = amp/np.sqrt(f3(E_vals[n-1])*f3(E_vals[n-1])+1.0)
B = f3(E_vals[n-1])*C
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(-np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
axes[1].plot(X_lef2, E_vals[n-1]+(D*np.exp(a0*X_lef2))**2, color="#800000", label="", linewidth=2.8)
axes[1].fill_between(X_lef2, E_vals[n-1], E_vals[n-1]+(D*np.exp(a0*X_lef2))**2, color="#3dbb2a")
axes[1].plot(X_lef1, E_vals[n-1]+(-B*np.cos(k*X_lef1)+C*np.sin(k*X_lef1))**2, color="#800000", label="", linewidth=2.8)
axes[1].plot(X_mid, E_vals[n-1]+(A*(-np.exp(-a1*X_mid)+np.exp(a1*X_mid)))**2, color="#800000", label="", linewidth=2.8)
if(d>0.0): axes[1].fill_between(X_mid, E_vals[n-1], E_vals[n-1]+(A*(-np.exp(-a1*X_mid)+np.exp(a1*X_mid)))**2, color="purple")
axes[1].plot(X_rig1, E_vals[n-1]+(B*np.cos(k*X_rig1)+C*np.sin(k*X_rig1))**2, color="#800000", label="", linewidth=2.8)
axes[1].plot(X_rig2, E_vals[n-1]+(D*np.exp(-a0*X_rig2))**2, color="#800000", label="", linewidth=2.8)
axes[1].fill_between(X_rig2, E_vals[n-1], E_vals[n-1]+(D*np.exp(-a0*X_rig2))**2, color="#3dbb2a")
axes[1].text(1.25*(L+d/2.0), E_vals[n-1]+0.01*Vo, str1, fontsize=16, color="#800000")
plt.show()
# -
# When the boxes are close together, the coupling effect between the two boxes is observed. Each energy level of the single finite box splits into two levels for the two-box potential: one higher in energy (odd solutions) and one lower in energy (even solutions). The even wavefunctions show the unusual result that the probability density of finding the particle inside the $V_1$ barrier (area shaded in purple) is larger, while for the odd solutions the probability of finding the particle at the center of the barrier drops to zero.
#
# The tunneling probability (areas shaded in green in the graphs above) corresponds to the area under the probability density outside both boxes, that is, for $x \geq \frac{d}{2}+L$ and for $x \leq -\frac{d}{2}-L$. It is given by:
# $$ \frac{\int^{-\frac{d}{2}-L}_{-\infty} |\psi(x)|^2\ dx +\int^{+\infty}_{\frac{d}{2}+L} |\psi(x)|^2\ dx }{\int_{-\infty}^{+\infty} |\psi(x)|^2\ dx }$$
#
# It is also interesting to evaluate the probability of finding the particle inside the barrier (area shaded in purple in the cell above), that is, between $-\frac{d}{2} \leq x \leq \frac{d}{2}$. This probability is:
# $$ \frac{\int_{-\frac{d}{2}}^{\frac{d}{2}} |\psi(x)|^2\ dx}{\int_{-\infty}^{+\infty} |\psi(x)|^2\ dx }$$
#
# These integrals can be solved analytically (for both the even and the odd solutions). Doing so, the tunneling probability and the probability of being inside the barrier for each state are:
print ("\nThe tunneling probabilities and probabilities of being inside the barrier are:")
for n in range(1,nstates+1):
k = np.sqrt(E_vals[n-1])*val
a0 = np.sqrt(Vo-E_vals[n-1])*val
a1 = np.sqrt(V1-E_vals[n-1])*val
# Odd wavefunction
if (n%2==0):
C = 1.0
B = f3(E_vals[n-1])*C
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
tunn_prob = D*D*np.exp(-a0*(2.0*L+d))/(2.0*a0)
total_prob = tunn_prob
if (d>0.0):
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(-np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
barr_prob = A*A*(np.sinh(a1*d)/a1-d)
total_prob += barr_prob
else:
barr_prob = 0.0
total_prob += (B*B*(np.sin(k*(2.0*L+d))+2.0*L*k-np.sin(k*d))+2.0*B*C*(np.cos(k*d)-np.cos(k*(2.0*L+d)))+C*C*(-np.sin(k*(2.0*L+d))+2.0*L*k+np.sin(k*d)))/(4.0*k)
# Even wavefunction
else:
B = 1.0
C = f1(E_vals[n-1])*B
A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
barr_prob = A*A*(np.sinh(a1*d)/a1+d)
tunn_prob = D*D*np.exp(-a0*(2.0*L+d))/(2.0*a0)
total_prob = barr_prob + tunn_prob
total_prob += (B*B*(np.sin(k*(2.0*L+d))+2.0*L*k-np.sin(k*d))+2.0*B*C*(np.cos(k*d)-np.cos(k*(2.0*L+d)))+C*C*(-np.sin(k*(2.0*L+d))+2.0*L*k+np.sin(k*d)))/(4.0*k)
print (" State n=%3d tunneling probability = %5.2f%%, barrier probability = %5.2f%%" % (n,100*tunn_prob/total_prob,100*barr_prob/total_prob))
# See how low the probability of finding the particle inside the barrier is for odd wavefunctions (even values of $n$)
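# As an optional numerical sanity check (a sketch, not part of the derivation above), the same
# ratios can be estimated by integrating |psi(x)|^2 on a grid with np.trapz, reusing E_vals, f1, f3,
# val, L, d, Vo and V1 defined in the previous cells. Since |psi(x)|^2 is symmetric, integrating
# over x >= 0 is sufficient; the outer cutoff (10*L) and the grid resolution are arbitrary choices,
# so small deviations from the analytic values are expected.
print ("\nNumerical check of the probabilities (trapezoidal integration):")
for n in range(1,nstates+1):
    k = np.sqrt(E_vals[n-1])*val
    a0 = np.sqrt(Vo-E_vals[n-1])*val
    a1 = np.sqrt(V1-E_vals[n-1])*val
    if (n%2==0):
        # Odd solution: same coefficient conventions as in the analytic calculation above
        C = 1.0
        B = f3(E_vals[n-1])*C
        A = 0.0 if d==0.0 else (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(-np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
        mid_f = lambda x, A=A, a1=a1: A*(-np.exp(-a1*x)+np.exp(a1*x))
    else:
        # Even solution: same coefficient conventions as in the analytic calculation above
        B = 1.0
        C = f1(E_vals[n-1])*B
        A = (B*np.cos(k*d/2.0)+C*np.sin(k*d/2.0))/(np.exp(-a1*d/2.0)+np.exp(a1*d/2.0))
        mid_f = lambda x, A=A, a1=a1: A*(np.exp(-a1*x)+np.exp(a1*x))
    D = np.exp(a0*(L+d/2.0))*(B*np.cos(k*(L+d/2.0))+C*np.sin(k*(L+d/2.0)))
    X = np.linspace(0.0, 10.0*L+d/2.0, 200001)
    # Piecewise wavefunction for x >= 0: barrier region, box region, and outer forbidden region
    psi = np.piecewise(X,
                       [X <= d/2.0, (X > d/2.0) & (X <= L+d/2.0), X > L+d/2.0],
                       [mid_f,
                        lambda x, B=B, C=C, k=k: B*np.cos(k*x)+C*np.sin(k*x),
                        lambda x, D=D, a0=a0: D*np.exp(-a0*x)])
    dens = psi*psi
    total = np.trapz(dens, X)
    barr = np.trapz(np.where(X <= d/2.0, dens, 0.0), X)
    tunn = np.trapz(np.where(X > L+d/2.0, dens, 0.0), X)
    print (" State n=%3d numerical tunneling probability = %5.2f%%, barrier probability = %5.2f%%" % (n,100*tunn/total,100*barr/total))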
|
py | b408487aa7b68287420dfec7db84073b47d791c3 | import configparser
from weather import weather_data
from clothes import gear_recommendation
from twitter import tweet_recommendation
# TODO: Add logs
# TODO: Add debugging flag with no tweeting
def main(event=None, context=None):
    # Initialisation
config = configparser.ConfigParser()
config.read('config.cfg')
weather_forecast = weather_data(config)
clothes = gear_recommendation(weather_forecast)
tweet_recommendation(config, weather_forecast, clothes)
if __name__=='__main__':
main() |
py | b40848b8fca9c7bf63406ff970c4454e3273d0f5 | # -*- coding: utf-8 -*-
# noqa: D205,D400
"""
Indicators utilities
====================
The `Indicator` class wraps indices computations with pre- and post-processing functionality. Prior to computations,
the class runs data and metadata health checks. After computations, the class masks values that should be considered
missing and adds metadata attributes to the output object.
There are many ways to construct indicators. A good place to start is `this notebook <notebooks/extendxclim.ipynb#Defining-new-indicators>`_.
Dictionary and YAML parser
--------------------------
To construct indicators dynamically, xclim can also use dictionaries and parse them from YAML files.
This is especially useful for generating whole indicator "submodules" from files.
This functionality is based on and extends the work of [clix-meta](https://github.com/clix-meta/clix-meta/).
YAML file structure
~~~~~~~~~~~~~~~~~~~
Indicator-defining yaml files are structured in the following way:
.. code-block:: yaml
module: <module name> # Defaults to the file name
realm: <realm> # If given here, applies to all indicators that do no give it.
base: <base indicator class> # Defaults to "Daily"
    doc: <module docstring> # Defaults to a minimal header, only valid if the module doesn't already exist.
indices:
<identifier>:
base: <base indicator class> # Defaults to module-wide base class or "Daily".
realm: <realm> # Defaults to the module-wide realm or "atmos"
reference: <references>
references: <references> # Plural or singular accepted (for harmonizing clix-meta and xclim)
keywords: <keywords>
notes: <notes>
title: <title>
abstract: <abstract>
period: # If given, both "allowed" and "default" must also be given.
allowed: # A list of allowed periods (resampling frequencies)
annual: # Translates to "A" (includes "Y")
seasonal: # Translates to "Q"
monthly: # Translates to "M"
weekly: # Translates to "W"
default: annual # Translates to "YS", "QS-DEC", "MS" or "W-SUN". See xclim.core.units.FREQ_NAMES.
output:
var_name: <var_name> # Defaults to "identifier",
standard_name: <standard_name>
long_name: <long_name>
description: <description>
comment: <comment>
units: <units> # Defaults to ""
cell_methods:
- <dim1> : <method 1>
...
index_function:
name: <function name> # Refering to a function in xclim.indices.generic or xclim.indices
parameters: # See below for details on that section.
<param name> # Refering to a parameter of the function above.
kind: <param kind> # Optional, one of quantity, operator or reducer
data: <param data>
units: <param units>
operator: <param data>
reducer: <param data>
...
input:
<var1> : <variable type 1> # <var1> refers to a name in the function above, see below.
...
... # and so on.
All fields are optional. Other fields can be found in the yaml file, but they will not be used by xclim.
In the following, the section under `<identifier>` is referred to as `data`. When creating indicators from
a dictionary, with :py:meth:`Indicator.from_dict`, the input dict must follow the structure of `data`.
Indicator parameters
~~~~~~~~~~~~~~~~~~~~
`clix-meta` defines three kinds of parameters:
- "quantity", a quantity with a magnitude and some units, (equivalent to xclim.core.utils.InputKind.QUANTITY_STR)
The value is given through the magnitude in "data" and units in "units".
- "operator", one of "<", "<=", ">", ">=", "==", "!=", an operator for conditional computations.
The value is given in "operator".
- "reducer", one of "maximum", "minimum", "mean", "sum", a reducing method name.
The value is given in "reducer".
xclim supports both this syntax and a simpler one where only the "data" key is given.
As YAML is able to cast simple python literals, no passing of "kind" is needed, if a string parameter could be
mistranslated to a boolean or a number, simply use quotes to isolate it. To pass a number sequence, use
the yaml list syntax.
Inputs
~~~~~~
As xclim has strict definitions of possible input variables (see :py:data:`xclim.core.yaml.variables`),
the mapping of `data.input` simply links a variable name from the function in `data.index_function.name`
to one of those official variables.
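Example
~~~~~~~
As a minimal sketch, the dictionary accepted by :py:meth:`Indicator.from_dict` mirrors the YAML
structure above. The names used here (the ``tg_mean`` indice and the official ``tas`` variable)
are illustrative assumptions:

.. code-block:: python

    from xclim.core.indicator import Daily

    tg_mean_annual = Daily.from_dict(
        data={
            "output": {"var_name": "tg_mean_annual", "units": "K"},
            "index_function": {"name": "tg_mean"},
            "input": {"tas": "tas"},
        },
        identifier="tg_mean_annual",
        module="example",
        realm="atmos",
    )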
"""
import logging
import re
import warnings
import weakref
from collections import OrderedDict, defaultdict
from copy import deepcopy
from inspect import Parameter, _empty, signature
from os import PathLike
from pathlib import Path
from types import ModuleType
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Type, Union
import numpy as np
from boltons.funcutils import copy_function, wraps
from xarray import DataArray, Dataset
from yaml import safe_load
from .. import indices
from . import datachecks
from .calendar import parse_offset
from .cfchecks import generate_cfcheck
from .formatting import (
AttrFormatter,
default_formatter,
generate_indicator_docstring,
merge_attributes,
parse_cell_methods,
parse_doc,
update_history,
)
from .locales import TRANSLATABLE_ATTRS, get_local_attrs, get_local_formatter
from .options import MISSING_METHODS, MISSING_OPTIONS, OPTIONS
from .units import FREQ_NAMES, convert_units_to, declare_units, units
from .utils import (
MissingVariableError,
infer_kind_from_parameter,
variables,
wrapped_partial,
)
# Indicators registry
registry = dict() # Main class registry
_indicators_registry = defaultdict(list) # Private instance registry
class IndicatorRegistrar:
"""Climate Indicator registering object."""
def __new__(cls):
"""Add subclass to registry."""
name = cls.__name__.upper()
module = cls.__module__
# If the module is not one of xclim's default, prepend the submodule name.
if module.startswith("xclim.indicators"):
submodule = module.split(".")[2]
if submodule not in ["atmos", "land", "ocean", "seaIce"]:
name = f"{submodule}.{name}"
else:
name = f"{module}.{name}"
if name in registry:
warnings.warn(
f"Class {name} already exists and will be overwritten.", stacklevel=1
)
registry[name] = cls
cls._registry_id = name
return super().__new__(cls)
def __init__(self):
_indicators_registry[self.__class__].append(weakref.ref(self))
@classmethod
def get_instance(cls):
"""Return first found instance.
Raises `ValueError` if no instance exists.
"""
for inst_ref in _indicators_registry[cls]:
inst = inst_ref()
if inst is not None:
return inst
raise ValueError(
f"There is no existing instance of {cls.__name__}. Either none were created or they were all garbage-collected."
)
class Indicator(IndicatorRegistrar):
r"""Climate indicator base class.
Climate indicator object that, when called, computes an indicator and assigns its output a number of
CF-compliant attributes. Some of these attributes can be *templated*, allowing metadata to reflect
the value of call arguments.
Instantiating a new indicator returns an instance but also creates and registers a custom subclass.
Parameters in `Indicator._cf_names` will be added to the output variable(s). When creating new `Indicators` subclasses,
if the compute function returns multiple variables, attributes may be given as lists of strings or strings.
In the latter case, the same value is used on all variables.
Compared to their base `compute` function, indicators add the possibility of using dataset as input,
with the injected argument `ds` in the call signature. All arguments that were indicated by the compute function
to be DataArrays through annotations will be promoted to also accept strings that correspond to variable names
in the `ds` dataset.
Parameters
----------
identifier: str
Unique ID for class registry, should be a valid slug.
realm : {'atmos', 'seaIce', 'land', 'ocean'}
General domain of validity of the indicator. Indicators created outside xclim.indicators must set this attribute.
compute: func
The function computing the indicators. It should return one or more DataArray.
var_name: str or Sequence[str]
Output variable(s) name(s). May use tags {<tag>}. If the indicator outputs multiple variables,
var_name *must* be a list of the same length.
standard_name: str or Sequence[str]
Variable name (CF).
long_name: str or Sequence[str]
Descriptive variable name. Parsed from `compute` docstring if not given.
units: str or Sequence[str]
Representative units of the physical quantity (CF).
cell_methods: str or Sequence[str]
List of blank-separated words of the form "name: method" (CF).
description: str or Sequence[str]
Sentence meant to clarify the qualifiers of the fundamental quantities, such as which
surface a quantity is defined on or what the flux sign conventions are.
comment: str or Sequence[str]
Miscellaneous information about the data or methods used to produce it.
title: str
A succinct description of what is in the computed outputs. Parsed from `compute` docstring if None.
abstract: str
A long description of what is in the computed outputs. Parsed from `compute` docstring if None.
keywords: str
Comma separated list of keywords. Parsed from `compute` docstring if None.
references: str
Published or web-based references that describe the data or methods used to produce it. Parsed from
`compute` docstring if None.
notes: str
Notes regarding computing function, for example the mathematical formulation. Parsed from `compute`
docstring if None.
missing: {any, wmo, pct, at_least_n, skip, from_context}
The name of the missing value method. See `xclim.core.missing.MissingBase` to create new custom methods. If
None, this will be determined by the global configuration (see `xclim.set_options`). Defaults to "from_context".
freq: {"D", "H", None}
The expected frequency of the input data. Use None if irrelevant.
missing_options : dict, None
Arguments to pass to the `missing` function. If None, this will be determined by the global configuration.
context: str
The `pint` unit context, for example use 'hydro' to allow conversion from kg m-2 s-1 to mm/day.
allowed_periods : Sequence[str], optional
A list of allowed periods, i.e. base parts of the `freq` parameter. For example, indicators meant to be
computed annually only will have `allowed_periods=["Y", "A"]`. `None` means, "any period" or that the
indicator doesn't take a `freq` argument.
Notes
-----
All subclasses created are available in the `registry` attribute and can be used to define custom subclasses
or parse all available instances.
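    Examples
    --------
    A hypothetical minimal instantiation (a sketch only; the ``tg_mean`` indice is assumed to exist
    in :py:mod:`xclim.indices`), mirroring how the built-in indicators are declared:

    .. code-block:: python

        from xclim import indices
        from xclim.core.indicator import Daily

        tg_mean_example = Daily(
            identifier="tg_mean_example",
            realm="atmos",
            compute=indices.tg_mean,
            units="K",
        )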
"""
#: Number of input DataArray variables. Should be updated by subclasses if needed.
#: This number sets which inputs are passed to the tests.
nvar = 1
# Allowed metadata attributes on the output variables
_cf_names = [
"var_name",
"standard_name",
"long_name",
"units",
"cell_methods",
"description",
"comment",
]
# metadata fields that are formatted as free text.
_text_fields = ["long_name", "description", "comment"]
_funcs = ["compute", "cfcheck", "datacheck"]
# Will become the class's name
identifier = None
missing = "from_context"
missing_options = None
context = "none"
freq = None
allowed_periods = None
# Variable metadata (_cf_names, those that can be lists or strings)
    # A developer should access these through cf_attrs on instances
var_name = None
standard_name = ""
long_name = ""
units = ""
cell_methods = ""
description = ""
comment = ""
# Global metadata (must be strings, not attributed to the output)
realm = None
title = ""
abstract = ""
keywords = ""
references = ""
notes = ""
parameters: Mapping[str, Any]
"""A dictionary mapping metadata about the input parameters to the indicator.
Contains : "default", "description", "kind" and, sometimes, "units" and "choices".
"kind" refers to the constants of :py:class:`xclim.core.utils.InputKind`.
"""
cf_attrs: Sequence[Mapping[str, Any]]
"""A list of metadata information for each output of the indicator.
It minimally contains a "var_name" entry, and may contain : "standard_name", "long_name",
"units", "cell_methods", "description" and "comment".
"""
def __new__(cls, **kwds):
"""Create subclass from arguments."""
identifier = kwds.get("identifier", cls.identifier)
if identifier is None:
raise AttributeError("`identifier` has not been set.")
kwds["var_name"] = kwds.get("var_name", cls.var_name) or identifier
# Parse and update compute's signature.
kwds["compute"] = kwds.get("compute", None) or cls.compute
# Updated to allow string variable names and the ds arg.
# Parse docstring of the compute function, its signature and its parameters
kwds["_indcompute"], docmeta, params = _parse_indice(
kwds["compute"],
passed=kwds.get("parameters"),
ds={
"annotation": Dataset,
"description": "A dataset with the variables given by name.",
},
)
# The update signature
kwds["_sig"] = kwds["_indcompute"].__signature__
# The input parameters' name
kwds["_parameters"] = tuple(kwds["_sig"].parameters.keys())
# All fields parsed by parse_doc except "parameters"
# i.e. : title, abstract, notes, references, long_name
for name, value in docmeta.items():
if not getattr(cls, name):
# Set if neither the class attr is set nor the kwds attr
kwds.setdefault(name, value)
# The input parameters' metadata
# We dump whatever the base class had and take what was parsed from the current compute function.
kwds["parameters"] = params
# Parse kwds to organize cf_attrs
# Must be done after parsing var_name
# And before converting callables to staticmethods
kwds["cf_attrs"] = cls._parse_cf_attrs(kwds)
# Convert function objects to static methods.
for key in cls._funcs + cls._cf_names:
if key in kwds and callable(kwds[key]):
kwds[key] = staticmethod(kwds[key])
# Infer realm for built-in xclim instances
if cls.__module__.startswith(__package__.split(".")[0]):
xclim_realm = cls.__module__.split(".")[2]
else:
xclim_realm = None
# Priority given to passed realm -> parent's realm -> location of the class declaration (official inds only)
kwds.setdefault("realm", cls.realm or xclim_realm)
if kwds["realm"] not in ["atmos", "seaIce", "land", "ocean"]:
raise AttributeError(
"Indicator's realm must be given as one of 'atmos', 'seaIce', 'land' or 'ocean'"
)
# Create new class object
new = type(identifier.upper(), (cls,), kwds)
# Forcing the module is there so YAML-generated submodules are correctly seen by IndicatorRegistrar.
if "module" in kwds:
new.__module__ = f"xclim.indicators.{kwds['module']}"
else:
# If the module was not forced, set the module to the base class' module.
# Otherwise all indicators will have module `xclim.core.indicator`.
new.__module__ = cls.__module__
# Generate docstring
new._indcompute.__doc__ = new.__doc__ = generate_indicator_docstring(new)
# Add the created class to the registry
# This will create an instance from the new class and call __init__.
return super().__new__(new)
@classmethod
def _parse_cf_attrs(
cls, kwds: Dict[str, Any]
) -> Union[List[Dict[str, str]], List[Dict[str, Union[str, Callable]]]]:
"""CF-compliant metadata attributes for all output variables."""
# Get number of outputs
n_outs = (
len(kwds["var_name"]) if isinstance(kwds["var_name"], (list, tuple)) else 1
)
# Populate cf_attrs from attribute set during class creation and __new__
cf_attrs = [{} for i in range(n_outs)]
for name in cls._cf_names:
values = kwds.get(name, getattr(cls, name))
if not isinstance(values, (list, tuple)):
values = [values] * n_outs
elif len(values) != n_outs:
raise ValueError(
f"Attribute {name} has {len(values)} elements but should have {n_outs} according to passed var_name."
)
for attrs, value in zip(cf_attrs, values):
if value:
attrs[name] = value
return cf_attrs
@classmethod
def from_dict(
cls,
data: dict,
identifier: str,
module: Optional[str] = None,
realm: Optional[str] = None,
keywords: Optional[str] = None,
references: Optional[str] = None,
notes: Optional[str] = None,
):
"""Create an indicator subclass and instance from a dictionary of parameters.
Parameters
----------
data: dict
The exact structure of this dictionary is detailed in the submodule documentation.
identifier : str
The name of the subclass and internal indicator name.
module : str
The module name of the indicator. This is meant to be used only if the indicator
is part of a dynamically generated submodule, to override the module of the base class.
realm: str, optional
keywords: str, optional
references str, optional
notes: str, optional
Other indicator attributes to fill in for missing values in the individual definition.
"""
# Make cell methods. YAML will generate a list-of-dict structure, put it back in a space-divided string
if data.get("output", {}).get("cell_methods") is not None:
cell_methods = parse_cell_methods(data["output"]["cell_methods"])
else:
cell_methods = None
params = {}
if "input" in data:
# Override input metadata
input_units = {}
nvar = len(data["input"])
for varname, name in data["input"].items():
# Indicator's new will put the name of the variable as its default,
# we override this with the real variable name.
                # Also take the dimensionality and description from the yaml of official variables.
# Description overrides the one parsed from the generic compute docstring
# Dimensionality goes into the declare_units wrapper.
params[varname] = {
"default": name,
"description": variables[name]["description"],
}
input_units[varname] = variables[name]["dimensionality"]
cfcheck = generate_cfcheck(*[varname for varname in data["input"].values()])
else:
nvar = None
cfcheck = None
input_units = None
metadata_placeholders = {}
if "index_function" in data:
# Generate compute function
# data.index_function.name refers to a function in xclim.indices.generic or xclim.indices (in this order of priority).
# data.index_function.parameters is a list of injected arguments.
funcname = data["index_function"].get("name")
if funcname is None:
# No index function given, reuse the one from the base class.
compute = cls.compute
else:
compute = getattr(
indices.generic, funcname, getattr(indices, funcname, None)
)
if compute is None:
raise ImportError(
f"Indice function {funcname} not found in xclim.indices or xclim.indices.generic."
)
injected_params = {}
# In clix-meta, when there are no parameters, the key is still there with a None value.
for name, param in (data["index_function"].get("parameters") or {}).items():
# Handle clix-meta cases
if param.get("kind") == "quantity" and isinstance(
param["data"], (str, int, float)
):
# A string with units, but not a placeholder (where data is a dict)
value = f"{param['data']} {param['units']}"
elif param.get("kind") in ["reducer", "operator"]:
# clix-meta defined kinds :value is stored in a field of the same name as the kind.
value = param[param["kind"]]
else:
# All other xclim-defined kinds in "data"
value = param["data"]
if isinstance(value, dict):
# User-chosen parameter. placeholder.
# It should be a string, this is a bug from clix-meta.
value = list(value.keys())[0]
params[name] = {
"default": param.get("default"),
"description": param.get(
"description", param.get("standard_name", name)
),
}
if "units" in param:
params[name]["units"] = param["units"]
input_units = input_units or {}
input_units[name] = param["units"]
# We will need to replace placeholders in metadata strings (only for clix-meta indicators)
if value != name:
metadata_placeholders["{" + value + "}"] = "{" + name + "}"
else:
# Injected parameter
injected_params[name] = value
if input_units is not None:
compute = declare_units(**input_units)(compute)
compute = wrapped_partial(compute, **injected_params)
else:
compute = None
# Allowed resampling frequencies
if "period" in data:
params["freq"] = {"default": FREQ_NAMES[data["period"]["default"]][1]}
allowed_periods = []
for period_name in data["period"]["allowed"]:
allowed_periods.append(FREQ_NAMES[period_name][0])
else:
allowed_periods = None
kwargs = dict(
# General
identifier=identifier,
module=module,
realm=data.get("realm", realm),
keywords=data.get("keywords", keywords),
references=data.get("references", data.get("reference", references)),
notes=data.get("notes", notes),
# Indicator-specific metadata
title=data.get("title"),
abstract=data.get("abstract"),
# Output meta
var_name=data.get("output", {}).get("var_name", identifier),
standard_name=data.get("output", {}).get("standard_name"),
long_name=data.get("output", {}).get("long_name"),
description=data.get("output", {}).get("description"),
comment=data.get("output", {}).get("comment"),
units=data.get("output", {}).get("units"),
cell_methods=cell_methods,
# Input data, override defaults given in generic compute's signature.
parameters=params or None, # None if an empty dict
nvar=nvar,
compute=compute,
# Checks
cfcheck=cfcheck,
allowed_periods=allowed_periods,
)
for cf_name in cls._cf_names:
if isinstance(kwargs[cf_name], str):
for old, new in metadata_placeholders.items():
kwargs[cf_name] = kwargs[cf_name].replace(old, new)
# Remove kwargs passed as "None", they will be taken from the base class instead.
# For most parameters it would be ok to pass a None anyway (we figure that out in __new__),
# but some (like nvar) would not like that.
return cls(**{k: v for k, v in kwargs.items() if v is not None})
def __init__(self, **kwds):
"""Run checks and organizes the metadata."""
# keywords of kwds that are class attributes have already been set in __new__
self._check_identifier(self.identifier)
if self.missing == "from_context" and self.missing_options is not None:
raise ValueError(
"Cannot set `missing_options` with `missing` method being from context."
)
# Validate hard-coded missing options
kls = MISSING_METHODS[self.missing]
self._missing = kls.execute
if self.missing_options:
kls.validate(**self.missing_options)
# Validation is done : register the instance.
super().__init__()
# Update call signature
self.__call__ = wraps(self._indcompute)(self.__call__)
def __call__(self, *args, **kwds):
"""Call function of Indicator class."""
# For convenience
n_outs = len(self.cf_attrs)
# Bind call arguments to `compute` arguments and set defaults.
ba = self._sig.bind(*args, **kwds)
ba.apply_defaults()
# Assign inputs passed as strings from ds.
self._assign_named_args(ba)
# Assume the first arguments are always the DataArrays.
# Only the first nvar inputs are checked (data + cf checks)
das = OrderedDict()
for name in self._parameters[: self.nvar]:
das[name] = ba.arguments.pop(name)
# Metadata attributes from templates
var_id = None
var_attrs = []
for attrs in self.cf_attrs:
if n_outs > 1:
var_id = f"{self._registry_id}.{attrs['var_name']}"
var_attrs.append(
self._update_attrs(ba, das, attrs, names=self._cf_names, var_id=var_id)
)
# Pre-computation validation checks on DataArray arguments
self._bind_call(self.datacheck, **das)
self._bind_call(self.cfcheck, **das)
# Check if the period is allowed:
if (
self.allowed_periods is not None
and "freq" in kwds
and parse_offset(kwds["freq"])[1] not in self.allowed_periods
):
raise ValueError(
f"Resampling frequency {kwds['freq']} is not allowed for indicator {self.identifier} (needs something equivalent to one of {self.allowed_periods})."
)
# Compute the indicator values, ignoring NaNs and missing values.
outs = self.compute(**das, **ba.kwargs)
if isinstance(outs, DataArray):
outs = [outs]
if len(outs) != n_outs:
raise ValueError(
f"Indicator {self.identifier} was wrongly defined. Expected {n_outs} outputs, got {len(outs)}."
)
# Convert to output units
outs = [
convert_units_to(out, attrs.get("units", ""), self.context)
for out, attrs in zip(outs, var_attrs)
]
# Update variable attributes
for out, attrs in zip(outs, var_attrs):
var_name = attrs.pop("var_name")
out.attrs.update(attrs)
out.name = var_name
# Mask results that do not meet criteria defined by the `missing` method.
# This means all variables must have the same dimensions...
mask = self._mask(*das.values(), **ba.arguments)
outs = [out.where(~mask) for out in outs]
# Return a single DataArray in case of single output, otherwise a tuple
if n_outs == 1:
return outs[0]
return tuple(outs)
def _assign_named_args(self, ba):
"""Assign inputs passed as strings from ds."""
ds = ba.arguments.pop("ds")
for name, param in self._sig.parameters.items():
if param.annotation is Union[str, DataArray] and isinstance(
ba.arguments[name], str
):
if ds is not None:
try:
ba.arguments[name] = ds[ba.arguments[name]]
except KeyError:
raise MissingVariableError(
f"For input '{name}', variable '{ba.arguments[name]}' was not found in the input dataset."
)
else:
raise ValueError(
f"Passing variable names as string requires giving the `ds` dataset (got {name}='{ba.arguments[name]}')"
)
def _bind_call(self, func, **das):
"""Call function using `__call__` `DataArray` arguments.
This will try to bind keyword arguments to `func` arguments. If this fails, `func` is called with positional
arguments only.
Notes
-----
This method is used to support two main use cases.
In use case #1, we have two compute functions with arguments in a different order:
`func1(tasmin, tasmax)` and `func2(tasmax, tasmin)`
In use case #2, we have two compute functions with arguments that have different names:
`generic_func(da)` and `custom_func(tas)`
For each case, we want to define a single `cfcheck` and `datacheck` methods that will work with both compute
functions.
Passing a dictionary of arguments will solve #1, but not #2.
"""
# First try to bind arguments to function.
try:
ba = signature(func).bind(**das)
except TypeError:
# If this fails, simply call the function using positional arguments
return func(*das.values())
else:
# Call the func using bound arguments
return func(*ba.args, **ba.kwargs)
@classmethod
def _update_attrs(cls, ba, das, attrs, var_id=None, names=None):
"""Format attributes with the run-time values of `compute` call parameters.
Cell methods and xclim_history attributes are updated, adding to existing values. The language of the string is
taken from the `OPTIONS` configuration dictionary.
Parameters
----------
das: tuple
Input arrays.
ba: bound argument object
Keyword arguments of the `compute` call.
attrs : Mapping[str, str]
The attributes to format and update.
var_id : str
The identifier to use when requesting the attributes translations.
Defaults to the class name (for the translations) or the `identifier` field of the class (for the xclim_history attribute).
If given, the identifier will be converted to uppercase to get the translation attributes.
This is meant for multi-outputs indicators.
names : Sequence[str]
List of attribute names for which to get a translation.
Returns
-------
dict
Attributes with {} expressions replaced by call argument values. With updated `cell_methods` and `xclim_history`.
            `cell_methods` is not added if `names` is given and does not contain `cell_methods`.
"""
args = ba.arguments
out = cls._format(attrs, args)
for locale in OPTIONS["metadata_locales"]:
out.update(
cls._format(
get_local_attrs(
var_id or cls._registry_id,
locale,
names=names or list(attrs.keys()),
append_locale_name=True,
),
args=args,
formatter=get_local_formatter(locale),
)
)
# Generate a signature string for the history attribute
# We remove annotations, replace default float/int/str by values
# and replace others by type
callstr = []
for (k, v) in das.items():
callstr.append(f"{k}=<array>")
for (k, v) in ba.arguments.items():
if isinstance(v, (float, int, str)):
callstr.append(f"{k}={v!r}") # repr so strings have ' '
else:
callstr.append(
f"{k}={type(v)}"
) # don't take chance of having unprintable values
# Get history and cell method attributes from source data
attrs = defaultdict(str)
if names is None or "cell_methods" in names:
attrs["cell_methods"] = merge_attributes(
"cell_methods", new_line=" ", missing_str=None, **das
)
if "cell_methods" in out:
attrs["cell_methods"] += " " + out.pop("cell_methods")
attrs["xclim_history"] = update_history(
f"{var_id or cls._registry_id}({', '.join(callstr)})",
new_name=out.get("var_name"),
**das,
)
attrs.update(out)
return attrs
@staticmethod
def _check_identifier(identifier: str) -> None:
"""Verify that the identifier is a proper slug."""
if not re.match(r"^[-\w]+$", identifier):
warnings.warn(
"The identifier contains non-alphanumeric characters. It could make life "
"difficult for downstream software reusing this class.",
UserWarning,
)
def translate_attrs(
self, locale: Union[str, Sequence[str]], fill_missing: bool = True
):
"""Return a dictionary of unformated translated translatable attributes.
Translatable attributes are defined in :py:const:`xclim.core.locales.TRANSLATABLE_ATTRS`.
Parameters
----------
locale : Union[str, Sequence[str]]
The POSIX name of the locale or a tuple of a locale name and a path to a
json file defining the translations. See `xclim.locale` for details.
fill_missing : bool
            If True (default), fill the missing attributes with their English values.
"""
def _translate(var_id, var_attrs, names):
attrs = get_local_attrs(
var_id,
locale,
names=names,
append_locale_name=False,
)
if fill_missing:
for name in names:
if name not in attrs and var_attrs.get(name):
attrs[name] = var_attrs.get(name)
return attrs
# Translate global attrs
attrid = self._registry_id
attrs = _translate(
attrid,
self.__dict__,
# Translate only translatable attrs that are not variable attrs
set(TRANSLATABLE_ATTRS).difference(set(self._cf_names)),
)
# Translate variable attrs
attrs["outputs"] = []
for var_attrs in self.cf_attrs: # Translate for each variable
if len(self.cf_attrs) > 1:
attrid = f"{self.registry_id}.{var_attrs['var_name']}"
attrs["outputs"].append(_translate(attrid, var_attrs, TRANSLATABLE_ATTRS))
return attrs
def json(self, args=None):
"""Return a serializable dictionary representation of the class.
Parameters
----------
args : mapping, optional
Arguments as passed to the call method of the indicator.
If not given, the default arguments will be used when formatting the attributes.
Notes
-----
This is meant to be used by a third-party library wanting to wrap this class into another interface.
"""
names = ["identifier", "title", "abstract", "keywords"]
out = {key: getattr(self, key) for key in names}
out = self._format(out, args)
# Format attributes
out["outputs"] = [self._format(attrs, args) for attrs in self.cf_attrs]
out["notes"] = self.notes
# We need to deepcopy, otherwise empty defaults get overwritten!
# All those tweaks are to ensure proper serialization of the returned dictionary.
out["parameters"] = deepcopy(self.parameters)
for param in out["parameters"].values():
if param["default"] is _empty:
param.pop("default")
param["kind"] = param["kind"].value # Get the int.
if "choices" in param: # A set is stored, convert to list
param["choices"] = list(param["choices"])
return out
@classmethod
def _format(
cls,
attrs: dict,
args: dict = None,
formatter: AttrFormatter = default_formatter,
):
"""Format attributes including {} tags with arguments.
Parameters
----------
attrs: dict
Attributes containing tags to replace with arguments' values.
args : dict, optional
Function call arguments. If not given, the default arguments will be used when formatting the attributes.
formatter : AttrFormatter
"""
# Use defaults
if args is None:
args = {k: v["default"] for k, v in cls.parameters.items()}
args.update(getattr(cls._indcompute, "_injected", {}))
out = {}
for key, val in attrs.items():
mba = {"indexer": "annual"}
# Add formatting {} around values to be able to replace them with _attrs_mapping using format.
for k, v in args.items():
if isinstance(v, dict):
if v:
dk, dv = v.copy().popitem()
if dk == "month":
dv = "m{}".format(dv)
mba[k] = dv
elif isinstance(v, units.Quantity):
mba[k] = "{:g~P}".format(v)
elif isinstance(v, (int, float)):
mba[k] = "{:g}".format(v)
else:
mba[k] = v
if callable(val):
val = val(**mba)
out[key] = formatter.format(val, **mba)
if key in cls._text_fields:
out[key] = out[key].strip().capitalize()
return out
def _default_freq(self, **indexer):
"""Return default frequency."""
if self.freq in ["D", "H"]:
return indices.generic.default_freq(**indexer)
return None
def _mask(self, *args, **kwds):
"""Return whether mask for output values, based on the output of the `missing` method."""
from functools import reduce
indexer = kwds.get("indexer") or {}
freq = kwds.get("freq") if "freq" in kwds else self._default_freq(**indexer)
options = self.missing_options or OPTIONS[MISSING_OPTIONS].get(self.missing, {})
# We flag periods according to the missing method.
miss = (self._missing(da, freq, self.freq, options, indexer) for da in args)
return reduce(np.logical_or, miss)
# The following static methods are meant to be replaced to define custom indicators.
@staticmethod
def compute(*args, **kwds):
"""Compute the indicator.
This would typically be a function from `xclim.indices`.
"""
raise NotImplementedError
@staticmethod
def cfcheck(**das):
"""Compare metadata attributes to CF-Convention standards.
When subclassing this method, use functions decorated using `xclim.core.options.cfcheck`.
"""
return True
@staticmethod
def datacheck(**das):
"""Verify that input data is valid.
When subclassing this method, use functions decorated using `xclim.core.options.datacheck`.
For example, checks could include:
- assert temporal frequency is daily
- assert no precipitation is negative
- assert no temperature has the same value 5 days in a row
"""
return True
class Indicator2D(Indicator):
"""Indicator using two dimensions."""
nvar = 2
class Daily(Indicator):
"""Indicator defined for inputs at daily frequency."""
freq = "D"
@staticmethod
def datacheck(**das): # noqa
for key, da in das.items():
datachecks.check_daily(da)
class Daily2D(Daily):
"""Indicator using two dimensions at daily frequency."""
nvar = 2
class Hourly(Indicator):
"""Indicator defined for inputs at strict hourly frequency, meaning 3-hourly inputs would raise an error."""
freq = "H"
@staticmethod
def datacheck(**das): # noqa
for key, da in das.items():
datachecks.check_freq(da, "H")
def _parse_indice(indice: Callable, passed=None, **new_kwargs):
"""Parse an indice function and return corresponding elements needed for constructing an indicator.
Parameters
----------
indice : Callable
A indice function, written according to xclim's guidelines.
new_kwargs :
Mapping from name to dicts containing the necessary info for injecting new keyword-only
arguments into the indice_wrapper function. The meta dict can include (all optional):
`default`, `description`, `annotation`.
Returns
-------
indice_wrapper : callable
A function with a new signature including the injected args in new_kwargs.
docmeta : Mapping[str, str]
A dictionary of the metadata attributes parsed in the docstring.
params : Mapping[str, Mapping[str, Any]]
A dictionary of metadata for each input parameter of the indice. The metadata dictionaries
include the following entries: "default", "description", "kind" and, optionally, "choices" and "units".
"kind" is one of the constants in :py:class:`xclim.core.utils.InputKind`.
"""
# Base signature
sig = signature(indice)
passed = passed or {}
# Update
def _upd_param(param):
# Required DataArray arguments receive their own name as new default
# + the Union[str, DataArray] annotation
# Parameters with no default receive None
if param.kind in [param.VAR_KEYWORD, param.VAR_POSITIONAL]:
return param
if param.annotation is DataArray:
annot = Union[str, DataArray]
else:
annot = param.annotation
default = passed.get(param.name, {}).get("default", param.default)
if default is _empty:
if param.annotation is DataArray:
default = param.name
else:
default = None
return Parameter(
param.name,
            # We keep the kind, except we replace POSITIONAL_ONLY by POSITIONAL_OR_KEYWORD
max(param.kind, 1),
default=default,
annotation=annot,
)
# Parse all parameters, replacing annotations and default where needed and possible.
new_params = list(map(_upd_param, sig.parameters.values()))
# Injection
for name, meta in new_kwargs.items():
        # ds argument
param = Parameter(
name,
Parameter.KEYWORD_ONLY,
default=meta.get("default"),
annotation=meta.get("annotation"),
)
if new_params[-1].kind == Parameter.VAR_KEYWORD:
new_params.insert(-1, param)
else:
new_params.append(param)
# Create new compute function to be wrapped in __call__
indice_wrapper = copy_function(indice)
indice_wrapper.__signature__ = new_sig = sig.replace(parameters=new_params)
indice_wrapper.__doc__ = indice.__doc__
# Docstring parsing
parsed = parse_doc(indice.__doc__)
# Extract params and pop those not in the signature.
params = parsed.pop("parameters", {})
for dropped in set(params.keys()) - set(new_sig.parameters.keys()):
params.pop(dropped)
if hasattr(indice, "in_units"):
# Try to put units
for var, ustr in indice.in_units.items():
if var in params:
params[var]["units"] = ustr
# Fill default values and annotation in parameter doc
for name, param in new_sig.parameters.items():
if name in new_kwargs and "description" in new_kwargs[name]:
params[name] = {"description": new_kwargs[name]["description"]}
param_doc = params.setdefault(name, {"description": ""})
param_doc["default"] = param.default
param_doc["kind"] = infer_kind_from_parameter(param, "units" in param_doc)
param_doc.update(passed.get(name, {}))
return indice_wrapper, parsed, params
def build_indicator_module(
name: str,
objs: Mapping[str, Indicator],
doc: Optional[str] = None,
) -> ModuleType:
"""Create a module from imported objects.
The module is inserted as a submodule of `xclim.indicators`.
Parameters
----------
name : str
New module name. If it already exists, the module is extended with the passed objects,
overwriting those with same names.
objs : dict
Mapping of the indicators to put in the new module. Keyed by the name they will take in that module.
doc : str
Docstring of the new module. Defaults to a simple header. Invalid if the module already exists.
Returns
-------
ModuleType
        An indicator module built from a mapping of Indicators.
"""
from xclim import indicators
if hasattr(indicators, name):
if doc is not None:
warnings.warn(
"Passed docstring ignored when extending existing module.", stacklevel=1
)
out = getattr(indicators, name)
else:
doc = doc or f"{name.capitalize()} indicators\n" + "=" * (len(name) + 11)
try:
out = ModuleType(name, doc)
except TypeError as err:
raise TypeError(f"Module '{name}' is not properly formatted") from err
indicators.__dict__[name] = out
out.__dict__.update(objs)
return out
def build_indicator_module_from_yaml(
filename: PathLike,
name: Optional[str] = None,
base: Type[Indicator] = Daily,
doc: Optional[str] = None,
mode: str = "raise",
realm: Optional[str] = None,
keywords: Optional[str] = None,
references: Optional[str] = None,
notes: Optional[str] = None,
) -> ModuleType:
"""Build or extend an indicator module from a YAML file.
The module is inserted as a submodule of `xclim.indicators`.
Parameters
----------
filename: PathLike
Path to a YAML file.
name: str, optional
The name of the new or existing module, defaults to the name of the file.
(e.g: `atmos.yml` -> `atmos`)
base: Indicator subclass
        The Indicator subclass from which the new indicators are based. Superseded by
the class given in the yaml file or in individual indicator definitions (see submodule's doc).
doc : str, optional
The docstring of the new submodule. Defaults to a very minimal header with the submodule's name.
mode: {'raise', 'warn', 'ignore'}
How to deal with broken indice definitions.
realm: str, optional
keywords: str, optional
Comma separated keywords.
references: str, optional
Source citations.
notes: str, optional
Other indicator attributes that would apply to all indicators in this module.
Values given here are overridden by the ones given in individual definition, but
        they override the ones given at top-level in the YAML file.
Returns
-------
ModuleType
A submodule of `xclim.indicators`.
See also
--------
The doc of :py:mod:`xclim.core.indicator` and of :py:func:`build_module`.
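    Examples
    --------
    A hypothetical call (sketch only; ``example.yml`` stands in for a user-provided file following
    the structure described in :py:mod:`xclim.core.indicator`):

    .. code-block:: python

        import xclim
        from xclim.core.indicator import build_indicator_module_from_yaml

        build_indicator_module_from_yaml("example.yml", name="example", mode="warn")
        # The new indicators are then available as xclim.indicators.example.<identifier>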
"""
# Read YAML file
filepath = Path(filename)
with filepath.open() as f:
yml = safe_load(f)
# Load values from top-level in yml.
# Priority of arguments differ.
module_name = name or yml.get("module", filepath.stem)
default_base = registry.get(yml.get("base"), base)
doc = doc or yml.get("doc")
# Module-wide default values for some attributes
defkwargs = {
# We can override the module of indicators in their init (weird but cool)
# This way, submodule indicators are prefixed with the module name in the registry.
"module": module_name,
# Other default argument, only given in case the indicator definition does not give them.
"realm": realm or yml.get("realm"),
"keywords": keywords or yml.get("keywords"),
"references": references or yml.get("references"),
"notes": notes or yml.get("notes"),
}
# Parse the indicators:
mapping = {}
for identifier, data in yml["indices"].items():
# clix-meta has illegal characters in the identifiers.
clean_id = identifier.replace("{", "").replace("}", "")
try:
if "base" in data:
base = registry[data["base"].upper()]
else:
base = default_base
mapping[clean_id] = base.from_dict(data, clean_id, **defkwargs)
except Exception as err:
msg = f"Constructing {identifier} failed with {err!r}"
if mode == "ignore":
logging.info(msg)
elif mode == "warn":
warnings.warn(msg)
else: # mode == "raise"
raise ValueError(msg) from err
# Construct module
return build_indicator_module(module_name, objs=mapping, doc=doc)
|
py | b408491e30f6beaacdfd3a83319455d868e56951 | # Copyright (c) 2012, 2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Mitch Hayenga
from ClockedObject import ClockedObject
from m5.params import *
from m5.proxy import *
class BasePrefetcher(ClockedObject):
type = 'BasePrefetcher'
abstract = True
cxx_header = "mem/cache/prefetch/base.hh"
sys = Param.System(Parent.any, "System this prefetcher belongs to")
on_miss = Param.Bool(False, "Only notify prefetcher on misses")
on_read = Param.Bool(True, "Notify prefetcher on reads")
on_write = Param.Bool(True, "Notify prefetcher on writes")
on_data = Param.Bool(True, "Notify prefetcher on data accesses")
on_inst = Param.Bool(True, "Notify prefetcher on instruction accesses")
class DynamicPrefetcher(BasePrefetcher):
type = 'DynamicPrefetcher'
cxx_class = "DynamicPrefetcher"
cxx_header = "mem/cache/prefetch/dynamic.hh"
instruction_interval = Param.Int(200, "Instruction interval")
class QueuedPrefetcher(BasePrefetcher):
type = "QueuedPrefetcher"
abstract = True
cxx_class = "QueuedPrefetcher"
cxx_header = "mem/cache/prefetch/queued.hh"
latency = Param.Int(1, "Latency for generated prefetches")
queue_size = Param.Int(32, "Maximum number of queued prefetches")
queue_squash = Param.Bool(True, "Squash queued prefetch on demand access")
queue_filter = Param.Bool(True, "Don't queue redundant prefetches")
cache_snoop = Param.Bool(False, "Snoop cache to eliminate redundant request")
tag_prefetch = Param.Bool(True, "Tag prefetch with PC of generating access")
class StridePrefetcher(QueuedPrefetcher):
type = 'StridePrefetcher'
cxx_class = 'StridePrefetcher'
cxx_header = "mem/cache/prefetch/stride.hh"
max_conf = Param.Int(7, "Maximum confidence level")
thresh_conf = Param.Int(4, "Threshold confidence level")
min_conf = Param.Int(0, "Minimum confidence level")
start_conf = Param.Int(4, "Starting confidence for new entries")
table_sets = Param.Int(16, "Number of sets in PC lookup table")
table_assoc = Param.Int(4, "Associativity of PC lookup table")
use_master_id = Param.Bool(True, "Use master id based history")
degree = Param.Int(4, "Number of prefetches to generate")
class TaggedPrefetcher(QueuedPrefetcher):
type = 'TaggedPrefetcher'
cxx_class = 'TaggedPrefetcher'
cxx_header = "mem/cache/prefetch/tagged.hh"
degree = Param.Int(2, "Number of prefetches to generate")
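# Example usage in a cache configuration script (a sketch; it assumes a BaseCache-derived cache
# object exposing the usual `prefetcher` parameter):
#
#   system.l2cache.prefetcher = StridePrefetcher(degree=8)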
|
py | b408492b5b1df2b2af32b38db8dc5c02d0397d32 | #!/usr/bin/env python3
# Run from the current directory.
import argparse
import testconfig
import logging
import subprocess
from pathlib import Path
from typing import List
def subdirs(root_dirs: List[str]) -> List[Path]:
dirs: List[Path] = []
for r in root_dirs:
dirs.extend(Path(r).rglob("**/"))
return [d for d in dirs if not (any(c.startswith("_") for c in d.parts) or any(c.startswith(".") for c in d.parts))]
def module_dict(module):
return {k: v for k, v in module.__dict__.items() if not k.startswith("_")}
def dir_config(dir):
import importlib
module_name = str(dir).replace("/", ".") + ".config"
try:
return module_dict(importlib.import_module(module_name))
except ModuleNotFoundError:
return {}
def read_file(filename):
    with open(filename) as f:
        return f.read()
# Input file
def workflow_yaml_template_text(os):
return Path(f"runner-templates/build-test-{os}").read_text()
# Output files
def workflow_yaml_file(dir, os, test_name):
return Path(dir / f"build-test-{os}-{test_name}.yml")
# String function from test dir to test name
def test_name(dir):
return str(dir).replace("/", "-")
def transform_template(template_text, replacements):
t = template_text
for r, v in replacements.items():
t = t.replace(r, v)
return t
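# A small worked example (hypothetical values):
#
#     transform_template("timeout-minutes: JOB_TIMEOUT", {"JOB_TIMEOUT": "30"})
#     # -> "timeout-minutes: 30"; every occurrence of each key is replaced.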
def test_files_in_dir(dir):
g = dir.glob("test_*.py")
return [] if g is None else [f for f in g]
# -----
default_replacements = {
"INSTALL_TIMELORD": read_file("runner-templates/install-timelord.include.yml").rstrip(),
"CHECKOUT_TEST_BLOCKS_AND_PLOTS": read_file("runner-templates/checkout-test-plots.include.yml").rstrip(),
"TEST_DIR": "",
"TEST_NAME": "",
"PYTEST_PARALLEL_ARGS": "",
}
# -----
# Replace with update_config
def generate_replacements(defaults, conf, dir, test_files):
assert len(test_files) > 0
replacements = dict(defaults)
if not conf["checkout_blocks_and_plots"]:
replacements[
"CHECKOUT_TEST_BLOCKS_AND_PLOTS"
] = "# Omitted checking out blocks and plots repo Stai-Network/test-cache"
if not conf["install_timelord"]:
replacements["INSTALL_TIMELORD"] = "# Omitted installing Timelord"
if conf["parallel"]:
replacements["PYTEST_PARALLEL_ARGS"] = " -n auto"
if conf["job_timeout"]:
replacements["JOB_TIMEOUT"] = str(conf["job_timeout"])
test_paths = ["tests/" + str(f) for f in test_files]
# We have to list the test files individually until pytest has the
# option to only collect tests in the named dir, and not those below
replacements["TEST_DIR"] = " ".join(sorted(test_paths))
replacements["TEST_NAME"] = test_name(str(dir))
if "test_name" in conf:
replacements["TEST_NAME"] = conf["test_name"]
for var in conf["custom_vars"]:
replacements[var] = conf[var] if var in conf else ""
return replacements
# Overwrite with directory specific values
def update_config(parent, child):
if child is None:
return parent
conf = child
for k, v in parent.items():
if k not in child:
conf[k] = v
return conf
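# A small worked example (hypothetical values): keys present in the child win,
# missing keys fall back to the parent.
#
#     update_config({"parallel": False, "job_timeout": 30}, {"parallel": True})
#     # -> {"parallel": True, "job_timeout": 30}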
def dir_path(string):
p = Path(string)
if p.is_dir():
return p
else:
raise NotADirectoryError(string)
# args
arg_parser = argparse.ArgumentParser(description="Build github workflows")
arg_parser.add_argument("--output-dir", "-d", default="../.github/workflows", type=dir_path)
arg_parser.add_argument("--verbose", "-v", action="store_true")
args = arg_parser.parse_args()
if args.verbose:
logging.basicConfig(format="%(asctime)s:%(message)s", level=logging.DEBUG)
# main
test_dirs = subdirs(testconfig.root_test_dirs)
for os in testconfig.oses:
template_text = workflow_yaml_template_text(os)
for dir in test_dirs:
test_files = test_files_in_dir(dir)
if len(test_files) == 0:
logging.info(f"Skipping {dir}: no tests collected")
continue
conf = update_config(module_dict(testconfig), dir_config(dir))
replacements = generate_replacements(default_replacements, conf, dir, test_files)
txt = transform_template(template_text, replacements)
logging.info(f"Writing {os}-{test_name(dir)}")
workflow_yaml_file(args.output_dir, os, test_name(dir)).write_text(txt)
out = subprocess.run(["git", "diff", args.output_dir], capture_output=True, text=True)
if out.stdout:
print(out.stdout)
|
py | b4084939e6ef34187f4a6143f1866cbd7d47de45 | #####################################################
#
# A library for accessing FRC team media via the Blue Alliance API
#
# Authors: Andrew Merrill and Jacob Bendicksen (Fall 2014)
#
######################################################
import blueapi
def getTeamMedia(teamNumber,year):
mediaList = []
media = blueapi.getTeamMedia(teamNumber,year)
for n in range(0,len(media)):
if media[n]['type'] == "cdphotothread":
mediaList.append("http://www.chiefdelphi.com/media/img/" + media[n]['details']['image_partial'])
elif media[n]['type'] == "youtube":
mediaList.append("www.youtube.com/watch?v=" + media[n]['foreign_key'] + "&feature=youtu.be")
return mediaList
print(getTeamMedia(254, 2014))
|
py | b4084a3d00f3cff03cce0c1e53a3ad0c0e4175fe | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.21
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Endpoints(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'subsets': 'list[V1EndpointSubset]'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'subsets': 'subsets'
}
def __init__(self, api_version=None, kind=None, metadata=None, subsets=None, local_vars_configuration=None): # noqa: E501
"""V1Endpoints - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._subsets = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if subsets is not None:
self.subsets = subsets
@property
def api_version(self):
"""Gets the api_version of this V1Endpoints. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Endpoints. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Endpoints.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Endpoints. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Endpoints. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Endpoints. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Endpoints.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Endpoints. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Endpoints. # noqa: E501
:return: The metadata of this V1Endpoints. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Endpoints.
:param metadata: The metadata of this V1Endpoints. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def subsets(self):
"""Gets the subsets of this V1Endpoints. # noqa: E501
The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service. # noqa: E501
:return: The subsets of this V1Endpoints. # noqa: E501
:rtype: list[V1EndpointSubset]
"""
return self._subsets
@subsets.setter
def subsets(self, subsets):
"""Sets the subsets of this V1Endpoints.
The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service. # noqa: E501
:param subsets: The subsets of this V1Endpoints. # noqa: E501
:type: list[V1EndpointSubset]
"""
self._subsets = subsets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Endpoints):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Endpoints):
return True
return self.to_dict() != other.to_dict()
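# A minimal construction sketch (illustrative values; assumes the generated
# `kubernetes.client` package that ships this model):
#
#     from kubernetes import client
#     endpoints = client.V1Endpoints(
#         api_version="v1",
#         kind="Endpoints",
#         metadata=client.V1ObjectMeta(name="my-service"),
#     )
#     print(endpoints.to_dict())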
|
py | b4084db34f274ece4846d90ef2a94b57e3f5e2f4 | # Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""paresr test for outputlog"""
import unittest
import os
from pysper.parser import outputlog
from pysper import parser
from tests import test_dir
class TestOutputLog(unittest.TestCase):
"""output log tests"""
def test_versions_from_output_log(self):
"""retrieve server versions present"""
output_log_path = os.path.join(test_dir(), "output.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
solr_version = None
spark_version = None
dse_spark_connector_version = None
for event in events:
if "spark_version" in event:
spark_version = event["spark_version"]
if "dse_spark_connector_version" in event:
dse_spark_connector_version = event["dse_spark_connector_version"]
if "solr_version" in event:
solr_version = event["solr_version"]
if "version" in event:
dse_version = event["version"]
self.assertEqual(dse_version, "6.7.7")
self.assertEqual(solr_version, "6.0.1.2.2647")
self.assertEqual(spark_version, "2.2.3.9")
self.assertEqual(dse_spark_connector_version, "6.7.7")
def test_versions_cassandra_21(self):
"""retrieve server versions present"""
output_log_path = os.path.join(test_dir(), "cassandra21", "system.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
cassandra_version = None
for event in events:
if "cassandra_version" in event:
cassandra_version = event["cassandra_version"]
self.assertEqual(cassandra_version, "2.1.21")
def test_versions_cassandra_22(self):
"""retrieve server versions present"""
output_log_path = os.path.join(test_dir(), "cassandra22", "system.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
cassandra_version = None
for event in events:
if "cassandra_version" in event:
cassandra_version = event["cassandra_version"]
self.assertEqual(cassandra_version, "2.2.15")
def test_versions_cassandra_30(self):
"""retrieve server versions present"""
output_log_path = os.path.join(test_dir(), "cassandra30", "system.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
cassandra_version = None
for event in events:
if "cassandra_version" in event:
cassandra_version = event["cassandra_version"]
self.assertEqual(cassandra_version, "3.0.19")
def test_versions_cassandra_311(self):
"""retrieve server versions present"""
output_log_path = os.path.join(test_dir(), "cassandra311", "system.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
cassandra_version = None
for event in events:
if "cassandra_version" in event:
cassandra_version = event["cassandra_version"]
self.assertEqual(cassandra_version, "3.11.5")
def test_parser_output_log(self):
"""make sure the parse_log gets the configuration from the log"""
output_log_path = os.path.join(test_dir(), "output.log")
with open(output_log_path, "r") as log:
events = list(parser.read_output_log(log))
cpu_cores, threads_per_core, ram_in_mb, heap_size, gc_type = (
None,
None,
None,
None,
None,
)
disk_access_mode, index_access_mode, commit_log_access_mode = (
None,
None,
None,
)
for event in events:
if "cpu_cores" in event:
cpu_cores = event["cpu_cores"]
if "threads_per_core" in event:
threads_per_core = event["threads_per_core"]
if "jvm_args" in event:
if "-Ddse.system_memory_in_mb" in event["jvm_args"]:
for ram in event["jvm_args"]["-Ddse.system_memory_in_mb"]:
# just get the last one
ram_in_mb = ram
for args in event["jvm_args"]:
if args.startswith("-Xmx"):
heap_size = "".join(args[4:])
if args == "-XX:+UseG1GC":
gc_type = "G1GC"
if "logged_disk_access_mode" in event:
disk_access_mode = event["logged_disk_access_mode"]
if "logged_index_access_mode" in event:
index_access_mode = event["logged_index_access_mode"]
if "logged_commit_log_access_mode" in event:
commit_log_access_mode = event["logged_commit_log_access_mode"]
self.assertEqual(disk_access_mode, "standard")
self.assertEqual(index_access_mode, "standard")
self.assertEqual(commit_log_access_mode, "standard")
self.assertEqual(cpu_cores, 8)
self.assertEqual(threads_per_core, 1)
self.assertEqual(ram_in_mb, "15038")
self.assertEqual(gc_type, "G1GC")
self.assertEqual(heap_size, "3759M")
def test_parse_disk_access_mode_48(self):
"""validate the 4.8 modes are handled correctly"""
line = "INFO 10:13:16,088 DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap"
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "mmap")
self.assertEqual(fields["logged_index_access_mode"], "mmap")
def test_parse_disk_access_mode_50(self):
"""validate the 5.0 modes are handled correctly"""
line = "INFO 11:15:02,584 DatabaseDescriptor.java:320 - DiskAccessMode 'auto' determined to be mmap, indexAccessMode is mmap"
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "mmap")
self.assertEqual(fields["logged_index_access_mode"], "mmap")
line = "INFO 11:12:24,303 DatabaseDescriptor.java:326 - DiskAccessMode is standard, indexAccessMode is mmap"
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "standard")
self.assertEqual(fields["logged_index_access_mode"], "mmap")
line = "INFO 11:13:34,429 DatabaseDescriptor.java:331 - DiskAccessMode is standard, indexAccessMode is standard"
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "standard")
self.assertEqual(fields["logged_index_access_mode"], "standard")
def test_parse_disk_access_mode_51(self):
"""validates 5.1 parses correctly"""
line = (
"INFO [main] 2018-01-09 12:18:13,157 DatabaseDescriptor.java:374 - "
+ "DiskAccessMode is standard, indexAccessMode is mmap"
)
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "standard")
self.assertEqual(fields["logged_index_access_mode"], "mmap")
def test_parse_disk_access_mode_60(self):
"""validates 6.0 which is a totally new format parses correctly"""
line = (
"INFO [main] 2018-01-09 12:32:23,568 DatabaseDescriptor.java:425 - "
+ "DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard"
)
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["logged_disk_access_mode"], "standard")
self.assertEqual(fields["logged_index_access_mode"], "standard")
self.assertEqual(fields["logged_commit_log_access_mode"], "standard")
def test_parse_threads_per_core(self):
"""validates the threads per core log format"""
line = "INFO [main] 2017-01-11 12:19:06,187 DseConfig.java:455 - This instance appears to have 2 threads per CPU core and 16 total CPU threads."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 2)
self.assertEqual(fields["cpu_cores"], 16)
def test_1_thread_per_core_long_format(self):
"""thread instead of threads per core"""
line = "INFO [main] 2018-01-09 10:12:11,864 DseConfig.java:448 - This instance appears to have 1 thread per CPU core and 8 total CPU threads."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 1)
self.assertEqual(fields["cpu_cores"], 8)
def test_parse_threads_per_core_short_format(self):
"""validates the threads per core log format"""
line = "INFO 01:07:18,474 This instance appears to have 2 threads per CPU core and 8 total CPU threads."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 2)
self.assertEqual(fields["cpu_cores"], 8)
def test_parse_1_thread_per_core_short_format(self):
"""validates the threads per core log format"""
line = "INFO 01:06:12,474 This instance appears to have 1 thread per CPU core and 8 total CPU threads."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 1)
self.assertEqual(fields["cpu_cores"], 8)
def test_parse_threads_per_core_old_format(self):
"""pre 5.1 behavior"""
line = "INFO 10:12:10,183 DseConfig.java:437 - This machine appears to have 1 thread per CPU core."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 1)
def test_parse_threads_per_core_old_format_with_2_cores(self):
"""pre 5.1 behavior"""
line = "INFO 10:12:10,382 DseConfig.java:437 - This machine appears to have 2 threads per CPU core."
fields = outputlog.capture_line(line)
self.assertIsNotNone(fields)
self.assertEqual(fields["threads_per_core"], 2)
|
py | b4084de110e9b37ba76bf9eb39a71abd20efbd93 | """
WSGI config for andzy_calc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'andzy_calc.settings')
application = get_wsgi_application()
|
py | b4084deafb9156982d39dce44a4fbe1dc2c07918 | import cv2
def change_brightness(img, value=30):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
v = cv2.add(v,value)
v[v > 255] = 255
v[v < 0] = 0
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
for datax in range(0,1):
# Opens the inbuilt camera of laptop to capture video.
cap = cv2.VideoCapture(0)
i = 0
while(cap.isOpened()):
ret, frame = cap.read()
# This condition prevents from infinite looping
# incase video ends.
if ret == False:
break
if i!=64:
# Save Frame by Frame into disk using imwrite method
dim = (28,28)
img=cv2.resize(frame,dim)
#cv2.imwrite(str(i)+'.jpg', img)
#img = change_brightness(img, value=-100) #decreases
cv2.imwrite("images/"+str(i)+".jpg", img)
i += 1
print(i)
else:
break
cap.release()
cv2.destroyAllWindows()
import os
import random
from PIL import Image, ImageOps
import numpy as np
for j in range(0,8):
for i in range(0,8):
# read the images
img1 = cv2.imread('images/'+str(j*8+i)+".jpg")
if i!=0:
# vertically concatenates images
# of same width
im_v = cv2.hconcat([im_v, img1])
else:
im_v = img1
if j!=0:
im_h = cv2.vconcat([im_h, im_v])
else:
im_h = im_v
# write the stitched output image to disk
cv2.imwrite('img.jpg', im_h) |
py | b4084e46c20a023d74663662fc3c87453203f15d | '''
Copyright (C) 2012 STFC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Kevin Haines, Will Rogers
The crypto module calls openssl command line directly, using subprocess.
We investigated python's crypto libraries (all openssl bindings) and
found that none were mature enough to implement the SMIME crypto we had
decided on.
'''
from subprocess import Popen, PIPE
import quopri
import base64
import logging
# logging configuration
log = logging.getLogger(__name__)
# Valid ciphers
CIPHERS = ['aes128', 'aes192', 'aes256']
class CryptoException(Exception):
'''
Exception for use by the crypto module.
'''
pass
def _from_file(filename):
'''
Convenience function to read entire file into string.
'''
f = open(filename, 'r')
s = f.read()
f.close()
return s
def check_cert_key(certpath, keypath):
'''
Check that a certificate and a key match, using openssl directly to fetch
the modulus of each, which must be the same.
'''
try:
cert = _from_file(certpath)
key = _from_file(keypath)
except IOError, e:
log.error('Could not find cert or key file: %s', e)
return False
# Two things the same have the same modulus.
if cert == key:
return False
p1 = Popen(['openssl', 'x509', '-noout', '-modulus'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
modulus1, error = p1.communicate(cert)
if error != '':
log.error(error)
return False
p2 = Popen(['openssl', 'rsa', '-noout', '-modulus'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
modulus2, error = p2.communicate(key)
if error != '':
log.error(error)
return False
return modulus1.strip() == modulus2.strip()
def sign(text, certpath, keypath):
'''
Sign the specified message using the certificate and key in the files specified.
Returns the signed message as an SMIME string, suitable for transmission.
'''
try:
p1 = Popen(['openssl', 'smime', '-sign', '-inkey', keypath, '-signer', certpath, '-text'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
signed_msg, error = p1.communicate(text)
if (error != ''):
log.error(error)
return signed_msg
except OSError, e:
log.error('Failed to sign message: %s', e)
raise CryptoException('Message signing failed. Check cert and key permissions.')
def encrypt(text, certpath, cipher='aes128'):
'''
Encrypt the specified message using the certificate string.
Returns the encrypted SMIME text suitable for transmission
'''
if cipher not in CIPHERS:
raise CryptoException('Invalid cipher %s.' % cipher)
cipher = '-' + cipher
# encrypt
p1 = Popen(['openssl', 'smime', '-encrypt', cipher, certpath],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
enc_txt, error = p1.communicate(text)
if (error != ''):
log.error(error)
return enc_txt
def verify(signed_text, capath, check_crl):
'''
Verify the signed message has been signed by the certificate (attached to the
supplied SMIME message) it claims to have, by one of the accepted CAs in
capath.
Returns a tuple including the signer's certificate and the plain-text of the
message if it has been verified. If the content transfer encoding is specified
as 'quoted-printable' or 'base64', decode the message body accordingly.
'''
if signed_text is None or capath is None:
raise CryptoException('Invalid None argument to verify().')
# This ensures that openssl knows that the string is finished.
# It makes no difference if the signed message is correct, but
# prevents it from hanging in the case of an empty string.
signed_text += '\n\n'
signer = get_signer_cert(signed_text)
if not verify_cert(signer, capath, check_crl):
raise CryptoException('Unverified signer')
# The -noverify flag removes the certificate verification. The certificate
# is verified above; this check would also check that the certificate
# is allowed to sign with SMIME, which host certificates sometimes aren't.
p1 = Popen(['openssl', 'smime', '-verify', '-CApath', capath, '-noverify'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
message, error = p1.communicate(signed_text)
# SMIME header and message body are separated by a blank line
lines = message.strip().splitlines()
try:
blankline = lines.index('')
except ValueError:
raise CryptoException('No blank line between message header and body')
headers = '\n'.join(lines[:blankline])
body = '\n'.join(lines[blankline + 1:])
# two possible encodings
if 'quoted-printable' in headers:
body = quopri.decodestring(body)
elif 'base64' in headers:
body = base64.decodestring(body)
# otherwise, plain text
# 'openssl smime' returns "Verification successful" to standard error. We
# don't want to log this as an error each time, but we do want to see if
# there's a genuine error.
if "Verification successful" in error:
log.debug(error)
else:
raise CryptoException(
"Possible tampering. See OpenSSL error: %s" % error
)
subj = get_certificate_subject(signer)
return body, subj
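# A minimal sign/verify round-trip sketch (hypothetical paths; assumes a valid
# cert/key pair and a CA directory laid out in OpenSSL hash format):
#
#     signed = sign('message content', '/etc/grid-security/hostcert.pem',
#                   '/etc/grid-security/hostkey.pem')
#     body, signer_dn = verify(signed, '/etc/grid-security/certificates', check_crl=False)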
def decrypt(encrypted_text, certpath, keypath):
'''
Decrypt the specified message using the certificate and key contained in the
named PEM files. The capath should point to a directory holding all the
CAs that we accept
This decryption function can be used whether or not OpenSSL is used to
encrypt the data
'''
# This ensures that openssl knows that the string is finished.
# It makes no difference if the signed message is correct, but
# prevents it from hanging in the case of an empty string.
encrypted_text += '\n\n'
log.info('Decrypting message.')
p1 = Popen(['openssl', 'smime', '-decrypt',
'-recip', certpath, '-inkey', keypath],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
enc_txt, error = p1.communicate(encrypted_text)
if (error != ''):
log.error(error)
return enc_txt
def verify_cert_date(certpath):
"""Check that certificate hasn't expired and won't expire within 24 hours.
Return True if certifcate is 'in date', otherwise return False.
"""
if certpath is None:
raise CryptoException('Invalid None argument to verify_cert_date().')
# Check if the certificate expires within the next 86400 seconds and exit
# non-zero if yes, it will expire, or zero if not.
args = ['openssl', 'x509', '-checkend', '86400', '-noout', '-in', certpath]
p1 = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
message, error = p1.communicate(certpath)
# This should be unlikely to happen, but if it does log the error
# and do not verify the cert's expiraiton date.
if error != '':
log.error(error)
return False
# If the returncode is zero the certificate has not expired.
return p1.returncode == 0
def verify_cert(certstring, capath, check_crls=True):
'''
Verify that the certificate is signed by a CA whose certificate is stored in
capath.
Note that I've had to compare strings in the output of openssl to check
for verification, which may make this brittle.
Returns True if the certificate is verified
'''
if certstring is None or capath is None:
raise CryptoException('Invalid None argument to verify_cert().')
args = ['openssl', 'verify', '-CApath', capath]
if check_crls:
args.append('-crl_check_all')
p1 = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
message, error = p1.communicate(certstring)
# I think this is unlikely ever to happen
if (error != ''):
log.error(error)
# 'openssl verify' returns 0 whatever happens, so we can't use the return
# code to determine whether the verification was successful.
# If it is successful, openssl prints 'OK'
# If it fails, openssl prints 'error'
return_bool = 'OK' in message and 'error' not in message
if return_bool:
# We're not interested in the ouput if successful.
level = logging.DEBUG
else:
level = logging.WARNING
log.log(level, 'Certificate verification: %s', message)
return return_bool
def verify_cert_path(certpath, capath, check_crls=True):
'''
Verify certificate, but using the certificate filepath rather than
the certificate string as in verify_cert.
'''
certstring = _from_file(certpath)
return verify_cert(certstring, capath, check_crls)
def get_certificate_subject(certstring):
'''
Return the certificate subject's DN, in legacy openssl format.
'''
p1 = Popen(['openssl', 'x509', '-noout', '-subject'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
subject, error = p1.communicate(certstring)
if (error != ''):
log.error(error)
raise CryptoException('Failed to get subject: %s' % error)
subject = subject.strip()[9:] # remove 'subject= ' from the front
return subject
def get_signer_cert(signed_text):
'''
Read the signer's certificate from the signed specified message, and return the
certificate string.
'''
# This ensures that openssl knows that the string is finished.
# It makes no difference if the signed message is correct, but
# prevents it from hanging in the case of an empty string.
signed_text += '\n\n'
p1 = Popen(['openssl', 'smime', '-pk7out'],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
p2 = Popen(['openssl', 'pkcs7', '-print_certs'],
stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
p1.stdin.write(signed_text)
certstring, error = p2.communicate()
if (error != ''):
log.error(error)
return certstring
|
py | b4084f93c19076e1b0c53ff612508629b7fe9dad | import pytest
from apex.algo.iud_insertion import HISTORICAL, INSERTION, IUD, POST_SUCCESS, PRE_SUCCESS
@pytest.mark.parametrize(('s', 'rx'), [
('iud inserted 3 months ago', HISTORICAL),
('iud inserted 3 months ago', INSERTION),
('iud inserted 3 months ago', IUD),
('strings were cut', PRE_SUCCESS),
('length of strings', POST_SUCCESS),
])
def test_matches(s, rx):
assert rx.matches(s)
|
py | b4084fec62c05cbe03a69998aa38e7055c697fb4 | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.errors import AuthError, SynapseError, StoreError, Codes
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.types import UserID
from ._base import client_v2_patterns
import logging
logger = logging.getLogger(__name__)
class GetFilterRestServlet(RestServlet):
PATTERNS = client_v2_patterns("/user/(?P<user_id>[^/]*)/filter/(?P<filter_id>[^/]*)")
def __init__(self, hs):
super(GetFilterRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.filtering = hs.get_filtering()
@defer.inlineCallbacks
def on_GET(self, request, user_id, filter_id):
target_user = UserID.from_string(user_id)
requester = yield self.auth.get_user_by_req(request)
if target_user != requester.user:
raise AuthError(403, "Cannot get filters for other users")
if not self.hs.is_mine(target_user):
raise AuthError(403, "Can only get filters for local users")
try:
filter_id = int(filter_id)
except:
raise SynapseError(400, "Invalid filter_id")
try:
filter = yield self.filtering.get_user_filter(
user_localpart=target_user.localpart,
filter_id=filter_id,
)
defer.returnValue((200, filter.get_filter_json()))
except (KeyError, StoreError):
raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND)
class CreateFilterRestServlet(RestServlet):
PATTERNS = client_v2_patterns("/user/(?P<user_id>[^/]*)/filter")
def __init__(self, hs):
super(CreateFilterRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.filtering = hs.get_filtering()
@defer.inlineCallbacks
def on_POST(self, request, user_id):
target_user = UserID.from_string(user_id)
requester = yield self.auth.get_user_by_req(request)
if target_user != requester.user:
raise AuthError(403, "Cannot create filters for other users")
if not self.hs.is_mine(target_user):
raise AuthError(403, "Can only create filters for local users")
content = parse_json_object_from_request(request)
filter_id = yield self.filtering.add_user_filter(
user_localpart=target_user.localpart,
user_filter=content,
)
defer.returnValue((200, {"filter_id": str(filter_id)}))
def register_servlets(hs, http_server):
GetFilterRestServlet(hs).register(http_server)
CreateFilterRestServlet(hs).register(http_server)
|
py | b408512a066102c63077475fce537dadc38828a1 | import os
import random # for testing and diagnostics..
import torch
from torch import nn
from torch import Tensor
from typing import Tuple, Optional, Union
from torch.utils.cpp_extension import load
VERBOSE = True
def _resolve(name):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), name)
try:
import torch_iterative_sampling_cpu
except ImportError:
if VERBOSE:
print('Falling back to JIT compiling torch_iterative_sampling_cpu')
torch_iterative_sampling_cpu = load(
name='torch_iterative_sampling_cpu',
sources=[
_resolve('iterative_sampling_cpu.cpp'),
],
verbose=VERBOSE,
)
try:
import torch_iterative_sampling_cuda
except ImportError:
if VERBOSE:
print('Falling back to JIT compiling torch_iterative_sampling_cuda')
torch_iterative_sampling_cuda = None
if torch.cuda.is_available():
torch_iterative_sampling_cuda = load(
name='torch_iterative_sampling_cuda',
sources=[
_resolve('iterative_sampling_cuda.cpp'),
_resolve('iterative_sampling_cuda_kernel.cu'),
],
verbose=VERBOSE,
)
def _iterative_sample_dispatcher(
cumsum: torch.Tensor,
rand: torch.Tensor,
seq_len: int) -> torch.Tensor:
"""
Dispatcher for iterative
"""
if cumsum.is_cuda:
if torch_iterative_sampling_cuda is None:
raise EnvironmentError(f'Failed to load native CUDA module')
return torch_iterative_sampling_cuda.iterative_sample_cuda(
cumsum, rand, seq_len)
else:
return torch_iterative_sampling_cpu.iterative_sample_cpu(
cumsum, rand, seq_len)
def ensure_nonzero(probs: torch.Tensor) -> torch.Tensor:
"""
Return a version of `probs` that lacks zeros and ones.
Args:
probs: a Tensor of probabilities of shape (*, N), where N is the number of classes
Return:
Returns a modified version of probs without exact zeros or ones.
"""
N = probs.shape[-1]
assert probs.dtype in [torch.float32, torch.float64, torch.float16]
epsilon = (1.2e-07 if probs.dtype == torch.float32 else
(2.3e-16 if probs.dtype == torch.float64 else
9.8e-04)) # <-- assume float16, if supported.
return (probs * (1-N*epsilon)) + epsilon
def iterative_sample(probs: torch.Tensor,
num_seqs: int,
seq_len: int,
) -> torch.Tensor:
"""Sample repeatedly from the categorical distribution in `probs`, each time
only selecting from classes that were not previously seen.
Args:
probs: A tensor of probabilities of discrete classes, of shape (*, N)
where N is the number of classes.
Is expected to sum to one (over the N indexes), and be nonnegative.
We advise to pass it through ensure_nonzero(probs) before
this function.
num_seqs: The number of parallel sequences to sample; must be > 0.
seq_len: The length of the sequences of sample; must be strictly between
9 and N.
Returns: a LongTensor of shape (*, num_seqs, seq_len), containing
the sampled indexes, with elements in the range [0..N-1]. Each
element of each sequence is sampled with probability proportional
to `probs`, excluding from consideration classes already sampled
within that sequence.
"""
probs = probs.to(dtype=torch.float32)
N = probs.shape[-1]
rest_shape = probs.shape[:-1]
probs = probs.reshape(-1, N)
B = probs.shape[0]
rand_int32 = torch.randint(0, (2**31)-1, (B, num_seqs), dtype=torch.int32,
device=probs.device)
indexes = _iterative_sample_dispatcher(probs, rand_int32, seq_len)
indexes = indexes.view(*rest_shape, num_seqs, seq_len)
return indexes
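# A minimal usage sketch (illustrative shapes):
#
#     probs = ensure_nonzero(torch.softmax(torch.randn(4, 256), dim=-1))
#     indexes = iterative_sample(probs, num_seqs=2, seq_len=8)  # shape (4, 2, 8)
#     # within each length-8 sequence the sampled class indexes are all distinct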
def exclusive_cumsum(x: Tensor, dim: int) -> Tensor:
"""
Exclusive cumulative sum, i.e. cumulative sum minus the current element.
"""
return torch.cumsum(x, dim) - x
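# Worked example: exclusive_cumsum(torch.tensor([1., 2., 4.]), dim=0)
# returns tensor([0., 1., 3.]) -- each element sees only the sum of what precedes it.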
class PredictorInputParams(nn.Module):
def __init__(self, num_classes: int, predictor_dim: int,
num_discretization_levels: int,
seq_len: int) -> None:
"""
This module stores some embedding parameters that are part of how we predict the
        probabilities of the classes and weights.  Its forward function combines these
        embeddings with a provided base predictor to produce the inputs for the class
        and weight predictors.
num_classes: Number of classes in the discrete distribution that we are modeling,
e.g. 512 (think of this as a hidden dimension). Referred to elsewhere as N.
predictor_dim: Dimension of the input to the predictor that predicts log-probs
of classes and weights. This predictor will be a sum of various inputs.
E.g. 512.
num_discretization_levels: the number of discretization
levels from the SamplingBottleneckModule, dictates
range of `value_indexes`
seq_len: The length K of the random sequences of classes.
"""
super(PredictorInputParams, self).__init__()
# Initialize various embeddings.
# All embeddings will be returned after multiplying by this scale, this
# is intended to make them learn fast enough. We divide by this scale
# when initializing.
self.embed_scale = predictor_dim ** 0.5
self.num_discretization_levels = num_discretization_levels
def Embedding(num_embeddings, embedding_dim):
return nn.Embedding(
num_embeddings=num_embeddings, embedding_dim=embedding_dim,
_weight=torch.randn(num_embeddings, embedding_dim) * (1 / self.embed_scale)
)
# Embedding by which we indicate that a class is "present" at a previous
# position in the sequence (this gets set independently of its weight)
self.class_present_embed = Embedding(num_classes, predictor_dim)
# Embedding by which we indicate the (input) value of a class, this gets scaled
# by `value`.
self.class_value_embed = Embedding(num_classes, predictor_dim)
# Embedding by which we indicate, when querying a weight, the identity of the class whose
# weight we are querying.
self.class_query_embed = Embedding(num_classes, predictor_dim)
# position_embed is the embedding by which we indicate our current position in the sequence (k=0,1,..,K-1).
# shape: (K, N)
self.position_embed = nn.Parameter(torch.randn(seq_len, predictor_dim) * (1 / self.embed_scale))
def forward(self, class_indexes: Tensor,
value_indexes: Tensor,
base_predictor: Tensor) -> Tuple[Tensor, Tensor]:
"""
Args:
class_indexes: the same-named output from
SamplingBottleneckModule.forward(), a LongTensor of
shape (*, num_seqs, seq_len).
value_indexes: the same-named output from
SamplingBottleneckModule.forward(), a LongTensor of
shape (*, num_seqs, seq_len).
base_predictor: A Tensor of shape (*, predictor_dim) that encodes a
prediction of the distribution, e.g. derived from previous
frames' embeddings. Will be combined with other embeddings
owned in this class.
Returns (class_predictor, value_predictor), where:
class_predictor: of shape (*, S, predictor_dim), this is to be used
as the input to a feedforward network that predicts the next
class in the sequence.
value_predictor: of shape (*, S, predictor_dim), this is to be used
as the input to a feedforward network that predicts the (discretized)
weight for the class that we just saw.
"""
# N == predictor_dim
class_present_embedding = self.class_present_embed(class_indexes) # (*, S, K, N)
# class_present_embedding_cumsum includes the current class. This is OK when
# predicting the value, but for predicting the class itself we'll have to subtract the
# current class.
# class_present_embedding_cumsum shape: (*, S, K, predictor_dim)
class_present_embedding_cumsum = torch.cumsum(class_present_embedding, dim=-2)
selected_values = value_indexes * (1.0 / (self.num_discretization_levels - 1)) # (*, S, K)
selected_values = selected_values ** 2
#if random.random() < 0.01:
# print("selected_values = ", selected_values)
# class_value_embedding will be of shape (*, S, K, N). Caution: this could be on
# the large size if S*K is large, memory might be an issue.
class_value_embedding = self.class_value_embed(class_indexes) * selected_values.unsqueeze(-1)
# So do exclusive-cumsum so that for each point k in the sequence, the model
# is aware of the values of all previously emitted classes and their values (but not the
# current value, which is not known yet).
class_value_embedding_cumsum = exclusive_cumsum(class_value_embedding, dim=-2)
# class_query_embedding has shape (*, S, K, N)
class_query_embedding = self.class_query_embed(class_indexes)
common_embedding = (class_value_embedding_cumsum +
class_present_embedding_cumsum +
self.position_embed)
# reshape base_predictor to (*, 1, 1, predictor_dim)
base_predictor = base_predictor.unsqueeze(-2).unsqueeze(-2)
# we have to subtract class_present_embedding in order to exclude the
# current class from class_present_embedding_cumsum (otherwise it's
# cheating, as the thing we're predicting would be known).
class_predictor = base_predictor + self.embed_scale * (common_embedding - class_present_embedding)
# For predicting the weight, we don't need to subtract the current class
# any more because by the time we predict the weight, the class is
# known. However we do need to add class_query_embedding, because
# otherwise the model wouldn't easily be able to tell which class was
# the one whose weight was being queried.
value_predictor = base_predictor + self.embed_scale * (common_embedding + class_query_embedding)
return class_predictor, value_predictor
class BottleneckPredictor(nn.Module):
def __init__(self, num_classes: int,
predictor_dim: int,
num_discretization_levels: int,
seq_len: int,
hidden_dim: int,
num_hidden_layers: int) -> None:
"""
This module is used to predict the discrete symbols (classes and weights)
of the discrete bottleneck
in the SamplingBottleneckModule. It handles only the prediction within
individual frames; any cross-frame aspect of the prediction (i.e. taking
care of the larger sequence across time) will be handled outside this module,
likely by something like a transformer. The forward function of this module
accepts a predictor that would be the output that that transformer (or other
sequential model).
Args:
num_classes: The number of classes used in the SamplingBottleneckModule,
e.g. 512.
          num_discretization_levels: The number of discretization levels in
the SamplingBottleneckModule, e.g. 256.
seq_len: The seq_len given to the SamplingBottleneckModule, which
is the number of symbols to sample from the distribution
each time; e.g. 8 or 16.
hidden_dim: The hidden dimension to use in the two feedforward networks owned
by this class, e.g. 512 or 1024.
num_hidden_layers: The number of hidden layers with ReLU activations to
use in the two feedforward networks owned
by this class, e.g. 1 or 2.
"""
super(BottleneckPredictor, self).__init__()
self.input_params = PredictorInputParams(num_classes, predictor_dim,
num_discretization_levels, seq_len)
def create_predictor(output_dim):
layers = []
cur_dim = predictor_dim
for i in range(num_hidden_layers):
if i != 0:
layers.append(nn.LayerNorm(cur_dim))
layers.append(nn.Linear(cur_dim, hidden_dim))
layers.append(nn.ReLU(inplace=True))
cur_dim = hidden_dim
layers.append(nn.Linear(cur_dim, output_dim))
return nn.Sequential(*layers)
self.class_predictor_module = create_predictor(num_classes)
self.value_predictor_module = create_predictor(num_discretization_levels)
def forward(self,
probs: Optional[Tensor],
class_indexes: Tensor,
value_indexes: Tensor,
base_predictor: Tensor) -> Tuple[Tensor, Tensor]:
"""
Computes the predicted total log-probs of the classes and values selected
by the SamplingBottleneckModule.
Args:
probs: the same-named output from SamplingBottleneckModule.forward(),
a Tensor of shape (*, num_classes). You can provide None if you want
class_indexes to be used instead of `probs`, but using `probs`
should give a lower-variance estimate of the derivative. We use
this without grad (we detach it), for a couple reasons.
- only a difference of log-likes would make sense to train the
bottleneck and its input;
- It wouldn't be "all the derivative" anyway, the real
mechanics of backprop to get the correct derivatives w.r.t. `probs`
are much more complicated than just enabling grad here.
class_indexes: the same-named output from
SamplingBottleneckModule.forward(), a LongTensor of
shape (*, num_seqs, seq_len).
value_indexes: the same-named output from
SamplingBottleneckModule.forward(), a LongTensor of
shape (*, num_seqs, seq_len).
base_predictor: A Tensor of shape (*, predictor_dim) that encodes a
prediction of the distribution, e.g. derived from previous
frames' embeddings via some kind of sequential model.
Returns (class_logprobs, value_logprobs), where:
class_logprobs: a Tensor of shape (*) [matching the inputs];
this gives the UNNORMALIZED logprob of the class indexes,
summed over
the sequence (seq_len) and averaged over the parallel
sequences (num_seqs).
value_logprobs: a Tensor of shape (*) [matching the inputs];
this gives the UNNORMALIZED logprob of the discretized weights,
summed over
the sequence (seq_len) and averaged over the parallel
sequences (num_seqs).
"""
if probs is not None:
probs = probs.detach()
# class_predictor: (*, num_seqs, seq_len, predictor_dim)
# value_predictor: (*, num_seqs, seq_len, predictor_dim)
class_predictor, value_predictor = self.input_params(class_indexes,
value_indexes,
base_predictor)
        # class_prediction: (*, num_seqs, seq_len, num_classes)
# value_prediction: (*, num_seqs, seq_len, num_discretization_levels)
# both of these are unnormalized logprobs.
class_prediction = self.class_predictor_module(class_predictor)
value_prediction = self.value_predictor_module(value_predictor)
class_prediction, mask = self.mask_prev_classes(class_prediction,
class_indexes)
class_prediction = class_prediction.log_softmax(dim=-1)
value_prediction = value_prediction.log_softmax(dim=-1)
# class_all_logprobs and value_all_logprobs are of shape
# (*, num_seqs, seq_len)
# Even if probs is supplied, in training mode once in every 20 or so minibatches we use
# the sampled classes instead of `probs`. This seems to allow the training to
# get started faster than it otherwise would.
if probs is None or (self.training and random.random() < 0.05):
class_all_logprobs = torch.gather(class_prediction, dim=-1,
index=class_indexes.unsqueeze(-1)).squeeze(-1)
else:
probs_expanded = probs.unsqueeze(-2).unsqueeze(-2).expand(mask.shape).contiguous()
# mask out probs of previously seen classes to zero; these are no longer possible,
# so distribution at each point should exclude these prior classes.
probs_expanded.masked_fill_(mask, 0.0)
class_prediction = class_prediction.clone()
class_prediction.masked_fill_(mask, 0.0)
# Re-normalize probs to sum to one after the last dim.
probs_expanded = probs_expanded / probs_expanded.sum(dim=-1).unsqueeze(-1)
class_all_logprobs = (probs_expanded * class_prediction).sum(dim=-1)
value_all_logprobs = torch.gather(value_prediction, dim=-1,
index=value_indexes.unsqueeze(-1)).squeeze(-1)
num_seqs = class_indexes.shape[-2]
# class_logprobs and value_logprobs are of shape (*).
class_logprobs = torch.sum(class_all_logprobs, dim=(-2,-1)) * (1 / num_seqs)
value_logprobs = torch.sum(value_all_logprobs, dim=(-2,-1)) * (1 / num_seqs)
if random.random() < 0.0001:
class_seq = torch.mean(class_all_logprobs,
dim=tuple(range(class_all_logprobs.ndim - 1)))
value_seq = torch.mean(value_all_logprobs,
dim=tuple(range(class_all_logprobs.ndim - 1)))
print(f"Class/value logprobs, as seqs, are: {class_seq}/{value_seq}")
return class_logprobs, value_logprobs
def mask_prev_classes(self, class_logprobs: Tensor, class_indexes: Tensor) -> Tensor:
"""
Replaces the logprobs in `class_logprobs` that correspond to classes
that were previously seen in a sequence (and are therefore now disallowed),
with -infinity. This means that we don't have to waste modeling power
learning the fact that classes cannot be seen more than once.
Args:
            class_logprobs: a Tensor of shape (*, num_seqs, seq_len, num_classes),
containing un-normalized logprobs of the classes.
WARNING: used destructively (actually operates in-place)
            class_indexes: a LongTensor of shape (*, num_seqs, seq_len), containing
class indexes in {0..num_classes-1}.
Returns: (class_logprobs, mask)
class_logprobs: An in-place modified version of class_logprobs with
elements corresponding to previously seen classes in the sequence
replaced with -infinity.
mask: a BoolTensor with the same shape as `class_logprobs`, i.e.
               (*, num_seqs, seq_len, num_classes), with True in the places
where we put -infinity.
"""
counts = torch.zeros_like(class_logprobs, dtype=torch.int16)
counts.scatter_(-1, class_indexes.unsqueeze(-1), 1)
mask = (exclusive_cumsum(counts, dim=-2) != 0)
# use -1e+20 instead of -infinity for the mask, because otherwise when
# we multiply by zero later we'll get nan's.
class_logprobs.masked_fill_(mask, -1e+20)
return class_logprobs, mask
def get_prob_scales(self, probs: Tensor, class_indexes: Tensor) -> Tensor:
"""
Returns some scaling factors >= 1 that compensate for the fact that we will
be masking out elements in `probs` that correspond to previously emitted
classes in the sequence: the scales will be those that would cause `probs`
to sum to one after such masking.
Args:
probs: A Tensor of shape (*, num_classes), which sums to one along
dim=-1, containing class probabilities, as returned from
SamplingBottleneckModule.
class_indexes: a LongTensor of shape (*, num_seqs, seq_len) as returned
from a SamplingBottleneckModule, containing elements in
{0..num_classes-1}.
Return: Returns a Tensor of shape (*, num_seqs, seq_len), containing
scaling factors >= 1.
"""
num_seqs = class_indexes.shape[-2]
num_classes = probs.shape[-1]
        probs_temp = probs.unsqueeze(-2).expand(*probs.shape[:-1], num_seqs, num_classes)
# probs_temp now of shape (*, num_seqs, num_classes).
selected_probs = torch.gather(probs_temp, dim=-1, index=class_indexes)
# selected_probs is now of shape (*, num_seqs, seq_len)
selected_probs_cumsum = exclusive_cumsum(selected_probs, dim=-1)
# selected_probs_cumsum is of shape (*, num_seqs, seq_len), containing
# the exclusive cumulative sum of selected_probs
# epsilon is the floating point epsilon.. we'll be dividing by inv_scale, so
# must be very careful about roundoff
epsilon = (1.2e-07 if probs.dtype == torch.float32 else
(2.3e-16 if probs.dtype == torch.float64 else
9.8e-04)) # <-- assume float16, if supported.
        inv_scale = (1 - selected_probs_cumsum).clamp(min=epsilon)
        # The compensating scale is the reciprocal of the probability mass that
        # remains after excluding previously emitted classes.
        return 1.0 / inv_scale
class _ParameterizedDropout(torch.autograd.Function):
# Please see the function parameterized_dropout() for a description of
# the interface.
@staticmethod
def forward(ctx, probs: Tensor, mask: Tensor,
values: Tensor, random_rate: float = 0.5,
epsilon: float = 0.1) -> Tensor:
probs = probs.detach()
values = values.detach()
C = probs.shape[-1]
rest_shape = list(probs.shape)[:-1]
# frame_mask is a bool tensor of shape (*, 1), that's True on
# frames that will be random (i.e. use "mask" and not "probs").
frame_mask = (torch.rand(*rest_shape, device=probs.device) < random_rate).unsqueeze(-1)
ctx.save_for_backward(probs, mask, values, frame_mask)
ctx.epsilon = epsilon
actual_mask = (frame_mask * mask + torch.logical_not(frame_mask) * probs)
ans = values * actual_mask
return ans
@staticmethod
def backward(ctx, ans_grad: Tensor) -> Tuple[Tensor, None, Tensor, None, None]:
(probs, mask, values, frame_mask) = ctx.saved_tensors
epsilon = ctx.epsilon
# `actual_mask` is what we multiplied the values by in the forward pass.
actual_mask = (frame_mask * mask + torch.logical_not(frame_mask) * probs)
mask_weight = values / (values + epsilon) # <-- close to 1 if an element of values >> epsilon
probs_weight = 1 - mask_weight # <-- close to 1 if an element of values << epsilon
# The formula is an attempt to reduce the variance of the derivatives. The assumption
# is that if a 'value' is small, then the derivative of the output w.r.t. the
# (value * mask) will be about the same whether the mask is 0 or 1, so we can just use
# the element of 'probs', treating it as an expectation.
# whereas if value >> epsilon, we should use the actual mask, for accuracy.
values_grad = ans_grad * (mask_weight * actual_mask + probs_weight * probs)
# See get_derivative_scales() to understand the function of
# epsilon_tensor, it's the epsilon arg to that function.
#
# epsilon_tensor is close to epsilon when an element of values >>
# epsilon, but approaches 1 when values << epsilon, assuming epsilon is
# small. We can use large elements in epsilon_tensor for small elements
# of `values` because if an element `values` is small it's more valid to
# treat the loss function as being linear in `ans`, so we can set
# epsilon to a large value which gives lower-variance derivatives.
# Actually we could perhaps separate this formula to use two constants.
epsilon_tensor = epsilon * (1 + 1/(values + epsilon))
s1, s2 = get_derivative_scales(probs, epsilon_tensor)
grad_factor_random = (mask * s1) + (torch.logical_not(mask) * s2)
grad_factor_deterministic = 1.0
grad_factor = (frame_mask * grad_factor_random + torch.logical_not(frame_mask) * grad_factor_deterministic)
actual_mask_grad = ans_grad * values
probs_grad = grad_factor * actual_mask_grad
return probs_grad, None, values_grad, None, None
def get_derivative_scales(probs: Tensor,
epsilon: Union[Tensor, float]) -> Tuple[Tensor, Tensor]:
"""
This function, which is intended to be used without gradient, returns scales
s1 and s2 which are to be used in backprop when we are applying a dropout-like
zero-or-one mask. Here is the scenario: we have some probabilities `probs`, with
0 <= probs <= 1, and we randomly generate a zero-or-one mask, like:
mask = (torch.rand_like(probs) < probs).
We are going to get an output by masking some `values`, as in:
output = values * mask, (1)
and we want to know how to propagate the derivative of `output` back to `probs`.
(Note that in normal dropout, the dropout prob is just a constant and there is
no derivative). A simple way to do this would be to treat (1) the same as:
output = values * probs (2)
.. the output in (2) is the same as the expected value of (1). This amounts
to backprop like:
mask_grad = output_grad * values.
probs_grad = mask_grad (3)
If the loss function were linear in `output`, the derivative in (3) would
be "correct", in the sense that it would be the derivative of the expected
value of the loss function. Of course,
the loss function won't, in general, be linear in `output`. For arbitrary loss functions,
there's no way to get "correct" derivatives unless we measure derivatives
at all points of `mask` between zero and one, which would require changing
the forward pass. But we can do a little better than (2): if we assume
the loss function is quadratic in `output`, then the loss derivative w.r.t.
the mask would vary linearly between mask==0 and mask==1, and we can
tread the derivative of `prob` as being: (half the derivative at 0) plus
(half the derivative at 1). Here, the average derivative at 1 is just
(mask_grad * mask / probs), and the average derivative at 0 is
(mask_grad * (1 - mask) / (1 - probs)). (There is some analysis required here
to formalize this argument). The gradient according to this approach would be:
probs_grad = mask_grad * 0.5 * (mask / probs + (1-mask) / (1-probs)) (4).
A difficulty with (4) is that we are dividing by values that get close to zero,
which can cause gradient blow-up. We need to introduce an epsilon value to
prevent this. Let us rewrite (4) as:
probs_grad = mask_grad * (s1 * mask + s2 * (1-mask)) (5).
If we treat the loss function as linear in `output`, then the requirement
for correctness of the derivative would be:
(s1 * probs + s2 * (1-probs)) = 1.0 (6)
(bear in mind that "mask" is 1 with probability "probs"; this expression just
gives the expected value of the scale in parentheses in (5)). Our proposed
value for s1 and s2 is as follows:
s1 = 0.5 * ((1+epsilon)/(probs+epsilon) + epsilon/(1+epsilon-probs))     (7)
s2 = 0.5 * ((1+epsilon)/(1+epsilon-probs) + epsilon/(probs+epsilon))
where epsilon > 0; actually epsilon does not have to be less than 1 (as epsilon
gets large, s1 and s2 both approach 0.5). You can verify that the formula above
satisfies (6), e.g. type the following into
wolframalpha.com:
p * 0.5 * (1.1/(p+0.1) + 0.1/(1.1-p)) + (1-p) * 0.5 * (1.1/(1.1-p) + 0.1/(p+0.1))
The advantage of (7) is that s1 and s2 never get larger than 0.5/epsilon, but
for probs between about [epsilon..1-epsilon], it is nearly equivalent to (4).
Args:
probs: a Tensor of arbitrary shape with elements in the interval [0,1],
representing the probability of a mask value being 1 (so similar to
1-dropout_rate).
epsilon: A smoothing value greater than zero. This can be either
a float (e.g. 0.1 or 0.2), or it can be any tensor that broadcasts
with `probs`. The idea is that you might want epsilon to vary
with the `values` in (1): if an element of `value` is close to zero,
then the linear assumption is closer to being correct, and we might
want a larger epsilon value.
Returns:
Returns a pair of tensors (s1, s2), intended to be applied similar to eqn. (5)
above.
"""
inv_p1 = 0.5 / (probs + epsilon)
inv_p2 = 0.5 / ((1.0 + epsilon) - probs)
common = epsilon * (inv_p1 + inv_p2)
s1 = inv_p1 + common
s2 = inv_p2 + common
return s1, s2
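# Illustrative sketch, added for exposition (the original test for this is
# _test_get_derivative_scales(), further below): for any epsilon > 0 the
# returned scales satisfy eqn. (6) above, i.e. s1*probs + s2*(1-probs) == 1.
# The shapes and epsilon values here are arbitrary assumptions.
def _example_get_derivative_scales():
    probs = torch.rand(4, 5)
    for epsilon in (0.01, 0.1, 1.0):
        s1, s2 = get_derivative_scales(probs, epsilon)
        one = s1 * probs + s2 * (1 - probs)
        assert torch.allclose(one, torch.ones_like(one))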
def parameterized_dropout(probs: Tensor,
mask: Tensor,
values: Tensor,
random_rate: float = 0.5,
epsilon: float = 0.1) -> Tensor:
"""
This function returns (values * mask) if random_rate == 1.0 and
(values * probs) if random_rate == 0.0 or if we are in eval mode
(self.training == false). Otherwise, it randomly selects on frame-by-frame
/ vector-by-vector basis, which of the two to use. The main point of this
function is that it intelligently backpropagates derivatives in such a way
that you can meaningfully train `probs`. See the function `get_derivative_scales()`
to understand the central point of how we get derivatives w.r.t. `probs`.
Args:
probs: the probabilities with which the `mask` vector was chosen; we'll be able
to compute derivatives w.r.t. this. A Tensor of shape (*, C) where C is
interpreted as the channel dimension. These must be in the interval [0,1].
mask: A (possibly boolean) Tensor of shape (*, C) and values 0/False or 1/True,
True/1 if this value is to be "passed through".
The caller asserts that these values have been chosen with probabilities
equal to `probs`, e.g. as:
mask = (torch.rand_like(probs) < probs)
(In practice we may be sampling with a more complicated method which has
marginal probabilities equal to `probs`; the correctness of the derivatives
becomes a little weaker in that case).
values: A Tensor of shape (*, C), the same as `probs` and `mask`; these are the
values that are to be multiplied by a mask (or sometimes scaled by `probs`,
if random_rate < 1). The derivatives backpropagated to here are exact,
i.e. just output_grad * mask. We currently require that elements of values
be in the interval [0,1] (this is needed for a formula involving epsilon).
random_rate: A float value that determines how often we use the zero-one mask; the
rest of the time, we use the expected value (probs).
epsilon: A float value used to prevent division by zero in backprop; controls
a bias-variance tradeoff in derivatives (small->lower bias, higher
variance).
Returns: A Tensor with the same shape as `probs`, `mask` and `values`, i.e.
(*, C), which is randomly somewhere between values * mask and
values * probs.
"""
return _ParameterizedDropout.apply(probs, mask, values, random_rate, epsilon)
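# Minimal usage sketch, added for exposition; the shapes and hyperparameters are
# arbitrary assumptions.  `mask` is sampled with marginal probabilities `probs`,
# as the docstring above requires; _test_parameterized_dropout(), further below,
# checks the derivatives more thoroughly.
def _example_parameterized_dropout():
    probs = torch.rand(10, 8)
    mask = (torch.rand_like(probs) < probs)
    values = torch.rand_like(probs)
    out = parameterized_dropout(probs, mask, values,
                                random_rate=0.5, epsilon=0.1)
    assert out.shape == probs.shape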
class _WithGradOf(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
"""
Returns x but will assign the gradient to y.
"""
return x
@staticmethod
def backward(ctx, ans_grad) -> Tuple[None, Tensor]:
return None, ans_grad
def discretize_values(values: Tensor,
num_discretization_levels: int) -> Tuple[Tensor, Tensor]:
"""
Pseudo-randomly discretize an input tensor, whose elements must be in the
interval [0,1], into a fixed number of levels, e.g. 128. Does this by
taking sqrt, multiplying by (num_discretization_levels-1), adding
random value in [0,1), and rounding to int.
Args:
values: a Tensor of arbitrary shape
num_discretization_levels: The number of discrete values that we divide
the interval [0,1] into, e.g. 128.
Returns (y, indexes), where:
y: a randomly discretized version of `values`, whose elements will
differ from the corresponding elements of `values` by no more than
1/(num_discretization_levels - 1). Derivatives go "straight through"
from this to `values`.
indexes: a LongTensor containing the discrete indexes corresponding
to `y`, in the range [0..num_discretization_levels-1].
"""
# the 0.999 is to ensure we don't get exactly one. Caution: this won't work
# in half precision, so we use an assert for now (otherwise we'd later get
# an error in a scatter kernel)
assert values.dtype != torch.float16
indexes = (values.sqrt() * (num_discretization_levels - 1) + 0.999*torch.rand_like(values)).to(dtype=torch.long)
ans = indexes * (1.0 / (num_discretization_levels - 1))
ans = ans ** 2
y = _WithGradOf.apply(ans, values)
return y, indexes
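# Small illustration, added for exposition (see _test_discretize_values(),
# further below, for the real test); the shape and number of levels here are
# arbitrary assumptions.
def _example_discretize_values():
    values = torch.rand(6, 4)
    num_levels = 16
    y, indexes = discretize_values(values, num_levels)
    # indexes are integer levels in [0..num_levels-1]; y is the corresponding
    # discretized value, with gradients passing straight through to `values`.
    assert indexes.dtype == torch.long
    assert indexes.min() >= 0 and indexes.max() <= num_levels - 1
    assert y.shape == values.shape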
class SamplingBottleneckModule(nn.Module):
def __init__(self, dim: int , num_classes: int,
seq_len: int = 8,
num_discretization_levels: int = 128,
random_rate: float = 0.0,
epsilon: float = 0.1) -> None:
"""
Create sampling bottleneck module. This uses an iterative sampling algorithm to
represent the hidden feature by a fixed number of randomly chosen classes (e.g. 8
classes drawn from 512 possible classes), together with values in the range [0,1]
for all of the randomly chosen classes. The basic idea is that we turn the
hidden feature into a categorical distribution over a number of classes, and
we transmit that distribution in a randomized, lossy way that focuses modeling
power on the classes that dominate the distribution. So it's somewhat like
a discrete sampling operation-- in fact, it is such an operation-- but we allow
much more information to pass through than just a single class label.
Args:
dim: feature dimension before and after this module, e.g. 512.
num_classes: The number of discrete classes we form a distribution over, e.g. 512.
seq_len: The number of (distinct) classes we sample from the distribution when
transmitting information over this channel
num_discretization_levels: The number of levels we discretize the interval [0,1]
into when transmitting values, e.g. 128.
random_rate: The probability that we randomize a particular frame, versus
using the expectation.
epsilon: A value used in the backprop that affects derivatives w.r.t. probabilities;
a value close to zero is more theoretically accurate but may lead to
some derivatives being quite large.
"""
super(SamplingBottleneckModule, self).__init__()
self.dim = dim
self.seq_len = seq_len
self.num_classes = num_classes
self.num_discretization_levels = num_discretization_levels
self.random_rate = random_rate
self.epsilon = epsilon
assert epsilon > 0
self.input_scale = nn.Parameter(torch.tensor([3.0]))
# We assume there is a layer-norm just prior to this module, so we don't
# include layer norm on the input.
# to_probs_softmax is a linear projection that will go to a softmax to
# the probs
self.to_probs_softmax = nn.Linear(dim, num_classes, bias=False)
# The output of to_prob_softmax is multiplied by 'to_values_scale' and
# is treated as 'values'. Caution: this may not be stable with SGD optimizer,
# as one parameter does too much work.
self.to_values_scale = nn.Parameter(torch.Tensor([0.13]))
self.layer_norm = nn.LayerNorm(dim)
def forward(self, x: Tensor, num_seqs: int = 1) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:
"""
Forward function.
Args:
x: a Tensor of shape (*, F) where F is the number of input features/channels,
equal to `dim` arg to constructor.
num_seqs: The number of parallel sequences (S). Should probably be 1 unless
you are planning to model these probabilities
Returns (y, probs, class_indexes, value_indexes, class_entropy, frame_entropy), where:
y: a Tensor of shape (*, F), like x, where F is the `dim` arg to this class's
constructor. This is the main output, to be given to
later modules' forward function.
probs: a Tensor of shape (*, N) where N is the number of classes
(`num_classes` to constructor), giving the probabilities with which we
sampled the classes in `class_indexes`. Currently without gradient,
to save a little memory. This will be used when predicting
the classes in `class_indexes`, replacing samples with
expectations in order to reduce the variance of the resulting
derivatives.
class_indexes: a LongTensor of shape (*, num_seqs, seq_len) containing
the randomly sampled classes in the range [0..num_classes-1].
Will be useful if you want to model the probabilities of the output
of this layer.
value_indexes: a LongTensor of shape (*, num_seqs, seq_len) containing
the values of the randomly sampled classes, whose elements are in
the range [0..num_discretization_levels-1] == [0..M-1]
Will be useful if you want to model the probabilities of the output
of this layer.
class_entropy: A scalar Tensor containing the entropy over classes,
of the distribution summed over all frames; will be close to
log(num_classes) if all classes are equally likely overall.
Might be useful if you want to ensure this stays large
(might help optimization by ensuring we don't waste classes).
frame_entropy: A scalar Tensor containing the per-frame entropy over
classes, reflecting the average uncertainty in the distribution over
classes on individual frames. Will be less than class_entropy.
Might be useful if you want to ensure this doesn't get too
small (this might help optimization).
"""
# The linear projection below produces logits of shape (*, N) (stored in `probs`
# before the softmax is applied); they are the input to the softmax that determines
# the probabilities of sampling different classes on each iteration of sampling.
# (Not the same as the marginal probabilities).
x = x * self.input_scale
probs = self.to_probs_softmax(x)
values = probs * self.to_values_scale
probs = torch.softmax(probs, dim=-1)
avg_probs = probs.mean(dim=tuple(range(probs.ndim-1)))
class_entropy = -(avg_probs * (avg_probs + 1.0e-20).log()).sum()
frame_entropy = -(probs * (probs + 1.0e-20).log()).sum() / (probs.numel() / probs.shape[-1])
if random.random() < 0.001:
class_perplexity = class_entropy.exp().to('cpu').item()
frame_perplexity = frame_entropy.exp().to('cpu').item()
print(f"Class perplexity={class_perplexity}, frame perplexity={frame_perplexity}, vs. max possible {avg_probs.numel()}")
# values also has shape (*, N); it is expected to be similar to `probs`,
# since we want to bias towards transmitting the larger values.
values = torch.softmax(values, dim=-1)
probs = ensure_nonzero(probs)
# compute marginal probabilities of selecting any given class at any point
# in the sequence of K distinct samples.
marginals = compute_marginals(probs, self.seq_len)
# indexes shape is (*, S, K)
class_indexes = iterative_sample(probs, num_seqs=num_seqs,
seq_len=self.seq_len)
# values_expanded is values expanded from (*, N) to (*, S, N)
N = probs.shape[-1]
values_expanded = values.unsqueeze(-2).expand(*probs.shape[:-1], num_seqs, N)
chosen_values = torch.gather(values_expanded, dim=-1, index=class_indexes)
# _ and value_indexes have shape (*, S, K)
_, value_indexes = discretize_values(chosen_values,
self.num_discretization_levels)
# discrete_actual_values has shape (*, N), it is just the input `values`
# discretized.
if self.training:
discrete_actual_values, _ = discretize_values(values,
self.num_discretization_levels)
else:
# in eval mode, don't use the discretized values, use the real
# values, which is less noisy.
discrete_actual_values = values
class_indexes_0 = class_indexes.select(dim=-2, index=0)
mask = torch.zeros_like(values)
# Put 1.0 in the mask vector at positions specified by `class_indexes_0`.
mask.scatter_(-1, class_indexes_0, 1.0)
random_rate = 0.0 if not self.training else self.random_rate
y = parameterized_dropout(marginals, mask, discrete_actual_values,
random_rate=random_rate,
epsilon=self.epsilon)
y = torch.matmul(y, self.to_probs_softmax.weight)
y = self.layer_norm(y)
return y, probs, class_indexes, value_indexes, class_entropy, frame_entropy
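# Minimal usage sketch of the module above, added for exposition; all sizes are
# arbitrary assumptions (see _test_sampling_bottleneck(), further below, for a
# fuller example that also exercises the predictor modules).
def _example_sampling_bottleneck():
    m = SamplingBottleneckModule(dim=64, num_classes=128,
                                 seq_len=4, num_discretization_levels=32)
    x = torch.randn(10, 3, 64)
    # num_seqs defaults to 1, so class_indexes/value_indexes have shape (10, 3, 1, 4).
    y, probs, class_indexes, value_indexes, class_entropy, frame_entropy = m(x)
    assert y.shape == x.shape
    assert class_indexes.shape == (10, 3, 1, 4)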
def compute_marginals(probs: Tensor, K: int) -> Tensor:
"""
Args:
probs: a Tensor of float with shape (*, N), interpreted as
the probabilities of categorical distributions over N classes (should
sum to 1 and be nonnegative).
K: the number of times to (conceptually) sample from the distribution
`probs`, always excluding previously-chosen classes.
Returns: the marginal probability, for each class, of selecting
that class at any point during K rounds of selecting a new, previously
unchosen class with probability proportional to `probs`.
The result will be of shape (*, N), and the sum of the result over
then N axis will equal K (or be very close to K). Returned
elements will be in the interval [0,1].
"""
alpha = compute_normalizer(probs, K)
return 1 - (1 - probs) ** alpha.unsqueeze(-1)
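# Illustrative sketch, added for exposition (the real test is
# _test_compute_marginals(), further below); shapes and K are arbitrary
# assumptions.
def _example_compute_marginals():
    probs = torch.randn(5, 20).softmax(dim=-1)
    K = 4
    marginals = compute_marginals(probs, K)
    assert marginals.shape == probs.shape
    # the marginals sum to approximately K over the class axis:
    assert (marginals.sum(dim=-1) - K).abs().max() < 0.5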
class _ComputeNormalizer(torch.autograd.Function):
@staticmethod
def forward(ctx, x: Tensor, K: int) -> Tensor:
# Please see compute_normalizer() below for documentation of the interface.
# We are solving
# \sum_{i=0}^{N-1} 1 - (1-x_i) ** alpha == K
# i.e.:
# \sum_{i=0}^{N-1} 1 - exp(alpha * log(1-x_i)) == K
#
# Let us define y_i = log(1-x_i). Then we are solving:
#
# \sum_{i=0}^{N-1} 1 - exp(alpha * y_i) == K
#
# K-N + \sum_{i=0}^{N-1} exp(alpha * y_i) == 0 (eqn. 1)
# d(LHS)/d(alpha) where LHS means left hand side of (eqn. 1) is:
# d = \sum_{i=0}^{N-1} y_i exp(alpha * y_i)
#
# An iterative solution (we'll see whether this converges) is:
# alpha := alpha - err / d
# where err is the LHS of (eqn. 1).
requires_grad = x.requires_grad
x = x.detach()
y = (1 - x).log()
N = x.shape[-1]
alpha = torch.empty(list(x.shape)[:-1], device=x.device,
dtype=x.dtype).fill_(K)
for i in range(3):
exp = torch.exp(alpha.unsqueeze(-1) * y)
err = exp.sum(dim=-1) + (K - N)
d = (exp * y).sum(dim=-1)
alpha -= err / d
if __name__ == '__main__':
print(f"Iter {i}, alpha={alpha}, exp={exp}, err={err}, d={d}")
# in:
# K-N + \sum_{i=0}^{N-1} exp(alpha * y_i) == 0 (eqn. 1),
# d(LHS)/dy_i = alpha * exp(alpha * y_i).
# d(alpha)/d(LHS) = -1/d = -1 / (sum_{i=0}^{N-1} (y_i * exp(alpha * y_i)))
# ... so
# but we want d(alpha)/d(x_i), which is:
# d(alpha)/d(LHS) d(LHS)/dy_i dy_i/dx_i. (2)
# dy_i/dx_i is: -1/(1-x).
# So we can write (2) as:
# (alpha * exp(alpha * y_i)) / (d * (1-x))
if requires_grad:
ctx.deriv = (alpha.unsqueeze(-1) * exp) / (d.unsqueeze(-1) * (1 - x))
return alpha
@staticmethod
def backward(ctx, alpha_grad: Tensor) -> Tuple[Tensor, None]:
return alpha_grad.unsqueeze(-1) * ctx.deriv, None
def compute_normalizer(x: Tensor, K: int) -> Tensor:
"""
Args:
x: a Tensor of float with shape (*, N), interpreted as
the probabilities of categorical distributions over N classes (should
sum to 1 and be nonnegative).
K: an integer satisfying 0 < K < N.
Returns a Tensor alpha of shape (*), satisfying:
(1 - exp(-x * alpha.unsqueeze(-1))).sum(dim=-1) == K
I.e., that:
\sum_{i=0}^{N-1} 1 - exp(-alpha * x_i) == K.
This will satisfy alpha >= K. alpha, if an integer, would be the
number of draws from the distribution x, such that the
expected number of distinct classes chosen after that many draws
would equal approximately K. We can get this formula by using a Poisson assumption,
with alpha * x_i being the expectation (the lambda parameter of the
Poisson); another way to formulate this is to say that the probability of
choosing class i at least once is 1 - (1 - x_i) ** alpha (this is equivalent
for small x_i, using -x_i as an approximation of log(1-x_i)). The
version with exp() is more straightforward to differentiate though. This
does not really need to be super exact for our application, just fairly
close. Anyway, the two versions get very similar as K gets larger, because
then alpha gets larger and the x_i's that are large make less
difference.
"""
return _ComputeNormalizer.apply(x, K)
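# [Note added for clarity] The code below this return statement is unreachable;
# it sketches an alternative Newton-style solver that works directly on
# exp(-alpha * x) (the exp() formulation from the docstring) and appears to be
# kept only for reference.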
N = x.shape[-1]
alpha_shape = list(x.shape)[:-1]
alpha = torch.empty(alpha_shape, device=x.device, dtype=x.dtype).fill_(K)
for i in range(3):
exp = torch.exp(-alpha.unsqueeze(-1) * x)
err = exp.sum(dim=-1) + (K - N)
minus_d = (exp * x).sum(dim=-1)
alpha += err / minus_d
# d2/d(alpha2) of LHS is:
# d1 = \sum_{i=0}^{N-1} x_i^2 exp(-alpha * x_i)
return alpha
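# Illustrative sketch, added for exposition (the real test is _test_normalizer(),
# further below); shapes and K are arbitrary assumptions.  Note that the Newton
# iteration in _ComputeNormalizer.forward() solves the (1-x)**alpha form of the
# constraint, to which the exp() form in the docstring is a close approximation.
def _example_compute_normalizer():
    x = torch.randn(5, 20).softmax(dim=-1)
    K = 4
    alpha = compute_normalizer(x, K)
    lhs = (1 - (1 - x) ** alpha.unsqueeze(-1)).sum(dim=-1)
    assert (lhs - K).abs().max() < 0.5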
def _test_normalizer():
dim = 20
K = 5
torch.set_default_dtype(torch.double)
for i in range(5): # setting i too large will cause test failure, because
# the iterative procedure converges more slowly when the
# probs have a large range, and we use i below as a
# scale.
B = 5 # Batch size
probs = (torch.randn(B, dim) * i * 0.5).softmax(dim=-1)
probs.requires_grad = True
alpha = compute_normalizer(probs, K)
print(f"Grad check, scale={i}")
# atol=1.0 might seem large, but the gradients used by torch.autograd.gradcheck
alpha_grad = torch.randn(*alpha.shape)
(alpha * alpha_grad).sum().backward()
probs_grad = probs.grad
probs_delta = torch.randn(*probs.shape) * 0.0001
alpha_delta = compute_normalizer(probs + probs_delta, K) - alpha
observed_delta = (alpha_delta * alpha_grad).sum()
predicted_delta = (probs_delta * probs_grad).sum()
# Caution: for i=4, the difference can sometimes be large. These will sometimes even be
# the opposite sign, if it happened that alpha_delta is nearly orthogonal to the change
# in alpha.
print(f"For i={i}, observed_delta={observed_delta} vs. predicted_delta={predicted_delta}")
#torch.autograd.gradcheck(compute_normalizer, (probs, K), eps=1.0e-06, rtol=0.025, atol=1.0)
def _test_compute_marginals():
probs = (torch.randn(10, 20, 30) * 2.0).softmax(dim=-1)
K = 8
marginals = compute_marginals(probs, K)
err = marginals.sum(dim = -1) - K
avg_err = (err * err).mean().sqrt()
print("avg_err of marginals is ", avg_err)
assert avg_err < 0.2
def _test_discretize_values():
values = torch.rand(10, 20, 30)
values.requires_grad = True
M = 32
discrete_values, indexes = discretize_values(values, M)
grad = torch.rand_like(values)
# These tests will work with very high but not 1 probability.
assert torch.min(discrete_values).item() == 0
assert torch.min(indexes).item() == 0
print("max is", torch.max(indexes).item())
assert torch.max(indexes).item() == M - 1
assert torch.max(discrete_values).item() == 1.0
discrete_values.backward(grad)
assert torch.allclose(values.grad, grad)
def _test_sampling_bottleneck():
# just makes sure the forward function runs without crashing.
# There is more extensive testing of this, including training in iterative_sampling_test.py
dim = 256
num_classes = 512
num_discretization_levels = 128
seq_len = 8
m = SamplingBottleneckModule(dim, num_classes,
num_discretization_levels=num_discretization_levels,
seq_len=seq_len)
predictor_dim = 128
p = PredictorInputParams(num_classes, predictor_dim,
num_discretization_levels=num_discretization_levels,
seq_len=seq_len)
hidden_dim = 256
num_hidden_layers = 2
b = BottleneckPredictor(num_classes, predictor_dim,
num_discretization_levels, seq_len, hidden_dim,
num_hidden_layers)
feats = torch.randn(30, 4, dim)
y, probs, class_indexes, value_indexes, class_entropy, frame_entropy = m(feats)
print(f"Shapes of: y={y.shape}, probs={probs.shape}, class_indexes={class_indexes.shape}, value_indexes={value_indexes.shape}")
base_predictor = torch.randn(30, 4, predictor_dim)
(class_predictor, value_predictor) = p(class_indexes, value_indexes, base_predictor)
print(f"class_predictor shape={class_predictor.shape}, value_predictor shape={value_predictor.shape}")
print(f"class_predictor variance={(class_predictor**2).mean()} value_predictor variance={(value_predictor**2).mean()}")
assert value_indexes.min() == 0 and value_indexes.max() < num_discretization_levels
print("y part = ", y[0])
(class_logprobs, value_logprobs) = b(None, class_indexes, value_indexes, base_predictor)
assert class_logprobs.shape == (30, 4)
assert value_logprobs.shape == (30, 4)
(class_logprobs, value_logprobs) = b(probs, class_indexes, value_indexes, base_predictor)
assert class_logprobs.shape == (30, 4)
assert value_logprobs.shape == (30, 4)
def _compare_seen_expected_products(a: Tensor, b: Tensor, a_name: str = "seen", b_name: str = "expected",
threshold: float = 0.02):
"""
Compute and display products between a and b, and check that (a*b).sum() is close to (b*b).sum().
"""
ab = (a * b).sum().to('cpu').item()
aa = (a * a).sum().to('cpu').item()
bb = (b * b).sum().to('cpu').item()
a_flip_b = (a.flip(dims=(0,)) * b).sum().to('cpu').item()
err = (1.0 - ab / (0.5 * (ab + bb)))
print(f"{a_name}*{b_name}:{ab}, {a_name}*{a_name}:{aa}, {b_name}*{b_name}:{bb}, {a_name}-flipped*{b_name}:{a_flip_b}, rel_err={err}")
assert abs(err) < threshold
def _test_iterative_sample():
for device in 'cpu', 'cuda':
print("device=", device)
device = torch.device(device)
B = 3000
N = 256
logprobs = 2 * torch.randn(B, N, device=device)
probs = logprobs.softmax(dim=-1)
num_seqs = random.randint(1, 8)
seq_len = random.randint(5, 15)
indexes = iterative_sample(probs, num_seqs=num_seqs, seq_len=seq_len)
#print("indexes = ", indexes)
indexes_0 = indexes[:,0,:] # take 1st sequence.
zero_ones = torch.zeros(B, N, device=device)
zero_ones.scatter_(-1, indexes_0, 1.0)
# Check all indexes in each sequence are distinct.
assert zero_ones.sum().to('cpu').item() == indexes_0.numel()
expected_marginals = compute_marginals(probs, seq_len)
_compare_seen_expected_products(zero_ones, expected_marginals, "seen", "expected")
def _test_get_derivative_scales():
probs = torch.rand(200, 300, 2)
epsilon_tensor = 0.1 + torch.rand(200, 300, 2)
for epsilon in [0.0, 0.1, 1.0, 2.0, epsilon_tensor]:
s1, s2 = get_derivative_scales(probs, epsilon)
assert torch.all(s1>=0) and torch.all(s2>=0)
one = (s1 * probs) + (s2 * (1-probs))
assert(torch.allclose(one, torch.ones_like(one)))
def _test_parameterized_dropout():
probs = torch.rand(100, 200, 5)
mask = (torch.rand_like(probs) < probs)
values = torch.rand_like(probs)
probs.requires_grad = True
values.requires_grad = True
output_grad = torch.randn_like(probs)
quadratic_grad = torch.randn_like(probs)
for random_rate in (0.0, 0.5, 1.0):
for epsilon in (0.001, 0.1, 0.5, 1.0):
for quadratic_term in (0.0, 1.0, 3.0):
"""
The 'quadratic_term' part requires an explanation (we assume you've read the documentation
for parameterized_dropout()). We construct a loss function that is:
(output * output_grad).sum() + 0.5 * quadratic_term * (output * output).sum()   (eq. 1)
(Remember, as epsilon -> 0, our backprop approach is supposed to approach exact
derivatives for any quadratic loss function).
What is the expected derivative contribution from the quadratic part of the
loss function? We'll first compute the expected loss, which is the thing we
are supposed to be backpropping, and then compute the derivative of that.
Again considering just the quadratic part in (eq. 1), the expected loss
if random_rate == 1.0 (i.e. we use the random zero-one mask), is:
0.5 * quadratic_term * (probs * values * values).sum() (eq. 2).
[note: with probability (1-probs), the output is zero so the squared
output would also be zero.]
If random_rate == 0.0, i.e. output == probs * values, it is:
0.5 * quadratic_term * (probs * probs * values * values).sum() (eq. 3).
In general, the quadratic part of the loss function is (expected value):
0.5 * random_rate * quadratic_term * (probs * values * values).sum() +
0.5 * (1 - random_rate) * quadratic_term * (probs * probs * values * values).sum() (eq. 4).
The derivative of this w.r.t. 'probs' is:
(0.5 * random_rate * quadratic_term * values * values +
(1 - random_rate) * quadratic_term * probs * values * values).sum() (eq. 5).
and the derivative of this w.r.t. 'values' is:
(random_rate * quadratic_term * probs * values +
(1 - random_rate) * quadratic_term * probs * probs * values).sum() (eq. 6).
"""
probs.grad = None
values.grad = None
output = parameterized_dropout(probs, mask, values, random_rate, epsilon)
expected_output = values * probs
print(f"test_parameterized_dropout: random_rate={random_rate}, epsilon={epsilon}, quadratic_term={quadratic_term}:")
_compare_seen_expected_products(output, expected_output)
if random_rate == 0.0:
assert torch.allclose(output, values * probs) # deterministic in this case.
(output * output_grad + 0.5 * quadratic_term * quadratic_grad * output * output).sum().backward()
expected_probs_grad = output_grad * values
# for next line, see (eq. 5) above
expected_probs_grad += quadratic_term * quadratic_grad * (0.5 * random_rate * values * values +
(1-random_rate) * probs * values * values)
expected_values_grad = output_grad * probs
# for next line, see (eq. 6) above
expected_values_grad += quadratic_term * quadratic_grad * (random_rate * probs * values +
(1-random_rate) * probs * probs * values)
# if all three of quadratic_term, epsilon and random_rate are nonzero,
# there is a risk of inaccuracy in expected vs. observed derivatives.
threshold=0.015 + (0.075 * quadratic_term * epsilon * random_rate)
if threshold > 0.02:
print(f"Threshold={threshold}, since quadratic_term={quadratic_term} and epsilon={epsilon} and random_rate={random_rate}")
# Note: this test won't always succeed, and the threshold is heuristic, not based on
# a properly derived formula. The threshold above can be played with
# if the failures bother you.
_compare_seen_expected_products(probs.grad, expected_probs_grad, "probs_grad", "expected_probs_grad",
threshold=threshold)
_compare_seen_expected_products(values.grad, expected_values_grad, "values_grad", "expected_values_grad",
threshold=threshold)
if __name__ == '__main__':
_test_iterative_sample()
_test_sampling_bottleneck()
_test_parameterized_dropout()
_test_get_derivative_scales()
_test_discretize_values()
_test_compute_marginals()
_test_normalizer()
|
py | b408528a4ffa0b31b361edee6d6cd4d5a4d2415c | """Tests for the input_datetime component."""
|
py | b40853764d176b297a15df6fc31638d731d22d42 | import unittest
import logging
from pathlib import Path
from shutil import copyfile
from tempfile import TemporaryDirectory
from rdflib.exceptions import ParserError
from rdflib import Graph
class FileParserGuessFormatTest(unittest.TestCase):
def test_ttl(self):
g = Graph()
self.assertIsInstance(g.parse("test/w3c/turtle/IRI_subject.ttl"), Graph)
def test_n3(self):
g = Graph()
self.assertIsInstance(g.parse("test/n3/example-lots_of_graphs.n3"), Graph)
def test_warning(self):
g = Graph()
graph_logger = logging.getLogger("rdflib")
with TemporaryDirectory() as tmpdirname:
newpath = Path(tmpdirname).joinpath("no_file_ext")
copyfile("test/rdf/Manifest.rdf", str(newpath))
with self.assertLogs(graph_logger, "WARNING"):
with self.assertRaises(ParserError):
g.parse(str(newpath))
if __name__ == '__main__':
unittest.main()
|
py | b40854297a8b97700cf465bc78de62327291e41e | # -*- coding: utf-8 -*-
from typing import Tuple, Dict
from .declarative import load_project
from .container import Container
from .project import Project
def run(project: Project, backend_type: str = None, backend_options: Dict = None,
skip_jobs: Tuple[str] = None, skip_environment: bool = True):
container = Container(project, backend_type=backend_type, backend_options=backend_options)
container.init()
container.run(skip_jobs=skip_jobs, skip_environment=skip_environment)
if container.ephemeral:
container.destroy()
def run_declarative(filename: str, backend_type: str = None, backend_options: Dict=None,
skip_jobs: Tuple[str] = None, skip_environment: bool = True,
artifacts_path: str = None):
project = load_project(filename, artifacts_path=artifacts_path)
run(project, backend_type, backend_options, skip_jobs, skip_environment)
|
py | b408544de541a30155beef0fe2fdbb239225d6bf | # encoding: utf8
from spline.tests import TestController, url
from splinext.pokedex.controllers.pokedex_search import PokemonSearchForm
class TestPokemonSearchController(TestController):
def do_search(self, **criteria):
u"""Small wrapper to run a Pokémon search for the given criteria."""
return self.app.get(url(controller='dex_search',
action='pokemon_search',
**criteria))
def check_search(self, criteria, expected, message, exact=False):
"""Checks whether the given expected results (a list of names or (name,
form_name) tuples) are included in the response from a search.
If exact is set to True, the search must contain exactly the given
results. Otherwise, the search can produce other results.
"""
# Unless otherwise specified, the test doesn't care about display or
# sorting, so skip all the effort the template goes through generating
# the default table
criteria.setdefault('display', 'custom-list')
criteria.setdefault('sort', 'id')
results = self.do_search(**criteria).tmpl_context.results
self.assert_(
len(results) < 700,
u"doesn't look like we got every single Pokémon: {0}".format(message)
)
leftover_results = []
leftover_expected = []
# Copy the expected names so we can cross them off as they are found
for name in expected:
leftover_expected.append(name)
# Remove expected results from the 'leftover' list, and add unexpected
# results to the other leftover list
for result in results:
result_name = result.name
if result_name in leftover_expected:
leftover_expected.remove(result_name)
else:
leftover_results.append(result_name)
# The leftovers now contain no names in common
if leftover_expected:
print leftover_expected
self.assertEquals(
leftover_expected, [],
u"all expected Pokémon found: {0}".format(message)
)
if exact:
if leftover_results:
print leftover_results
self.assertEquals(
leftover_results, [],
u"no extra Pokémon found: {0}".format(message)
)
def test_name(self):
"""Checks basic name searching.
Anything that would get an exact match via lookup should work -- i.e.,
plain names, forme + name, and wildcards.
"""
self.check_search(
dict(name=u'eevee'),
[u'Eevee'],
'searching by name',
exact=True,
)
self.check_search(
dict(name=u'speed deoxys'),
[u'Speed Deoxys'],
'searching by form name',
exact=True,
)
self.check_search(
dict(name=u'bogus'),
[],
'searching for a bogus name',
exact=True,
)
self.check_search(
dict(name=u'MeOwTh'),
[u'Meowth', u'Alolan Meowth' ],
'case is ignored',
exact=True,
)
self.check_search(
dict(name=u'*eon'),
[ u'Flareon', u'Kecleon', u'Lumineon', u'Empoleon' ], # etc.
'wildcards',
)
self.check_search(
dict(name=u'feralig?tr'),
[ u'Feraligatr' ],
'more wildcards!',
exact=True,
)
self.check_search(
dict(name=u'pikac?'),
[],
'wildcards are trusted',
exact=True,
)
def test_ability(self):
"""Checks searching by ability."""
self.check_search(
dict(ability=u'Bad Dreams'),
[u'Darkrai'],
'ability',
exact=True,
)
def test_held_item(self):
"""Check that searching by held item works correctly."""
self.check_search(
dict(held_item=u'magmarizer'),
[ u'Magby', u'Magmar', u'Magmortar' ],
'simple held-item search',
exact=True,
)
def test_growth_rate(self):
"""Check that searching by growth rate works correctly."""
self.check_search(
dict(growth_rate=u'1059860'),
# All the starters are like this
[ u'Bulbasaur', u'Charmander', u'Totodile', u'Piplup' ],
'growth rate',
)
def test_species(self):
"""Checks searching by species flavor."""
self.check_search(
dict(genus=u'evolutio'),
[ u'Eevee' ],
'genus',
exact=True,
)
def test_color(self):
"""Checks searching by color."""
self.check_search(
dict(color=u'brown'),
[ u'Cubone', u'Eevee', u'Feebas', u'Pidgey', u'Spinda', u'Zigzagoon' ],
# etc.
'color',
)
def test_habitat(self):
"""Checks searching by FR/LG habitat."""
# I actually checked this by looking at the old search's results. Hm.
self.check_search(
dict(habitat=u'urban'),
[ u'Abra', u'Eevee', u'Hitmonlee', u'Muk', u'Persian', u'Voltorb' ],
'habitat',
)
def test_shape(self):
"""Checks searching by flavor shape."""
self.check_search(
dict(shape=u'arms'),
[ u'Baltoy', u'Drifloon', u'Grimer', u'Haunter', u'Porygon-Z', u'Spoink' ],
'shape',
)
def test_evolution_stage(self):
"""Checks the evolution stage searches:
- baby
- basic
- stage 1
- stage 2
And the evolution position searches:
- not evolved
- middle evolution
- fully evolved
- only evolution
And some special things:
- branching evolution
- branched evolution
"""
# Actual stages
self.check_search(
dict(evolution_stage=u'baby'),
[ u'Magby', u'Munchlax', u'Pichu', u'Riolu', u'Smoochum' ],
u'baby Pokémon',
)
self.check_search(
dict(evolution_stage=u'basic'),
[ u'Charmander', u'Eevee', u'Manaphy', u'Scyther', u'Treecko' ],
u'basic form Pokémon',
)
self.check_search(
dict(evolution_stage=u'stage1'),
[ u'Electivire', u'Gloom', u'Jolteon', u'Scizor', u'Wartortle' ],
u'stage 1 Pokémon',
)
self.check_search(
dict(evolution_stage=u'stage2'),
[ u'Charizard', u'Dragonite', u'Feraligatr', u'Staraptor', u'Tyranitar', u'Vileplume' ],
u'stage 2 Pokémon',
)
# Relative position in a family
self.check_search(
dict(evolution_position=u'first'),
[ u'Charmander', u'Eevee', u'Riolu', u'Togepi' ],
u'first evolution',
)
self.check_search(
dict(evolution_position=u'last'),
[ u'Charizard', u'Jolteon', u'Lucario', u'Scizor', u'Togekiss' ],
u'final evolution',
)
self.check_search(
dict(evolution_position=u'middle'),
[ u'Charmeleon', u'Dragonair', u'Gloom', u'Pikachu' ],
u'middle evolution',
)
self.check_search(
dict(evolution_position=u'only'),
[ u'Ditto', u'Farfetch\u2019d', u'Latias', u'Mew', u'Tauros' ],
u'only evolution',
)
# Special stuff
self.check_search(
dict(evolution_special=u'branching'),
[ u'Eevee', u'Tyrogue' ],
u'branching evolution',
)
self.check_search(
dict(evolution_special=u'branched'),
[
u'Gardevoir', u'Gallade',
u'Ninjask', u'Shedinja',
],
u'branched evolution',
)
# Some combinations of options
self.check_search(
dict(evolution_position=u'last', evolution_special=u'branching'),
[],
u'last evolution branching (impossible)',
)
self.check_search(
dict(evolution_position=u'first', evolution_special=u'branched'),
[],
u'first evolution branched (impossible)',
)
self.check_search(
dict(evolution_position=u'middle', evolution_special=u'branched'),
[ u'Silcoon', u'Cascoon' ],
u'middle evolution branched',
exact=True,
)
self.check_search(
dict(evolution_position=u'last', evolution_special=u'branched'),
[ u'Jolteon', u'Bellossom' ],
u'last evolution branched',
)
self.check_search(
dict(evolution_position=[u'middle', u'last']),
[ u'Charmeleon', u'Charizard' ],
u'middle or last evolution',
)
def test_gender_distribution(self):
"""Checks searching by gender frequency.
Controls look like: [at least|v] [1/8 female|v]
Remember, the db (and thus the form) store gender rate as
eighths-female.
"""
self.check_search(
dict(gender_rate_operator=u'less_equal', gender_rate=u'1'),
[ u'Bulbasaur', u'Chikorita', u'Tauros' ],
'mostly male',
)
self.check_search(
dict(gender_rate_operator=u'more_equal', gender_rate=u'6'),
[ u'Clefairy', u'Kangaskhan', u'Miltank' ],
'mostly female',
)
self.check_search(
dict(gender_rate_operator=u'equal', gender_rate=u'4'),
[ u'Absol', u'Castform', u'Delibird', u'Grimer', u'Teddiursa' ],
'half and half',
)
self.check_search(
dict(gender_rate_operator=u'equal', gender_rate=u'-1'),
[ u'Magneton', u'Voltorb' ],
'no gender',
)
# Check that "<= 0" doesn't include genderless (-1)
res = self.do_search(gender_rate_operator=u'less_equal',
gender_rate=u'0')
self.assertFalse(
any(_.name == u'Voltorb' for _ in res.tmpl_context.results))
def test_egg_groups(self):
"""Checks searching by egg groups."""
self.check_search(
dict(egg_group_operator=u'all', egg_group=u'15'),
[ u'Latias', u'Mew' ],
'no eggs',
)
# 6 + 11 == Fairy + Indeterminate
self.check_search(
dict(egg_group_operator=u'all', egg_group=[u'6', u'11']),
[ u'Castform', u'Sunny Castform',
u'Rainy Castform', u'Snowy Castform', ],
'fairy + indeterm; only one result, kinda...',
exact=True,
)
# Water 1; Water 3
self.check_search(
dict(egg_group_operator=u'any', egg_group=[u'2', u'9']),
[ u'Bidoof', u'Corsola', u'Krabby' ],
'water 1 OR water 3',
)
def test_generation(self):
"""Checks searching by generation introduced."""
self.check_search(
dict(introduced_in=u'1'),
[ u'Eevee', u'Pikachu', u'Shellder' ],
'introduced in Kanto',
)
self.check_search(
dict(introduced_in=u'4'),
[ u'Lucario', u'Munchlax', u'Roserade' ],
'introduced in Sinnoh',
)
# and several at once for good measure
self.check_search(
dict(introduced_in=[u'1', u'4']),
[ u'Eevee', u'Pikachu', u'Shellder', u'Lucario', u'Munchlax', u'Roserade' ],
'introduced in Kanto or Sinnoh',
)
def test_pokedex(self):
u"""Checks searching by Pokedex."""
self.check_search(
dict(in_pokedex=u'4'),
[ u'Sandshrew', u'Crobat', u'Lombre' ],
u'in Hoenn Pokedex',
)
self.check_search(
dict(in_pokedex=u'6'),
[ u'Eevee', u'Staraptor', u'Altered Giratina' ],
u'in Sinnoh Pokedex',
)
# and several at once for good measure
self.check_search(
dict(in_pokedex=[u'7', u'4']),
[ u'Eevee', u'Espeon', u'Celebi', u'Silcoon', u'Nuzleaf', u'Aron' ],
u'in Johto or Hoenn Pokedex',
)
def test_type(self):
"""Checks searching by type.
There are three options for type:
- must have at least one of the selected types
- must have exactly the selected type combination
- must have only the selected types
"""
self.check_search(
dict(type_operator=u'any', type=[u'dark', u'steel']),
[ u'Houndoom', u'Magnemite', u'Murkrow', u'Steelix' ],
'one-of some types',
)
self.check_search(
dict(type_operator=u'exact', type=[u'dragon', u'ground']),
[ u'Flygon', u'Gabite', u'Garchomp', u'Gible', u'Vibrava',
u'Mega Garchomp', u'Zygarde', u'10% Zygarde', u'50% Zygarde', u'Complete Zygarde'],
'exact type combo',
exact=True,
)
self.check_search(
dict(type_operator=u'only', type=[u'ice', u'steel']),
[
u'Registeel', # pure steel
u'Glaceon', u'Glalie', u'Regice', u'Snorunt', # pure ice
],
'only selected types',
)
# Make sure the default selection doesn't affect results
self.check_search(
dict(type_operator=u'any', name=u'eevee'),
[ u'Eevee' ],
'empty type selection doesn\'t affect results',
)
def test_move(self):
"""Checks searching by move.
Besides a move name, moves have several ancillary settings:
- Whether to search for the exact move, an identical move, or any
similar move.
- The version(s) to search.
- The method(s) by which the move is learned.
"""
self.check_search(
dict(move=u'Transform'),
[ u'Ditto', u'Mew' ],
'simple search by move',
exact=True,
)
# Try searching for identical moves -- that is, moves with the same
# effect id.
self.check_search(
dict(move=u'Thief', move_fuzz=u'same-effect'),
[
# These can learn Thief
u'Abra', u'Bidoof', u'Ekans', u'Meowth', u'Pidgey',
# These can learn Covet, which is identical
u'Cleffa', u'Cyndaquil', u'Slakoth',
],
'search by identical move',
)
# Restrict by version
self.check_search(
dict(move=u'Roar of Time', move_version_group=[u'1', u'2']),
[],
'gen 4 moves aren\'t learned in gen 1',
exact=True,
)
self.check_search(
dict(move=u'SolarBeam',
move_method=u'level-up',
move_version_group=[u'8', u'9', u'10'],
name=u'Bulbasaur'),
[],
'Bulbasaur lost SolarBeam in gen 4',
exact=True,
)
# Restrict by method
self.check_search(
dict(move=u'Volt Tackle'),
[ u'Pichu' ],
'Pichu learns Volt Tackle...',
exact=True,
)
self.check_search(
dict(move=u'Volt Tackle',
move_method=[u'level-up', u'tutor', u'machine', u'egg']),
[],
'...but not by normal means',
exact=True,
)
# Simple combo
self.check_search(
dict(move=u'Frenzy Plant',
move_method=u'tutor',
move_version_group=[u'7']),
[ u'Venusaur' ],
'only Venusaur gets elemental beam in FR',
exact=True,
)
def test_range_parsing(self):
u"""Checks to make sure that stats, effort, and size searching can
parse number ranges.
They can be any of the following, joined by commas, with space ignored:
- n
- n-m
- n–m
- n+ or +m
- n- or -m (negative numbers are impossible)
- n~m or n±m
- n~ or ~m
- <n or >n
In the case of size, there's extra parsing to do for units; however,
that won't conflict with any of the above rules.
"""
# For the ultimate simplicity, test this against national dex number
self.check_search(
dict(id=u'133'),
[ u'Eevee' ],
'range: exact number',
exact=True,
)
self.check_search(
dict(id=u'133, 352'),
[ u'Eevee', u'Kecleon' ],
'range: several exact numbers',
exact=True,
)
self.check_search(
dict(id=u'133-135'),
[ u'Eevee', u'Vaporeon', u'Jolteon' ],
'range: n-m',
exact=True,
)
self.check_search(
dict(id=u'806+'),
[ u'Blacephalon', u'Zeraora' ],
'range: n+',
exact=True,
)
self.check_search(
dict(id=u'806-'),
[ u'Blacephalon', u'Zeraora' ],
'range: n-',
exact=True,
)
self.check_search(
dict(id=u'>=806'),
[ u'Blacephalon', u'Zeraora' ],
'range: >=n',
exact=True,
)
self.check_search(
dict(id=u'+3'),
[ u'Bulbasaur', u'Ivysaur', u'Venusaur', u'Mega Venusaur' ],
'range: +m',
exact=True,
)
self.check_search(
dict(id=u'–3'),
[ u'Bulbasaur', u'Ivysaur', u'Venusaur', u'Mega Venusaur' ],
'range: endash-m',
exact=True,
)
self.check_search(
dict(id=u'<4'),
[ u'Bulbasaur', u'Ivysaur', u'Venusaur', u'Mega Venusaur' ],
'range: <m',
exact=True,
)
self.check_search(
dict(id=u'5~1'),
[ u'Charmander', u'Charmeleon', u'Charizard',
u'Mega Charizard X', u'Mega Charizard Y' ],
'range: n~m',
exact=True,
)
self.check_search(
dict(id=u'133~'),
[ u'Eevee' ],
'range: n~ (same as just n)',
exact=True,
)
self.check_search(
dict(id=u'~9'),
[ u'Wartortle', u'Blastoise', u'Mega Blastoise', u'Caterpie' ],
'range: ~m',
exact=True,
)
def test_stats(self):
"""Check that searching by stats works correctly."""
self.check_search(
dict(stat_hp=u'1,255'),
[ u'Blissey', u'Shedinja' ],
'HP of 1 or 255',
exact=True,
)
self.check_search(
dict(stat_special_attack=u'130-131'),
[ u'Espeon', u'Gengar', u'Glaceon', u'Heatran', u'Latios', u'Magnezone', u'Kyurem',
u'Mega Charizard X', u'Mega Slowbro', u'Mega Blaziken', u'Xerneas', u'Yveltal', u'Volcanion', u'Tapu Lele', u'Magearna', u'Original Magearna',
],
'special attack of 130',
exact=True,
)
def test_effort(self):
"""Check that searching by effort works correctly."""
self.check_search(
dict(effort_special_attack=u'2', effort_special_defense=u'1'),
[ u'Butterfree', u'Togekiss', u'Venusaur', u'Mega Venusaur' ],
'effort',
exact=True,
)
def test_hatch_counter(self):
"""Checks searching by initial hatch counter."""
self.check_search(
dict(hatch_counter=u'5'),
[ u'Gyarados', u'Magikarp', u'Mega Gyarados' ],
'hatch counter',
exact=True,
)
def test_base_experience(self):
"""Checks searching by base experience."""
self.check_search(
dict(base_experience=u'608'),
[ u'Blissey' ],
'base EXP',
exact=True,
)
def test_capture_rate(self):
"""Checks searching by capture rate."""
self.check_search(
dict(capture_rate=u'205'),
[ u'Corphish' ],
'capture rate',
exact=True,
)
def test_base_happiness(self):
"""Checks searching by base happiness."""
self.check_search(
dict(base_happiness=u'140'),
[
u'Azelf', u'Blissey', u'Chansey', u'Clefable', u'Clefairy',
u'Cleffa', u'Happiny', u'Lopunny', u'Mesprit', u'Uxie',
u'Mega Lopunny',
],
'base happiness',
exact=True,
)
def test_size(self):
"""Check that searching by size works correctly."""
# XXX what should a size with no units do? default american units? just fail?
self.check_search(
dict(height=u'0m-8in'),
[ u'Natu' ],
'dumb height range',
)
self.check_search(
dict(weight=u'450lb–210kg'),
[ u'Rayquaza' ],
'dumb weight range',
)
self.check_search(
dict(weight=u'14.3 lb'),
[ u'Eevee' ],
'converted units match',
)
def test_sort(self):
"""Make sure all the sort methods actually work."""
sort_field = PokemonSearchForm.sort
for value, label in sort_field.kwargs['choices']:
response = self.do_search(id=u'1', sort=value)
self.assert_(
response.tmpl_context.results,
"""Sort by {0} doesn't crash""".format(value)
)
def test_display_custom_table(self):
"""Try spitting out a custom table with every column, and make sure it
doesn't explode.
"""
column_field = PokemonSearchForm.column
columns = [value for (value, label) in column_field.kwargs['choices']]
response = self.do_search(id=u'1', display='custom-table',
column=columns)
self.assert_(
response.tmpl_context.results,
"""Custom table columns don't crash""".format(value)
)
def test_crash_vague_join(self):
"""Tests for crashes that occur when searching by evolution position
and sorting by some other criterion, because the join between 'pokemon'
and that criterion's table is vague due to the multiple 'pokemon'
aliases in the join.
"""
self.check_search(
dict(evolution_stage=u'stage1', sort=u'color'),
[ u'Raichu' ],
u'joins to color still work when searching by evo',
)
self.check_search(
dict(
evolution_stage=u'basic',
move=u'water gun', move_method=u'level-up', move_version_group=u'1',
sort=u'habitat',
),
[ u'Staryu' ],
u'joins to habitat still work when searching by evo and move',
)
|
py | b40855cc371285cc44b8ec1e38270a14bc6c23cb | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 8
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SummaryWorkloadWorkloadItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bytes_in': 'float',
'bytes_out': 'float',
'cpu': 'float',
'domain_id': 'str',
'error': 'str',
'group_id': 'float',
'group_sid': 'str',
'groupname': 'str',
'job_type': 'str',
'l2': 'float',
'l3': 'float',
'local_addr': 'str',
'local_name': 'str',
'node': 'float',
'ops': 'float',
'path': 'str',
'protocol': 'str',
'reads': 'float',
'remote_addr': 'str',
'remote_name': 'str',
'share_name': 'str',
'system_name': 'str',
'time': 'int',
'user_id': 'float',
'user_sid': 'str',
'username': 'str',
'workload_id': 'int',
'workload_type': 'str',
'writes': 'float',
'zone_id': 'float',
'zone_name': 'str'
}
attribute_map = {
'bytes_in': 'bytes_in',
'bytes_out': 'bytes_out',
'cpu': 'cpu',
'domain_id': 'domain_id',
'error': 'error',
'group_id': 'group_id',
'group_sid': 'group_sid',
'groupname': 'groupname',
'job_type': 'job_type',
'l2': 'l2',
'l3': 'l3',
'local_addr': 'local_addr',
'local_name': 'local_name',
'node': 'node',
'ops': 'ops',
'path': 'path',
'protocol': 'protocol',
'reads': 'reads',
'remote_addr': 'remote_addr',
'remote_name': 'remote_name',
'share_name': 'share_name',
'system_name': 'system_name',
'time': 'time',
'user_id': 'user_id',
'user_sid': 'user_sid',
'username': 'username',
'workload_id': 'workload_id',
'workload_type': 'workload_type',
'writes': 'writes',
'zone_id': 'zone_id',
'zone_name': 'zone_name'
}
def __init__(self, bytes_in=None, bytes_out=None, cpu=None, domain_id=None, error=None, group_id=None, group_sid=None, groupname=None, job_type=None, l2=None, l3=None, local_addr=None, local_name=None, node=None, ops=None, path=None, protocol=None, reads=None, remote_addr=None, remote_name=None, share_name=None, system_name=None, time=None, user_id=None, user_sid=None, username=None, workload_id=None, workload_type=None, writes=None, zone_id=None, zone_name=None): # noqa: E501
"""SummaryWorkloadWorkloadItem - a model defined in Swagger""" # noqa: E501
self._bytes_in = None
self._bytes_out = None
self._cpu = None
self._domain_id = None
self._error = None
self._group_id = None
self._group_sid = None
self._groupname = None
self._job_type = None
self._l2 = None
self._l3 = None
self._local_addr = None
self._local_name = None
self._node = None
self._ops = None
self._path = None
self._protocol = None
self._reads = None
self._remote_addr = None
self._remote_name = None
self._share_name = None
self._system_name = None
self._time = None
self._user_id = None
self._user_sid = None
self._username = None
self._workload_id = None
self._workload_type = None
self._writes = None
self._zone_id = None
self._zone_name = None
self.discriminator = None
self.bytes_in = bytes_in
self.bytes_out = bytes_out
self.cpu = cpu
if domain_id is not None:
self.domain_id = domain_id
if error is not None:
self.error = error
if group_id is not None:
self.group_id = group_id
if group_sid is not None:
self.group_sid = group_sid
if groupname is not None:
self.groupname = groupname
if job_type is not None:
self.job_type = job_type
self.l2 = l2
self.l3 = l3
if local_addr is not None:
self.local_addr = local_addr
if local_name is not None:
self.local_name = local_name
self.node = node
self.ops = ops
if path is not None:
self.path = path
if protocol is not None:
self.protocol = protocol
self.reads = reads
if remote_addr is not None:
self.remote_addr = remote_addr
if remote_name is not None:
self.remote_name = remote_name
if share_name is not None:
self.share_name = share_name
if system_name is not None:
self.system_name = system_name
self.time = time
if user_id is not None:
self.user_id = user_id
if user_sid is not None:
self.user_sid = user_sid
if username is not None:
self.username = username
if workload_id is not None:
self.workload_id = workload_id
if workload_type is not None:
self.workload_type = workload_type
self.writes = writes
if zone_id is not None:
self.zone_id = zone_id
if zone_name is not None:
self.zone_name = zone_name
@property
def bytes_in(self):
"""Gets the bytes_in of this SummaryWorkloadWorkloadItem. # noqa: E501
Count of bytes-in per second. # noqa: E501
:return: The bytes_in of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._bytes_in
@bytes_in.setter
def bytes_in(self, bytes_in):
"""Sets the bytes_in of this SummaryWorkloadWorkloadItem.
Count of bytes-in per second. # noqa: E501
:param bytes_in: The bytes_in of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if bytes_in is None:
raise ValueError("Invalid value for `bytes_in`, must not be `None`") # noqa: E501
self._bytes_in = bytes_in
@property
def bytes_out(self):
"""Gets the bytes_out of this SummaryWorkloadWorkloadItem. # noqa: E501
Count of bytes-out per second. # noqa: E501
:return: The bytes_out of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._bytes_out
@bytes_out.setter
def bytes_out(self, bytes_out):
"""Sets the bytes_out of this SummaryWorkloadWorkloadItem.
Count of bytes-out per second. # noqa: E501
:param bytes_out: The bytes_out of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if bytes_out is None:
raise ValueError("Invalid value for `bytes_out`, must not be `None`") # noqa: E501
self._bytes_out = bytes_out
@property
def cpu(self):
"""Gets the cpu of this SummaryWorkloadWorkloadItem. # noqa: E501
The number (across all cores) of micro-seconds per second. # noqa: E501
:return: The cpu of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._cpu
@cpu.setter
def cpu(self, cpu):
"""Sets the cpu of this SummaryWorkloadWorkloadItem.
The number (across all cores) of micro-seconds per second. # noqa: E501
:param cpu: The cpu of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if cpu is None:
raise ValueError("Invalid value for `cpu`, must not be `None`") # noqa: E501
self._cpu = cpu
@property
def domain_id(self):
"""Gets the domain_id of this SummaryWorkloadWorkloadItem. # noqa: E501
The IFS domain of the path which the operation was requested on. # noqa: E501
:return: The domain_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this SummaryWorkloadWorkloadItem.
The IFS domain of the path which the operation was requested on. # noqa: E501
:param domain_id: The domain_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._domain_id = domain_id
@property
def error(self):
"""Gets the error of this SummaryWorkloadWorkloadItem. # noqa: E501
Report any errors during id resolution. # noqa: E501
:return: The error of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this SummaryWorkloadWorkloadItem.
Report any errors during id resolution. # noqa: E501
:param error: The error of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._error = error
@property
def group_id(self):
"""Gets the group_id of this SummaryWorkloadWorkloadItem. # noqa: E501
Group ID of the group that initiated the operation. # noqa: E501
:return: The group_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this SummaryWorkloadWorkloadItem.
Group ID of the group that initiated the operation. # noqa: E501
:param group_id: The group_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
self._group_id = group_id
@property
def group_sid(self):
"""Gets the group_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
Group SID of the group that initiated the operation. # noqa: E501
:return: The group_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._group_sid
@group_sid.setter
def group_sid(self, group_sid):
"""Sets the group_sid of this SummaryWorkloadWorkloadItem.
Group SID of the group that initiated the operation. # noqa: E501
:param group_sid: The group_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._group_sid = group_sid
@property
def groupname(self):
"""Gets the groupname of this SummaryWorkloadWorkloadItem. # noqa: E501
The resolved text name of the group that initiated the operation. # noqa: E501
:return: The groupname of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._groupname
@groupname.setter
def groupname(self, groupname):
"""Sets the groupname of this SummaryWorkloadWorkloadItem.
The resolved text name of the group that initiated the operation. # noqa: E501
:param groupname: The groupname of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._groupname = groupname
@property
def job_type(self):
"""Gets the job_type of this SummaryWorkloadWorkloadItem. # noqa: E501
The canonical name for the job followed by phase in brackets, ie. 'AVscan[1]', etc... # noqa: E501
:return: The job_type of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._job_type
@job_type.setter
def job_type(self, job_type):
"""Sets the job_type of this SummaryWorkloadWorkloadItem.
The canonical name for the job followed by phase in brackets, ie. 'AVscan[1]', etc... # noqa: E501
:param job_type: The job_type of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._job_type = job_type
@property
def l2(self):
"""Gets the l2 of this SummaryWorkloadWorkloadItem. # noqa: E501
L2 cache hits per second. # noqa: E501
:return: The l2 of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._l2
@l2.setter
def l2(self, l2):
"""Sets the l2 of this SummaryWorkloadWorkloadItem.
L2 cache hits per second. # noqa: E501
:param l2: The l2 of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if l2 is None:
raise ValueError("Invalid value for `l2`, must not be `None`") # noqa: E501
self._l2 = l2
@property
def l3(self):
"""Gets the l3 of this SummaryWorkloadWorkloadItem. # noqa: E501
L3 cache hits per second. # noqa: E501
:return: The l3 of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._l3
@l3.setter
def l3(self, l3):
"""Sets the l3 of this SummaryWorkloadWorkloadItem.
L3 cache hits per second. # noqa: E501
:param l3: The l3 of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if l3 is None:
raise ValueError("Invalid value for `l3`, must not be `None`") # noqa: E501
self._l3 = l3
@property
def local_addr(self):
"""Gets the local_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
The IP address of the host receiving the operation request. # noqa: E501
:return: The local_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._local_addr
@local_addr.setter
def local_addr(self, local_addr):
"""Sets the local_addr of this SummaryWorkloadWorkloadItem.
The IP address of the host receiving the operation request. # noqa: E501
:param local_addr: The local_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._local_addr = local_addr
@property
def local_name(self):
"""Gets the local_name of this SummaryWorkloadWorkloadItem. # noqa: E501
The resolved text name of the LocalAddr, if resolution can be performed. # noqa: E501
:return: The local_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._local_name
@local_name.setter
def local_name(self, local_name):
"""Sets the local_name of this SummaryWorkloadWorkloadItem.
The resolved text name of the LocalAddr, if resolution can be performed. # noqa: E501
:param local_name: The local_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._local_name = local_name
@property
def node(self):
"""Gets the node of this SummaryWorkloadWorkloadItem. # noqa: E501
The node on which the operation was performed or 0 for cluster scoped statistics. # noqa: E501
:return: The node of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._node
@node.setter
def node(self, node):
"""Sets the node of this SummaryWorkloadWorkloadItem.
The node on which the operation was performed or 0 for cluster scoped statistics. # noqa: E501
:param node: The node of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if node is None:
raise ValueError("Invalid value for `node`, must not be `None`") # noqa: E501
self._node = node
@property
def ops(self):
"""Gets the ops of this SummaryWorkloadWorkloadItem. # noqa: E501
Operations per second. # noqa: E501
:return: The ops of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._ops
@ops.setter
def ops(self, ops):
"""Sets the ops of this SummaryWorkloadWorkloadItem.
Operations per second. # noqa: E501
:param ops: The ops of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if ops is None:
raise ValueError("Invalid value for `ops`, must not be `None`") # noqa: E501
self._ops = ops
@property
def path(self):
"""Gets the path of this SummaryWorkloadWorkloadItem. # noqa: E501
        The path on which the operation was requested. # noqa: E501
:return: The path of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this SummaryWorkloadWorkloadItem.
        The path on which the operation was requested. # noqa: E501
:param path: The path of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._path = path
@property
def protocol(self):
"""Gets the protocol of this SummaryWorkloadWorkloadItem. # noqa: E501
The protocol of the operation. # noqa: E501
:return: The protocol of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""Sets the protocol of this SummaryWorkloadWorkloadItem.
The protocol of the operation. # noqa: E501
:param protocol: The protocol of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._protocol = protocol
@property
def reads(self):
"""Gets the reads of this SummaryWorkloadWorkloadItem. # noqa: E501
Disk read operations per second. # noqa: E501
:return: The reads of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._reads
@reads.setter
def reads(self, reads):
"""Sets the reads of this SummaryWorkloadWorkloadItem.
Disk read operations per second. # noqa: E501
:param reads: The reads of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if reads is None:
raise ValueError("Invalid value for `reads`, must not be `None`") # noqa: E501
self._reads = reads
@property
def remote_addr(self):
"""Gets the remote_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
The IP address of the host sending the operation request. # noqa: E501
:return: The remote_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._remote_addr
@remote_addr.setter
def remote_addr(self, remote_addr):
"""Sets the remote_addr of this SummaryWorkloadWorkloadItem.
The IP address of the host sending the operation request. # noqa: E501
:param remote_addr: The remote_addr of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._remote_addr = remote_addr
@property
def remote_name(self):
"""Gets the remote_name of this SummaryWorkloadWorkloadItem. # noqa: E501
The resolved text name of the RemoteAddr, if resolution can be performed. # noqa: E501
:return: The remote_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._remote_name
@remote_name.setter
def remote_name(self, remote_name):
"""Sets the remote_name of this SummaryWorkloadWorkloadItem.
The resolved text name of the RemoteAddr, if resolution can be performed. # noqa: E501
:param remote_name: The remote_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._remote_name = remote_name
@property
def share_name(self):
"""Gets the share_name of this SummaryWorkloadWorkloadItem. # noqa: E501
The name of the SMB share through which the operation was requested. # noqa: E501
:return: The share_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._share_name
@share_name.setter
def share_name(self, share_name):
"""Sets the share_name of this SummaryWorkloadWorkloadItem.
The name of the SMB share through which the operation was requested. # noqa: E501
:param share_name: The share_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._share_name = share_name
@property
def system_name(self):
"""Gets the system_name of this SummaryWorkloadWorkloadItem. # noqa: E501
The process name, job ID, etc... # noqa: E501
:return: The system_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._system_name
@system_name.setter
def system_name(self, system_name):
"""Sets the system_name of this SummaryWorkloadWorkloadItem.
The process name, job ID, etc... # noqa: E501
:param system_name: The system_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._system_name = system_name
@property
def time(self):
"""Gets the time of this SummaryWorkloadWorkloadItem. # noqa: E501
        Unix Epoch time in seconds at which the statistic was collected. # noqa: E501
:return: The time of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: int
"""
return self._time
@time.setter
def time(self, time):
"""Sets the time of this SummaryWorkloadWorkloadItem.
        Unix Epoch time in seconds at which the statistic was collected. # noqa: E501
:param time: The time of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: int
"""
if time is None:
raise ValueError("Invalid value for `time`, must not be `None`") # noqa: E501
self._time = time
@property
def user_id(self):
"""Gets the user_id of this SummaryWorkloadWorkloadItem. # noqa: E501
User ID of the user who initiated the operation. # noqa: E501
:return: The user_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this SummaryWorkloadWorkloadItem.
User ID of the user who initiated the operation. # noqa: E501
:param user_id: The user_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
self._user_id = user_id
@property
def user_sid(self):
"""Gets the user_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
User SID of the user who initiated the operation. # noqa: E501
:return: The user_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._user_sid
@user_sid.setter
def user_sid(self, user_sid):
"""Sets the user_sid of this SummaryWorkloadWorkloadItem.
User SID of the user who initiated the operation. # noqa: E501
:param user_sid: The user_sid of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._user_sid = user_sid
@property
def username(self):
"""Gets the username of this SummaryWorkloadWorkloadItem. # noqa: E501
The resolved text name of the user who initiated the operation. # noqa: E501
:return: The username of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this SummaryWorkloadWorkloadItem.
The resolved text name of the user who initiated the operation. # noqa: E501
:param username: The username of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._username = username
@property
def workload_id(self):
"""Gets the workload_id of this SummaryWorkloadWorkloadItem. # noqa: E501
ID of the workload (Pinned workloads only). # noqa: E501
:return: The workload_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: int
"""
return self._workload_id
@workload_id.setter
def workload_id(self, workload_id):
"""Sets the workload_id of this SummaryWorkloadWorkloadItem.
ID of the workload (Pinned workloads only). # noqa: E501
:param workload_id: The workload_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: int
"""
self._workload_id = workload_id
@property
def workload_type(self):
"""Gets the workload_type of this SummaryWorkloadWorkloadItem. # noqa: E501
The type of workload output. # noqa: E501
:return: The workload_type of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._workload_type
@workload_type.setter
def workload_type(self, workload_type):
"""Sets the workload_type of this SummaryWorkloadWorkloadItem.
The type of workload output. # noqa: E501
:param workload_type: The workload_type of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._workload_type = workload_type
@property
def writes(self):
"""Gets the writes of this SummaryWorkloadWorkloadItem. # noqa: E501
Disk write operations per second. # noqa: E501
:return: The writes of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._writes
@writes.setter
def writes(self, writes):
"""Sets the writes of this SummaryWorkloadWorkloadItem.
Disk write operations per second. # noqa: E501
:param writes: The writes of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
if writes is None:
raise ValueError("Invalid value for `writes`, must not be `None`") # noqa: E501
self._writes = writes
@property
def zone_id(self):
"""Gets the zone_id of this SummaryWorkloadWorkloadItem. # noqa: E501
Zone ID # noqa: E501
:return: The zone_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: float
"""
return self._zone_id
@zone_id.setter
def zone_id(self, zone_id):
"""Sets the zone_id of this SummaryWorkloadWorkloadItem.
Zone ID # noqa: E501
:param zone_id: The zone_id of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: float
"""
self._zone_id = zone_id
@property
def zone_name(self):
"""Gets the zone_name of this SummaryWorkloadWorkloadItem. # noqa: E501
The resolved text zone name # noqa: E501
:return: The zone_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:rtype: str
"""
return self._zone_name
@zone_name.setter
def zone_name(self, zone_name):
"""Sets the zone_name of this SummaryWorkloadWorkloadItem.
The resolved text zone name # noqa: E501
:param zone_name: The zone_name of this SummaryWorkloadWorkloadItem. # noqa: E501
:type: str
"""
self._zone_name = zone_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SummaryWorkloadWorkloadItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b4085816ea4e91db0580fb47df952f6ce905069f |
def convert(geostyler):
return {}, [] # (dictionary with ArcGIS Pro style, list of warnings)
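# Hedged usage sketch (the GeoStyler input shape is an assumption, not defined
# in this stub): a full implementation would walk the GeoStyler rules and emit
# an ArcGIS Pro style dictionary plus a list of warnings, e.g.
#   arcgis_style, warnings = convert({"name": "demo", "rules": []})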
|
py | b408596eb923955330057cdd851a95efc3ebd306 | # Copyright 2013-2019 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2014-2017
# - Cedric Serfon <[email protected]>, 2015
# - Mario Lassnig <[email protected]>, 2019
''' create bad replicas table '''
import datetime
import sqlalchemy as sa
from alembic import context
from alembic.op import (create_table, create_primary_key, create_foreign_key,
create_check_constraint, create_index, drop_table)
from rucio.db.sqla.types import GUID
# Alembic revision identifiers
revision = '32c7d2783f7e'
down_revision = '384b96aa0f60'
def upgrade():
'''
Upgrade the database to this revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
create_table('bad_replicas',
sa.Column('scope', sa.String(25)),
sa.Column('name', sa.String(255)),
sa.Column('rse_id', GUID()),
sa.Column('reason', sa.String(255)),
sa.Column('state', sa.String(1)),
sa.Column('account', sa.String(25)),
sa.Column('created_at', sa.DateTime, default=datetime.datetime.utcnow),
sa.Column('updated_at', sa.DateTime, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow))
create_primary_key('BAD_REPLICAS_STATE_PK', 'bad_replicas', ['scope', 'name', 'rse_id', 'created_at'])
create_check_constraint('BAD_REPLICAS_SCOPE_NN', 'bad_replicas', 'scope is not null')
create_check_constraint('BAD_REPLICAS_NAME_NN', 'bad_replicas', 'name is not null')
create_check_constraint('BAD_REPLICAS_RSE_ID_NN', 'bad_replicas', 'rse_id is not null')
create_foreign_key('BAD_REPLICAS_ACCOUNT_FK', 'bad_replicas', 'accounts', ['account'], ['account'])
create_index('BAD_REPLICAS_STATE_IDX', 'bad_replicas', ['rse_id', 'state'])
def downgrade():
'''
Downgrade the database to the previous revision
'''
if context.get_context().dialect.name in ['oracle', 'mysql', 'postgresql']:
drop_table('bad_replicas')
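# Hedged note (generic Alembic usage, not a command defined in this repository):
# the revision is typically applied or rolled back via the Alembic CLI, e.g.
#   alembic upgrade 32c7d2783f7e
#   alembic downgrade 384b96aa0f60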
|
py | b408599ae22fb3008cb81f43c6e81be9ebe6bd7b | """var types
Revision ID: 3f0b987d79ff
Revises: 8415ee5e38b2
Create Date: 2021-02-22 23:01:23.384196
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '3f0b987d79ff'
down_revision = '8415ee5e38b2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('film_segment', 'dataset',
existing_type=sa.VARCHAR(length=100),
nullable=True)
op.alter_column('film_segment', 'first_cbd',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'first_frame',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'flight',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'id',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=False,
autoincrement=True,
existing_server_default=sa.text("nextval('film_segment_id_seq'::regclass)"))
op.alter_column('film_segment', 'instrument_type',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'last_cbd',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'last_changed',
existing_type=postgresql.TIMESTAMP(timezone=True),
type_=sa.DateTime(),
existing_nullable=True)
op.alter_column('film_segment', 'last_frame',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True)
op.alter_column('film_segment', 'notes',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=True)
op.alter_column('film_segment', 'path',
existing_type=sa.TEXT(),
type_=sa.String(length=300),
existing_nullable=True)
op.alter_column('film_segment', 'reel',
existing_type=sa.BIGINT(),
type_=sa.String(length=100),
existing_nullable=True)
op.alter_column('film_segment', 'scope_type',
existing_type=sa.TEXT(),
type_=sa.String(length=100),
existing_nullable=True)
op.alter_column('film_segment', 'updated_by',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=True)
op.alter_column('film_segment_version', 'first_cbd',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'first_frame',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'flight',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'id',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=False,
autoincrement=False)
op.alter_column('film_segment_version', 'instrument_type',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_cbd',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_changed',
existing_type=postgresql.TIMESTAMP(timezone=True),
type_=sa.DateTime(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_frame',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'notes',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'path',
existing_type=sa.TEXT(),
type_=sa.String(length=300),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'reel',
existing_type=sa.BIGINT(),
type_=sa.String(length=100),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'scope_type',
existing_type=sa.TEXT(),
type_=sa.String(length=100),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'updated_by',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=True,
autoincrement=False)
op.alter_column('flasklogin-users', 'created_on',
existing_type=postgresql.TIMESTAMP(timezone=True),
type_=sa.DateTime(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'email',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'first_name',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'id',
existing_type=sa.BIGINT(),
type_=sa.Integer(),
existing_nullable=False,
autoincrement=True)
op.alter_column('flasklogin-users', 'last_login',
existing_type=postgresql.TIMESTAMP(timezone=True),
type_=sa.DateTime(),
existing_nullable=True)
op.alter_column('flasklogin-users', 'last_name',
existing_type=sa.TEXT(),
type_=sa.String(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'password',
existing_type=sa.TEXT(),
type_=sa.String(length=200),
existing_nullable=False)
op.alter_column('transaction', 'issued_at',
existing_type=postgresql.TIMESTAMP(timezone=True),
type_=sa.DateTime(),
existing_nullable=True)
op.alter_column('transaction', 'remote_addr',
existing_type=sa.TEXT(),
type_=sa.String(length=50),
existing_nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('transaction', 'remote_addr',
existing_type=sa.String(length=50),
type_=sa.TEXT(),
existing_nullable=True)
op.alter_column('transaction', 'issued_at',
existing_type=sa.DateTime(),
type_=postgresql.TIMESTAMP(timezone=True),
existing_nullable=True)
op.alter_column('flasklogin-users', 'password',
existing_type=sa.String(length=200),
type_=sa.TEXT(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'last_name',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'last_login',
existing_type=sa.DateTime(),
type_=postgresql.TIMESTAMP(timezone=True),
existing_nullable=True)
op.alter_column('flasklogin-users', 'id',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=False,
autoincrement=True)
op.alter_column('flasklogin-users', 'first_name',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'email',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=False)
op.alter_column('flasklogin-users', 'created_on',
existing_type=sa.DateTime(),
type_=postgresql.TIMESTAMP(timezone=True),
existing_nullable=False)
op.alter_column('film_segment_version', 'updated_by',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'scope_type',
existing_type=sa.String(length=100),
type_=sa.TEXT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'reel',
existing_type=sa.String(length=100),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'path',
existing_type=sa.String(length=300),
type_=sa.TEXT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'notes',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_frame',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_changed',
existing_type=sa.DateTime(),
type_=postgresql.TIMESTAMP(timezone=True),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'last_cbd',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'instrument_type',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'id',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=False,
autoincrement=False)
op.alter_column('film_segment_version', 'flight',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'first_frame',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment_version', 'first_cbd',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True,
autoincrement=False)
op.alter_column('film_segment', 'updated_by',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=True)
op.alter_column('film_segment', 'scope_type',
existing_type=sa.String(length=100),
type_=sa.TEXT(),
existing_nullable=True)
op.alter_column('film_segment', 'reel',
existing_type=sa.String(length=100),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'path',
existing_type=sa.String(length=300),
type_=sa.TEXT(),
existing_nullable=True)
op.alter_column('film_segment', 'notes',
existing_type=sa.String(),
type_=sa.TEXT(),
existing_nullable=True)
op.alter_column('film_segment', 'last_frame',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'last_changed',
existing_type=sa.DateTime(),
type_=postgresql.TIMESTAMP(timezone=True),
existing_nullable=True)
op.alter_column('film_segment', 'last_cbd',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'instrument_type',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'id',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=False,
autoincrement=True,
existing_server_default=sa.text("nextval('film_segment_id_seq'::regclass)"))
op.alter_column('film_segment', 'flight',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'first_frame',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'first_cbd',
existing_type=sa.Integer(),
type_=sa.BIGINT(),
existing_nullable=True)
op.alter_column('film_segment', 'dataset',
existing_type=sa.VARCHAR(length=100),
nullable=False)
# ### end Alembic commands ###
|
py | b4085ae2c412f4c3959a0b1c82935070d34e9e0e |
def extractWordrain69Com(item):
'''
Parser for 'wordrain69.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
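# Hedged illustration of the feed item shape this parser expects (field names
# come from the accesses above; the values are made up):
#   extractWordrain69Com({'title': 'Some Novel c12', 'tags': ['PRC']})
# would build a translated-release message for 'PRC' once a chapter is parsed.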
|
py | b4085c087584e34510adb8ed11d59720efcb5386 | from libqtile.widget.base import ThreadedPollText
import re
import shlex
import subprocess
VOL_LOW = 40
VOL_HIGH = 80
re_vol = re.compile(r'\[(\d?\d?\d?)%\]')
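# Hedged example of the `amixer sget` output line this pattern targets (illustrative):
#   "  Front Left: Playback 52428 [80%] [on]"  ->  captured group "80"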
getvol_cmd = "amixer -c {cardid} sget {channel}"
voltoggle_cmd = "amixer -c {cardid} -q sset {channel} toggle"
volup_cmd = "amixer -c {cardid} -q sset {channel} {increment}dB+"
voldown_cmd = "amixer -c {cardid} -q sset {channel} {increment}dB-"
class Volume(ThreadedPollText):
defaults = [
('update_interval', 3, 'The update interval'),
("cardid", 0, "Card Id"),
("channel", "Master", "Channel"),
("vol_increment", 2, "dB to change the volume"),
]
def __init__(self, **config):
ThreadedPollText.__init__(self, **config)
self.add_defaults(Volume.defaults)
self.markup = True
self.volume = None
self.muted = None
self.is_new_volume = False
self.clear_new = None
def get_volume(self):
cmd = self.format_cmd(getvol_cmd)
try:
mixer_out = subprocess.check_output(shlex.split(cmd)).decode()
except subprocess.CalledProcessError:
return None
if '[off]' in mixer_out:
return -1
volgroups = re_vol.search(mixer_out)
if volgroups:
return int(volgroups.groups()[0])
else:
# this shouldn't happen
return None
def poll(self):
next_volume = self.get_volume()
if next_volume is None:
return "VolumeError "
if self.volume is not None and (next_volume != self.volume):
if self.clear_new is not None:
self.clear_new.cancel()
self.is_new_volume = True
def clear_it(w):
w.is_new_volume = False
w.tick()
self.clear_new = self.qtile.call_later(3, clear_it, self)
self.volume = next_volume
muted = self.volume < 0
if muted:
return '<big>\U0001f507</big>'
elif self.volume <= VOL_LOW:
icon = '<big>\uf026</big> '
elif self.volume <= VOL_HIGH:
icon = '<big>\uf027</big> '
else:
icon = '<big>\uf028</big> '
if self.is_new_volume:
return icon + " {:d}".format(self.volume)
return icon
def button_press(self, x, y, button):
if button == 1:
self.cmd_toggle()
elif button == 4:
self.cmd_volume_up()
elif button == 5:
self.cmd_volume_down()
def cmd_toggle(self):
cmd = self.format_cmd(voltoggle_cmd)
process = subprocess.call(shlex.split(cmd))
self.tick()
def cmd_volume_up(self):
cmd = self.format_cmd(volup_cmd, increment=self.vol_increment)
process = subprocess.call(shlex.split(cmd))
self.tick()
def cmd_volume_down(self):
cmd = self.format_cmd(voldown_cmd, increment=self.vol_increment)
process = subprocess.call(shlex.split(cmd))
self.tick()
def format_cmd(self, cmd, **kwargs):
return cmd.format(
cardid=self.cardid,
channel=self.channel,
**kwargs
)
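# Hedged usage sketch (import path is an assumption; this is a local widget
# subclass, not libqtile's built-in Volume):
#   from volume_widget import Volume
#   bar_widgets = [Volume(cardid=0, channel="Master", vol_increment=2)]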
|
py | b4085c4743f1feae9e3e7c0b1bf3732f4758b540 | # Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions
from neutron.plugins.common import constants
from networking_cisco.plugins.cisco.common import utils
LOG = logging.getLogger(__name__)
ROUTERTYPE = 'routertype'
ROUTERTYPE_ALIAS = ROUTERTYPE
TYPE_ATTR = ROUTERTYPE + ':id'
ROUTER_TYPES = ROUTERTYPE + 's'
RESOURCE_ATTRIBUTE_MAP = {
ROUTER_TYPES: {
'id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None}, 'is_visible': True,
'default': None, 'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'is_visible': True,
'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None},
'tenant_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True, 'is_visible': True},
'template_id': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'validate': {'type:uuid': None}, 'is_visible': True},
'ha_enabled_by_default': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_boolean,
'validate': {'type:boolean': None},
'default': False, 'is_visible': True},
'shared': {'allow_post': True, 'allow_put': False,
'convert_to': attr.convert_to_boolean,
'validate': {'type:boolean': None}, 'default': True,
'is_visible': True},
#TODO(bobmel): add HA attribute: One of None, 'GPLB', 'VRRP', or 'HSRP'
'slot_need': {'allow_post': True, 'allow_put': True,
'validate': {'type:non_negative': None},
'convert_to': attr.convert_to_int,
'default': 0, 'is_visible': True},
'scheduler': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'convert_to': utils.convert_validate_driver_class,
'is_visible': True},
'driver': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'convert_to': utils.convert_validate_driver_class,
'is_visible': True},
'cfg_agent_service_helper': {
'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'convert_to': utils.convert_validate_driver_class,
'is_visible': True},
'cfg_agent_driver': {'allow_post': True, 'allow_put': False,
'required_by_policy': True,
'convert_to': utils.convert_validate_driver_class,
'is_visible': True},
}
}
EXTENDED_ATTRIBUTES_2_0 = {
'routers': {
TYPE_ATTR: {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': attr.ATTR_NOT_SPECIFIED,
'is_visible': True},
}
}
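# Hedged illustration of how the extended attribute surfaces on the router
# resource (the request body shape is an assumption based on TYPE_ATTR above):
#   POST /v2.0/routers
#   {"router": {"name": "r1", "routertype:id": "<router type uuid or name>"}}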
class Routertype(extensions.ExtensionDescriptor):
"""Extension class to define different types of Neutron routers.
This class is used by Neutron's extension framework to support
definition of different types of Neutron Routers.
Attribute 'router_type:id' is the uuid or name of a certain router type.
It can be set during creation of Neutron router. If a Neutron router is
moved (by admin user) to a hosting device of a different hosting device
type, the router type of the Neutron router will also change. Non-admin
users can request that a Neutron router's type is changed.
To create a router of router type <name>:
(shell) router-create <router_name> --router_type:id <uuid_or_name>
"""
@classmethod
def get_name(cls):
return "Router types for routing service"
@classmethod
def get_alias(cls):
return ROUTERTYPE_ALIAS
@classmethod
def get_description(cls):
return "Introduces router types for Neutron Routers"
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/" + ROUTERTYPE + "/api/v2.0"
@classmethod
def get_updated(cls):
return "2014-02-07T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.L3_ROUTER_NAT)
def get_extended_resources(self, version):
if version == "2.0":
return EXTENDED_ATTRIBUTES_2_0
else:
return {}
# router_type exceptions
class RouterTypeInUse(exceptions.InUse):
message = _("Router type %(id)s in use.")
class RouterTypeNotFound(exceptions.NotFound):
message = _("Router type %(id)s does not exist")
class MultipleRouterTypes(exceptions.NeutronException):
message = _("Multiple router type with same name %(name)s exist. Id "
"must be used to specify router type.")
class SchedulerNotFound(exceptions.NetworkNotFound):
message = _("Scheduler %(scheduler)s does not exist")
class RouterTypeAlreadyDefined(exceptions.NeutronException):
message = _("Router type %(type) already exists")
class NoSuchHostingDeviceTemplateForRouterType(exceptions.NeutronException):
message = _("No hosting device template with id %(type) exists")
class HostingDeviceTemplateUsedByRouterType(exceptions.NeutronException):
message = _("Router type %(type) already defined for Hosting device "
"template with id %(type)")
class RouterTypeHasRouters(exceptions.NeutronException):
message = _("Router type %(type) cannot be deleted since routers "
"of that type exists")
class RoutertypePluginBase(object):
"""REST API to manage router types.
All methods except listing require admin context.
"""
@abc.abstractmethod
def create_routertype(self, context, routertype):
"""Creates a router type.
Also binds it to the specified hosting device template.
"""
pass
@abc.abstractmethod
def update_routertype(self, context, id, routertype):
"""Updates a router type."""
pass
@abc.abstractmethod
def delete_routertype(self, context, id):
"""Deletes a router type."""
pass
@abc.abstractmethod
def get_routertype(self, context, id, fields=None):
"""Lists defined router type."""
pass
@abc.abstractmethod
def get_routertypes(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
"""Lists defined router types."""
pass
|
py | b4085e304594a831f1cd39e9c595ffe72af1b3bc | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
build_sampler, merge_aug_bboxes, merge_aug_masks,
multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
@HEADS.register_module()
class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1712.00726
"""
def __init__(self,
num_stages,
stage_loss_weights,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert shared_head is None, \
'Shared head is not supported in Cascade RCNN anymore'
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
super(CascadeRoIHead, self).__init__(
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
shared_head=shared_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
"""Initialize box head and box roi extractor.
Args:
bbox_roi_extractor (dict): Config of box roi extractor.
bbox_head (dict): Config of box in box head.
"""
self.bbox_roi_extractor = ModuleList()
self.bbox_head = ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(self.num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(self.num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))
self.bbox_head.append(build_head(head))
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize mask head and mask roi extractor.
Args:
mask_roi_extractor (dict): Config of mask roi extractor.
mask_head (dict): Config of mask in mask head.
"""
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(self.num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(build_head(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(self.num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(
build_roi_extractor(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
def init_assigner_sampler(self):
"""Initialize assigner and sampler for each stage."""
self.bbox_assigner = []
self.bbox_sampler = []
if self.train_cfg is not None:
for idx, rcnn_train_cfg in enumerate(self.train_cfg):
self.bbox_assigner.append(
build_assigner(rcnn_train_cfg.assigner))
self.current_stage = idx
self.bbox_sampler.append(
build_sampler(rcnn_train_cfg.sampler, context=self))
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask heads
if self.with_mask:
mask_rois = rois[:100]
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def _bbox_forward(self, stage, x, rois):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(stage, x, rois)
bbox_targets = self.bbox_head[stage].get_targets(
sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
return bbox_results
def _mask_forward(self, stage, x, rois):
"""Mask head forward function used in both training and testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_pred = mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred)
return mask_results
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
bbox_feats=None):
"""Run forward function and calculate loss for mask head in
training."""
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(stage, x, pos_rois)
mask_targets = self.mask_head[stage].get_targets(
sampling_results, gt_masks, rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask)
return mask_results
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
losses = dict()
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = self._bbox_forward_train(i, x, sampling_results,
gt_bboxes, gt_labels,
rcnn_train_cfg)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(
i, x, sampling_results, gt_masks, rcnn_train_cfg,
bbox_results['bbox_feats'])
for name, value in mask_results['loss_mask'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine bboxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
# bbox_targets is a tuple
roi_labels = bbox_results['bbox_targets'][0]
with torch.no_grad():
cls_score = bbox_results['cls_score']
if self.bbox_head[i].custom_activation:
cls_score = self.bbox_head[i].loss_cls.get_activation(
cls_score)
# Empty proposal.
if cls_score.numel() == 0:
break
roi_labels = torch.where(
roi_labels == self.bbox_head[i].num_classes,
cls_score[:, :-1].argmax(1), roi_labels)
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head[-1].num_classes)
]] * num_imgs
if self.with_mask:
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(
len(proposals) for proposals in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
else:
bbox_pred = self.bbox_head[i].bbox_pred_split(
bbox_pred, num_proposals_per_img)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
if self.bbox_head[i].custom_activation:
cls_score = [
self.bbox_head[i].loss_cls.get_activation(s)
for s in cls_score
]
refine_rois_list = []
for j in range(num_imgs):
if rois[j].shape[0] > 0:
bbox_label = cls_score[j][:, :-1].argmax(dim=1)
refined_rois = self.bbox_head[i].regress_by_class(
rois[j], bbox_label, bbox_pred[j], img_metas[j])
refine_rois_list.append(refined_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
ms_bbox_result['ensemble'] = bbox_results
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
mask_rois = bbox2roi(_bboxes)
num_mask_rois_per_img = tuple(
_bbox.size(0) for _bbox in _bboxes)
aug_masks = []
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
mask_pred = mask_pred.split(num_mask_rois_per_img, 0)
aug_masks.append([
m.sigmoid().cpu().detach().numpy() for m in mask_pred
])
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[]
for _ in range(self.mask_head[-1].num_classes)])
else:
aug_mask = [mask[i] for mask in aug_masks]
merged_masks = merge_aug_masks(
aug_mask, [[img_metas[i]]] * self.num_stages,
rcnn_test_cfg)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks, _bboxes[i], det_labels[i],
rcnn_test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
ms_segm_result['ensemble'] = segm_results
if self.with_mask:
results = list(
zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
else:
results = ms_bbox_result['ensemble']
return results
def aug_test(self, features, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
rcnn_test_cfg = self.test_cfg
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(features, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
# "ms" in variable names means multi-stage
ms_scores = []
rois = bbox2roi([proposals])
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
ms_scores.append(bbox_results['cls_score'])
if i < self.num_stages - 1:
cls_score = bbox_results['cls_score']
if self.bbox_head[i].custom_activation:
cls_score = self.bbox_head[i].loss_cls.get_activation(
cls_score)
bbox_label = cls_score[:, :-1].argmax(dim=1)
rois = self.bbox_head[i].regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [[]
for _ in range(self.mask_head[-1].num_classes)]
else:
aug_masks = []
aug_img_metas = []
for x, img_meta in zip(features, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
aug_img_metas.append(img_meta)
merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
dummy_scale_factor = np.ones(4)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
rcnn_test_cfg,
ori_shape,
scale_factor=dummy_scale_factor,
rescale=False)
return [(bbox_result, segm_result)]
else:
return [bbox_result]
def onnx_export(self, x, proposals, img_metas):
assert self.with_bbox, 'Bbox head must be implemented.'
        assert proposals.shape[0] == 1, 'Only one input image is supported ' \
            'when exporting to ONNX'
# remove the scores
rois = proposals[..., :-1]
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 4)
# add dummy batch index
rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1)
max_shape = img_metas[0]['img_shape_for_onnx']
ms_scores = []
rcnn_test_cfg = self.test_cfg
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img,
rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
assert self.bbox_head[i].reg_class_agnostic
new_rois = self.bbox_head[i].bbox_coder.decode(
rois[..., 1:], bbox_pred, max_shape=max_shape)
rois = new_rois.reshape(-1, new_rois.shape[-1])
# add dummy batch index
rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois],
dim=-1)
cls_score = sum(ms_scores) / float(len(ms_scores))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)
rois = rois.reshape(batch_size, num_proposals_per_img, -1)
det_bboxes, det_labels = self.bbox_head[-1].onnx_export(
rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg)
if not self.with_mask:
return det_bboxes, det_labels
else:
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
rois = det_bboxes[..., :4]
mask_rois = torch.cat([batch_index, rois], dim=-1)
mask_rois = mask_rois.view(-1, 5)
aug_masks = []
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
mask_pred = mask_results['mask_pred']
aug_masks.append(mask_pred)
max_shape = img_metas[0]['img_shape_for_onnx']
# calculate the mean of masks from several stage
mask_pred = sum(aug_masks) / len(aug_masks)
segm_results = self.mask_head[-1].onnx_export(
mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1),
self.test_cfg, max_shape)
segm_results = segm_results.reshape(batch_size,
det_bboxes.shape[1],
max_shape[0], max_shape[1])
return det_bboxes, det_labels, segm_results
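# Hedged config sketch for this head (values follow the common Cascade R-CNN
# setup and are illustrative, not taken from this file):
#   roi_head = dict(
#       type='CascadeRoIHead',
#       num_stages=3,
#       stage_loss_weights=[1, 0.5, 0.25],
#       bbox_roi_extractor=dict(...),  # shared across stages when not a list
#       bbox_head=[dict(...), dict(...), dict(...)])  # one bbox head per stage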
|
py | b4085ebae4161a84c405dcfb28cde3758e172409 | # This file is part of Androguard.
#
# Copyright (C) 2012/2013/2014, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from androguard.core import bytecode
from androguard.core.androconf import CONF, debug, warning, is_android_raw
from androguard.util import read
import sys
import re
import struct
from struct import pack, unpack, calcsize
DEX_FILE_MAGIC_35 = 'dex\n035\x00'
DEX_FILE_MAGIC_36 = 'dex\n036\x00'
ODEX_FILE_MAGIC_35 = 'dey\n035\x00'
ODEX_FILE_MAGIC_36 = 'dey\n036\x00'
TYPE_MAP_ITEM = {
0x0: "TYPE_HEADER_ITEM",
0x1: "TYPE_STRING_ID_ITEM",
0x2: "TYPE_TYPE_ID_ITEM",
0x3: "TYPE_PROTO_ID_ITEM",
0x4: "TYPE_FIELD_ID_ITEM",
0x5: "TYPE_METHOD_ID_ITEM",
0x6: "TYPE_CLASS_DEF_ITEM",
0x1000: "TYPE_MAP_LIST",
0x1001: "TYPE_TYPE_LIST",
0x1002: "TYPE_ANNOTATION_SET_REF_LIST",
0x1003: "TYPE_ANNOTATION_SET_ITEM",
0x2000: "TYPE_CLASS_DATA_ITEM",
0x2001: "TYPE_CODE_ITEM",
0x2002: "TYPE_STRING_DATA_ITEM",
0x2003: "TYPE_DEBUG_INFO_ITEM",
0x2004: "TYPE_ANNOTATION_ITEM",
0x2005: "TYPE_ENCODED_ARRAY_ITEM",
0x2006: "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
ACCESS_FLAGS = [
(0x1, 'public'),
(0x2, 'private'),
(0x4, 'protected'),
(0x8, 'static'),
(0x10, 'final'),
(0x20, 'synchronized'),
(0x40, 'bridge'),
(0x80, 'varargs'),
(0x100, 'native'),
(0x200, 'interface'),
(0x400, 'abstract'),
(0x800, 'strictfp'),
(0x1000, 'synthetic'),
(0x4000, 'enum'),
(0x8000, 'unused'),
(0x10000, 'constructor'),
(0x20000, 'synchronized'),
]
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
}
def get_access_flags_string(value):
"""
Transform an access flags to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string
"""
buff = ""
for i in ACCESS_FLAGS:
if (i[0] & value) == i[0]:
buff += i[1] + " "
if buff != "":
return buff[:-1]
return buff
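# Example (flag values taken from ACCESS_FLAGS above):
#   get_access_flags_string(0x1 | 0x8 | 0x10) == "public static final"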
def get_type(atype, size=None):
"""
    Retrieve the type of a descriptor (e.g. 'I' -> 'int')
"""
if atype.startswith('java.lang'):
atype = atype.replace('java.lang.', '')
    res = TYPE_DESCRIPTOR.get(atype)  # the 'java.lang.' prefix was already removed above
if res is None:
if atype[0] == 'L':
res = atype[1:-1].replace('/', '.')
elif atype[0] == '[':
if size is None:
res = '%s[]' % get_type(atype[1:])
else:
res = '%s[%s]' % (get_type(atype[1:]), size)
else:
res = atype
return res
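# Examples (derived from the descriptor rules above):
#   get_type('I')                   == 'int'
#   get_type('[I')                  == 'int[]'
#   get_type('Ljava/lang/String;')  == 'java.lang.String'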
MATH_DVM_OPCODES = { "add." : '+',
"div." : '/',
"mul." : '*',
"or." : '|',
"sub." : '-',
"and." : '&',
"xor." : '^',
"shl." : "<<",
"shr." : ">>",
}
FIELD_READ_DVM_OPCODES = [ ".get" ]
FIELD_WRITE_DVM_OPCODES = [ ".put" ]
BREAK_DVM_OPCODES = [ "invoke.", "move.", ".put", "if." ]
BRANCH_DVM_OPCODES = [ "throw", "throw.", "if.", "goto", "goto.", "return", "return.", "packed-switch$", "sparse-switch$" ]
def clean_name_instruction( instruction ):
op_value = instruction.get_op_value()
# goto range
if op_value >= 0x28 and op_value <= 0x2a:
return "goto"
return instruction.get_name()
def static_operand_instruction( instruction ):
buff = ""
if isinstance(instruction, Instruction):
# get instructions without registers
for val in instruction.get_literals():
buff += "%s" % val
op_value = instruction.get_op_value()
if op_value == 0x1a or op_value == 0x1b:
buff += instruction.get_string()
return buff
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def readuleb128(buff):
result = ord( buff.read(1) )
if result > 0x7f:
cur = ord( buff.read(1) )
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f:
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 14
if cur > 0x7f:
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 21
if cur > 0x7f:
cur = ord( buff.read(1) )
if cur > 0x0f:
warning("possible error while decoding number")
result |= cur << 28
return result
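# Worked example for the ULEB128 decoder above (standard LEB128 test vector,
# illustrative only): the byte sequence e5 8e 26 decodes as
# 0x65 | (0x0e << 7) | (0x26 << 14) = 624485.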
def readusleb128(buff):
result = ord( buff.read(1) )
if result > 0x7f:
cur = ord( buff.read(1) )
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f:
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 14
if cur > 0x7f:
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 21
if cur > 0x7f:
cur = ord( buff.read(1) )
result |= cur << 28
return result
def readuleb128p1(buff):
return readuleb128( buff ) - 1
def readsleb128(buff):
result = unpack( '=b', buff.read(1) )[0]
if result <= 0x7f:
result = (result << 25)
if result > 0x7fffffff:
result = (0x7fffffff & result) - 0x80000000
result = result >> 25
else:
cur = unpack( '=b', buff.read(1) )[0]
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f:
result = (result << 18) >> 18
else:
cur = unpack( '=b', buff.read(1) )[0]
result |= (cur & 0x7f) << 14
if cur <= 0x7f:
result = (result << 11) >> 11
else:
cur = unpack( '=b', buff.read(1) )[0]
result |= (cur & 0x7f) << 21
if cur <= 0x7f:
result = (result << 4) >> 4
else:
cur = unpack( '=b', buff.read(1) )[0]
result |= cur << 28
return result
def get_sbyte(buff):
return unpack( '=b', buff.read(1) )[0]
def readsleb128_2(buff):
result = get_sbyte(buff)
if result <= 0x7f:
result = (result << 25) >> 25
else:
cur = get_sbyte(buff)
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f:
result = (result << 18) >> 18
else:
cur = get_sbyte(buff)
result |= (cur & 0x7f) << 14
if cur <= 0x7f:
result = (result << 11) >> 11
else:
cur = get_sbyte(buff)
result |= (cur & 0x7f) << 21
if cur <= 0x7f:
result = (result << 4) >> 4
else:
cur = get_sbyte(buff)
result |= cur << 28
return result
def writeuleb128(value):
remaining = value >> 7
buff = ""
while remaining > 0:
buff += pack( "=B", ((value & 0x7f) | 0x80) )
value = remaining
remaining >>= 7
buff += pack( "=B", value & 0x7f )
return buff
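# Sketch of the round trip with the reader above (illustrative only):
# writeuleb128(624485) produces '\xe5\x8e\x26', which readuleb128 decodes back
# to 624485 when handed a buffer object exposing read().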
def writesleb128(value):
remaining = value >> 7
hasMore = True
end = 0
buff = ""
if (value & (-sys.maxint - 1)) == 0:
end = 0
else:
end = -1
while hasMore:
hasMore = (remaining != end) or ((remaining & 1) != ((value >> 6) & 1))
tmp = 0
if hasMore:
tmp = 0x80
buff += pack( "=B", (value & 0x7f) | (tmp) )
value = remaining
remaining >>= 7
return buff
def determineNext(i, end, m):
op_value = i.get_op_value()
# throw + return*
if (op_value == 0x27) or (0x0e <= op_value <= 0x11):
return [ -1 ]
# goto
elif 0x28 <= op_value <= 0x2a:
off = i.get_ref_off() * 2
return [ off + end ]
# if
elif 0x32 <= op_value <= 0x3d:
off = i.get_ref_off() * 2
return [ end + i.get_length(), off + (end) ]
# sparse/packed
elif op_value in (0x2b, 0x2c):
x = []
x.append( end + i.get_length() )
code = m.get_code().get_bc()
off = i.get_ref_off() * 2
data = code.get_ins_off( off + end )
if data != None:
for target in data.get_targets():
x.append( target*2 + end )
return x
return []
def determineException(vm, m):
# no exceptions !
if m.get_code().get_tries_size() <= 0:
return []
h_off = {}
handler_catch_list = m.get_code().get_handlers()
for try_item in m.get_code().get_tries():
offset_handler = try_item.get_handler_off() + handler_catch_list.get_off()
        if offset_handler not in h_off:
            h_off[ offset_handler ] = []
        h_off[ offset_handler ].append( [ try_item ] )
#print m.get_name(), "\t HANDLER_CATCH_LIST SIZE", handler_catch_list.size, handler_catch_list.get_offset()
for handler_catch in handler_catch_list.get_list():
if handler_catch.get_off() not in h_off:
continue
for i in h_off[ handler_catch.get_off() ]:
i.append( handler_catch )
exceptions = []
#print m.get_name(), h_off
for i in h_off:
for value in h_off[ i ]:
try_value = value[0]
z = [ try_value.get_start_addr() * 2, (try_value.get_start_addr() * 2) + (try_value.get_insn_count() * 2) - 1 ]
handler_catch = value[1]
if handler_catch.get_size() <= 0:
z.append( [ "Ljava/lang/Throwable;", handler_catch.get_catch_all_addr() * 2 ] )
for handler in handler_catch.get_handlers():
z.append( [ vm.get_cm_type( handler.get_type_idx() ), handler.get_addr() * 2 ] )
exceptions.append( z )
#print m.get_name(), exceptions
return exceptions
class HeaderItem(object):
"""
This class can parse an header_item of a dex file
:param buff: a string which represents a Buff object of the header_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.magic = unpack("=Q", buff.read(8))[0]
self.checksum = unpack("=i", buff.read(4))[0]
self.signature = unpack("=20s", buff.read(20))[0]
self.file_size = unpack("=I", buff.read(4))[0]
self.header_size = unpack("=I", buff.read(4))[0]
self.endian_tag = unpack("=I", buff.read(4))[0]
self.link_size = unpack("=I", buff.read(4))[0]
self.link_off = unpack("=I", buff.read(4))[0]
self.map_off = unpack("=I", buff.read(4))[0]
self.string_ids_size = unpack("=I", buff.read(4))[0]
self.string_ids_off = unpack("=I", buff.read(4))[0]
self.type_ids_size = unpack("=I", buff.read(4))[0]
self.type_ids_off = unpack("=I", buff.read(4))[0]
self.proto_ids_size = unpack("=I", buff.read(4))[0]
self.proto_ids_off = unpack("=I", buff.read(4))[0]
self.field_ids_size = unpack("=I", buff.read(4))[0]
self.field_ids_off = unpack("=I", buff.read(4))[0]
self.method_ids_size = unpack("=I", buff.read(4))[0]
self.method_ids_off = unpack("=I", buff.read(4))[0]
self.class_defs_size = unpack("=I", buff.read(4))[0]
self.class_defs_off = unpack("=I", buff.read(4))[0]
self.data_size = unpack("=I", buff.read(4))[0]
self.data_off = unpack("=I", buff.read(4))[0]
self.map_off_obj = None
self.string_off_obj = None
self.type_off_obj = None
self.proto_off_obj = None
self.field_off_obj = None
self.method_off_obj = None
self.class_off_obj = None
self.data_off_obj = None
def reload(self):
pass
def get_obj(self):
if self.map_off_obj == None:
self.map_off_obj = self.__CM.get_item_by_offset( self.map_off )
if self.string_off_obj == None:
self.string_off_obj = self.__CM.get_item_by_offset( self.string_ids_off )
if self.type_off_obj == None:
self.type_off_obj = self.__CM.get_item_by_offset( self.type_ids_off )
if self.proto_off_obj == None:
self.proto_off_obj = self.__CM.get_item_by_offset( self.proto_ids_off )
if self.field_off_obj == None:
self.field_off_obj = self.__CM.get_item_by_offset( self.field_ids_off )
if self.method_off_obj == None:
self.method_off_obj = self.__CM.get_item_by_offset( self.method_ids_off )
if self.class_off_obj == None:
self.class_off_obj = self.__CM.get_item_by_offset( self.class_defs_off )
if self.data_off_obj == None:
self.data_off_obj = self.__CM.get_item_by_offset( self.data_off )
self.map_off = self.map_off_obj.get_off()
self.string_ids_size = len(self.string_off_obj)
self.string_ids_off = self.string_off_obj[0].get_off()
self.type_ids_size = len(self.type_off_obj.type)
self.type_ids_off = self.type_off_obj.get_off()
self.proto_ids_size = len(self.proto_off_obj.proto)
self.proto_ids_off = self.proto_off_obj.get_off()
self.field_ids_size = len(self.field_off_obj.elem)
self.field_ids_off = self.field_off_obj.get_off()
self.method_ids_size = len(self.method_off_obj.methods)
self.method_ids_off = self.method_off_obj.get_off()
self.class_defs_size = len(self.class_off_obj.class_def)
self.class_defs_off = self.class_off_obj.get_off()
#self.data_size = len(self.data_off_obj)
self.data_off = self.data_off_obj[0].get_off()
return pack("=Q", self.magic) + \
pack("=I", self.checksum) + \
pack("=20s", self.signature) + \
pack("=I", self.file_size) + \
pack("=I", self.header_size) + \
pack("=I", self.endian_tag) + \
pack("=I", self.link_size) + \
pack("=I", self.link_off) + \
pack("=I", self.map_off) + \
pack("=I", self.string_ids_size) + \
pack("=I", self.string_ids_off) + \
pack("=I", self.type_ids_size) + \
pack("=I", self.type_ids_off) + \
pack("=I", self.proto_ids_size) + \
pack("=I", self.proto_ids_off) + \
pack("=I", self.field_ids_size) + \
pack("=I", self.field_ids_off) + \
pack("=I", self.method_ids_size) + \
pack("=I", self.method_ids_off) + \
pack("=I", self.class_defs_size) + \
pack("=I", self.class_defs_off) + \
pack("=I", self.data_size) + \
pack("=I", self.data_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_raw())
def show(self):
bytecode._PrintSubBanner("Header Item")
bytecode._PrintDefault("magic=%s, checksum=%s, signature=%s\n" % (self.magic, self.checksum, self.signature))
bytecode._PrintDefault("file_size=%x, header_size=%x, endian_tag=%x\n" % (self.file_size, self.header_size, self.endian_tag))
bytecode._PrintDefault("link_size=%x, link_off=%x\n" % (self.link_size, self.link_off))
bytecode._PrintDefault("map_off=%x\n" % (self.map_off))
bytecode._PrintDefault("string_ids_size=%x, string_ids_off=%x\n" % (self.string_ids_size, self.string_ids_off))
bytecode._PrintDefault("type_ids_size=%x, type_ids_off=%x\n" % (self.type_ids_size, self.type_ids_off))
bytecode._PrintDefault("proto_ids_size=%x, proto_ids_off=%x\n" % (self.proto_ids_size, self.proto_ids_off))
bytecode._PrintDefault("field_ids_size=%x, field_ids_off=%x\n" % (self.field_ids_size, self.field_ids_off))
bytecode._PrintDefault("method_ids_size=%x, method_ids_off=%x\n" % (self.method_ids_size, self.method_ids_off))
bytecode._PrintDefault("class_defs_size=%x, class_defs_off=%x\n" % (self.class_defs_size, self.class_defs_off))
bytecode._PrintDefault("data_size=%x, data_off=%x\n" % (self.data_size, self.data_off))
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
class AnnotationOffItem(object):
"""
This class can parse an annotation_off_item of a dex file
:param buff: a string which represents a Buff object of the annotation_off_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.annotation_off = unpack("=I", buff.read( 4 ) )[0]
def show(self):
bytecode._PrintSubBanner("Annotation Off Item")
bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotation_off)
def get_obj(self):
if self.annotation_off != 0:
self.annotation_off = self.__CM.get_obj_by_offset( self.annotation_off ).get_off()
return pack("=I", self.annotation_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class AnnotationSetItem(object):
"""
This class can parse an annotation_set_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.annotation_off_item = []
self.size = unpack("=I", buff.read( 4 ) )[0]
for i in xrange(0, self.size):
self.annotation_off_item.append( AnnotationOffItem(buff, cm) )
def get_annotation_off_item(self):
"""
Return the offset from the start of the file to an annotation
:rtype: a list of :class:`AnnotationOffItem`
"""
return self.annotation_off_item
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Annotation Set Item")
for i in self.annotation_off_item:
i.show()
def get_obj(self):
return pack("=I", self.size)
def get_raw(self):
return self.get_obj() + ''.join(i.get_raw() for i in self.annotation_off_item)
def get_length(self):
length = len(self.get_obj())
for i in self.annotation_off_item:
length += i.get_length()
return length
class AnnotationSetRefItem(object):
"""
This class can parse an annotation_set_ref_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_ref_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_annotations_off(self):
"""
Return the offset from the start of the file to the referenced annotation set or
0 if there are no annotations for this element.
:rtype: int
"""
return self.annotations_off
def show(self):
bytecode._PrintSubBanner("Annotation Set Ref Item")
bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotation_off)
def get_obj(self):
if self.annotations_off != 0:
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.annotations_off)
def get_raw(self):
return self.get_obj()
class AnnotationSetRefList(object):
"""
This class can parse an annotation_set_ref_list_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_ref_list_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.__CM = cm
self.list = []
self.size = unpack("=I", buff.read( 4 ) )[0]
for i in xrange(0, self.size):
self.list.append( AnnotationSetRefItem(buff, cm) )
def get_list(self):
"""
Return elements of the list
:rtype: :class:`AnnotationSetRefItem`
"""
return self.list
def get_off(self):
return self.offset
def set_off(self, off):
self.offset = off
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Annotation Set Ref List Item")
for i in self.list:
i.show()
def get_obj(self):
return [ i for i in self.list ]
def get_raw(self):
return pack("=I", self.size) + ''.join(i.get_raw() for i in self.list)
def get_length(self):
return len(self.get_raw())
class FieldAnnotation(object):
"""
This class can parse a field_annotation of a dex file
:param buff: a string which represents a Buff object of the field_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.__CM = cm
self.field_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_field_idx(self):
"""
Return the index into the field_ids list for the identity of the field being annotated
:rtype: int
"""
        return self.field_idx
def get_annotations_off(self):
"""
Return the offset from the start of the file to the list of annotations for the field
:rtype: int
"""
return self.annotations_off
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def show(self):
bytecode._PrintSubBanner("Field Annotation")
bytecode._PrintDefault( "field_idx=0x%x annotations_off=0x%x\n" % (self.field_idx, self.annotations_off) )
def get_obj(self):
if self.annotations_off != 0:
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.field_idx) + pack("=I", self.annotations_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_raw())
class MethodAnnotation(object):
"""
This class can parse a method_annotation of a dex file
:param buff: a string which represents a Buff object of the method_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.__CM = cm
self.method_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_method_idx(self):
"""
Return the index into the method_ids list for the identity of the method being annotated
:rtype: int
"""
        return self.method_idx
def get_annotations_off(self):
"""
Return the offset from the start of the file to the list of annotations for the method
:rtype: int
"""
return self.annotations_off
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def show(self):
bytecode._PrintSubBanner("Method Annotation")
bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )
def get_obj(self):
if self.annotations_off != 0:
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.method_idx) + pack("=I", self.annotations_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_raw())
class ParameterAnnotation(object):
"""
This class can parse a parameter_annotation of a dex file
:param buff: a string which represents a Buff object of the parameter_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.__CM = cm
self.method_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_method_idx(self):
"""
Return the index into the method_ids list for the identity of the method whose parameters are being annotated
:rtype: int
"""
        return self.method_idx
def get_annotations_off(self):
"""
Return the offset from the start of the file to the list of annotations for the method parameters
:rtype: int
"""
return self.annotations_off
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def show(self):
bytecode._PrintSubBanner("Parameter Annotation")
bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )
def get_obj(self):
if self.annotations_off != 0:
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.method_idx) + pack("=I", self.annotations_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_raw())
class AnnotationsDirectoryItem(object):
"""
This class can parse an annotations_directory_item of a dex file
:param buff: a string which represents a Buff object of the annotations_directory_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.class_annotations_off = unpack("=I", buff.read(4))[0]
self.annotated_fields_size = unpack("=I", buff.read(4))[0]
self.annotated_methods_size = unpack("=I", buff.read(4))[0]
self.annotated_parameters_size = unpack("=I", buff.read(4))[0]
self.field_annotations = []
for i in xrange(0, self.annotated_fields_size):
self.field_annotations.append( FieldAnnotation( buff, cm ) )
self.method_annotations = []
for i in xrange(0, self.annotated_methods_size):
self.method_annotations.append( MethodAnnotation( buff, cm ) )
self.parameter_annotations = []
for i in xrange(0, self.annotated_parameters_size):
self.parameter_annotations.append( ParameterAnnotation( buff, cm ) )
def get_class_annotations_off(self):
"""
Return the offset from the start of the file to the annotations made directly on the class,
or 0 if the class has no direct annotations
:rtype: int
"""
return self.class_annotations_off
def get_annotated_fields_size(self):
"""
Return the count of fields annotated by this item
:rtype: int
"""
return self.annotated_fields_size
def get_annotated_methods_size(self):
"""
Return the count of methods annotated by this item
:rtype: int
"""
return self.annotated_methods_size
def get_annotated_parameters_size(self):
"""
Return the count of method parameter lists annotated by this item
:rtype: int
"""
return self.annotated_parameters_size
def get_field_annotations(self):
"""
Return the list of associated field annotations
:rtype: a list of :class:`FieldAnnotation`
"""
return self.field_annotations
def get_method_annotations(self):
"""
Return the list of associated method annotations
:rtype: a list of :class:`MethodAnnotation`
"""
return self.method_annotations
def get_parameter_annotations(self):
"""
Return the list of associated method parameter annotations
:rtype: a list of :class:`ParameterAnnotation`
"""
return self.parameter_annotations
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Annotations Directory Item")
bytecode._PrintDefault("class_annotations_off=0x%x annotated_fields_size=%d annotated_methods_size=%d annotated_parameters_size=%d\n" %
( self.class_annotations_off,
self.annotated_fields_size,
self.annotated_methods_size,
self.annotated_parameters_size))
for i in self.field_annotations:
i.show()
for i in self.method_annotations:
i.show()
for i in self.parameter_annotations:
i.show()
def get_obj(self):
if self.class_annotations_off != 0:
self.class_annotations_off = self.__CM.get_obj_by_offset( self.class_annotations_off ).get_off()
return pack("=I", self.class_annotations_off) + \
pack("=I", self.annotated_fields_size) + \
pack("=I", self.annotated_methods_size) + \
pack("=I", self.annotated_parameters_size)
def get_raw(self):
return self.get_obj() + \
''.join(i.get_raw() for i in self.field_annotations) + \
''.join(i.get_raw() for i in self.method_annotations) + \
''.join(i.get_raw() for i in self.parameter_annotations)
def get_length(self):
length = len( self.get_obj() )
for i in self.field_annotations:
length += i.get_length()
for i in self.method_annotations:
length += i.get_length()
for i in self.parameter_annotations:
length += i.get_length()
return length
class TypeItem(object):
"""
This class can parse a type_item of a dex file
:param buff: a string which represents a Buff object of the type_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.type_idx = unpack("=H", buff.read(2))[0]
def get_type_idx(self):
"""
Return the index into the type_ids list
:rtype: int
"""
return self.type_idx
def get_string(self):
"""
Return the type string
:rtype: string
"""
return self.__CM.get_type( self.type_idx )
def show(self):
bytecode._PrintSubBanner("Type Item")
bytecode._PrintDefault("type_idx=%d\n" % self.type_idx)
def get_obj(self):
return pack("=H", self.type_idx)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class TypeList(object):
"""
This class can parse a type_list of a dex file
:param buff: a string which represents a Buff object of the type_list
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.pad = ""
if self.offset % 4 != 0:
self.pad = buff.read( self.offset % 4 )
self.len_pad = len(self.pad)
self.size = unpack("=I", buff.read( 4 ) )[0]
self.list = []
for i in xrange(0, self.size):
self.list.append( TypeItem( buff, cm ) )
def get_pad(self):
"""
Return the alignment string
:rtype: string
"""
return self.pad
def get_type_list_off(self):
"""
Return the offset of the item
:rtype: int
"""
return self.offset + self.len_pad
def get_string(self):
"""
Return the concatenation of all strings
:rtype: string
"""
return ' '.join(i.get_string() for i in self.list)
def get_size(self):
"""
Return the size of the list, in entries
:rtype: int
"""
return self.size
def get_list(self):
"""
Return the list of TypeItem
:rtype: a list of :class:`TypeItem` objects
"""
return self.list
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset + self.len_pad
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Type List")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.list:
i.show()
def get_obj(self):
return self.pad + pack("=I", self.size)
def get_raw(self):
return self.get_obj() + ''.join(i.get_raw() for i in self.list)
def get_length(self):
length = len(self.get_obj())
for i in self.list:
length += i.get_length()
return length
DBG_END_SEQUENCE = 0x00 # (none) terminates a debug info sequence for a code_item
DBG_ADVANCE_PC = 0x01 # uleb128 addr_diff addr_diff: amount to add to address register advances the address register without emitting a positions entry
DBG_ADVANCE_LINE = 0x02 # sleb128 line_diff line_diff: amount to change line register by advances the line register without emitting a positions entry
DBG_START_LOCAL = 0x03 # uleb128 register_num
# uleb128p1 name_idx
# uleb128p1 type_idx
# register_num: register that will contain local name_idx: string index of the name
# type_idx: type index of the type introduces a local variable at the current address. Either name_idx or type_idx may be NO_INDEX to indicate that that value is unknown.
DBG_START_LOCAL_EXTENDED = 0x04 # uleb128 register_num uleb128p1 name_idx uleb128p1 type_idx uleb128p1 sig_idx
# register_num: register that will contain local
# name_idx: string index of the name
# type_idx: type index of the type
# sig_idx: string index of the type signature
# introduces a local with a type signature at the current address. Any of name_idx, type_idx, or sig_idx may be NO_INDEX to indicate that that value is unknown. (
# If sig_idx is -1, though, the same data could be represented more efficiently using the opcode DBG_START_LOCAL.)
# Note: See the discussion under "dalvik.annotation.Signature" below for caveats about handling signatures.
DBG_END_LOCAL = 0x05 # uleb128 register_num
# register_num: register that contained local
# marks a currently-live local variable as out of scope at the current address
DBG_RESTART_LOCAL = 0x06 # uleb128 register_num
# register_num: register to restart re-introduces a local variable at the current address.
# The name and type are the same as the last local that was live in the specified register.
DBG_SET_PROLOGUE_END = 0x07 # (none) sets the prologue_end state machine register, indicating that the next position entry that is added should be considered the end of a
# method prologue (an appropriate place for a method breakpoint). The prologue_end register is cleared by any special (>= 0x0a) opcode.
DBG_SET_EPILOGUE_BEGIN = 0x08 # (none) sets the epilogue_begin state machine register, indicating that the next position entry that is added should be considered the beginning
# of a method epilogue (an appropriate place to suspend execution before method exit). The epilogue_begin register is cleared by any special (>= 0x0a) opcode.
DBG_SET_FILE = 0x09 # uleb128p1 name_idx
# name_idx: string index of source file name; NO_INDEX if unknown indicates that all subsequent line number entries make reference to this source file name,
# instead of the default name specified in code_item
DBG_Special_Opcodes_BEGIN = 0x0a # (none) advances the line and address registers, emits a position entry, and clears prologue_end and epilogue_begin. See below for description.
DBG_Special_Opcodes_END = 0xff
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
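# Per the DEX debug_info_item state machine, a "special" opcode (>= 0x0a)
# advances the line and address registers at once. The update below is a
# sketch of the spec formula for reference; it is not code used by the parser
# in this file:
#
#   adjusted = opcode - DBG_Special_Opcodes_BEGIN
#   line += DBG_LINE_BASE + (adjusted % DBG_LINE_RANGE)
#   address += adjusted / DBG_LINE_RANGE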
class DBGBytecode(object):
def __init__(self, cm, op_value):
self.CM = cm
self.op_value = op_value
self.format = []
def get_op_value(self):
return self.op_value
def add(self, value, ttype):
self.format.append( (value, ttype) )
def get_value(self):
if self.get_op_value() == DBG_START_LOCAL:
return self.CM.get_string(self.format[1][0])
elif self.get_op_value() == DBG_START_LOCAL_EXTENDED:
return self.CM.get_string(self.format[1][0])
return None
def show(self):
bytecode._PrintSubBanner("DBGBytecode")
bytecode._PrintDefault("op_value=%x format=%s value=%s\n" % (self.op_value, str(self.format), self.get_value()))
def get_obj(self):
return []
def get_raw(self):
        buff = pack("=B", self.op_value)
for i in self.format:
if i[1] == "u":
buff += writeuleb128( i[0] )
elif i[1] == "s":
buff += writesleb128( i[0] )
return buff
class DebugInfoItem(object):
def __init__(self, buff, cm):
self.CM = cm
self.offset = buff.get_idx()
self.line_start = readuleb128( buff )
self.parameters_size = readuleb128( buff )
#print "line", self.line_start, "params", self.parameters_size
self.parameter_names = []
for i in xrange(0, self.parameters_size):
self.parameter_names.append( readuleb128p1( buff ) )
self.bytecodes = []
bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
self.bytecodes.append( bcode )
while bcode.get_op_value() != DBG_END_SEQUENCE:
bcode_value = bcode.get_op_value()
if bcode_value == DBG_ADVANCE_PC:
bcode.add( readuleb128( buff ), "u" )
elif bcode_value == DBG_ADVANCE_LINE:
bcode.add( readsleb128( buff ), "s" )
elif bcode_value == DBG_START_LOCAL:
bcode.add( readusleb128( buff ), "u" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
elif bcode_value == DBG_START_LOCAL_EXTENDED:
bcode.add( readusleb128( buff ), "u" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
elif bcode_value == DBG_END_LOCAL:
bcode.add( readusleb128( buff ), "u" )
elif bcode_value == DBG_RESTART_LOCAL:
bcode.add( readusleb128( buff ), "u" )
elif bcode_value == DBG_SET_PROLOGUE_END:
pass
elif bcode_value == DBG_SET_EPILOGUE_BEGIN:
pass
elif bcode_value == DBG_SET_FILE:
bcode.add( readuleb128p1( buff ), "u1" )
else : #bcode_value >= DBG_Special_Opcodes_BEGIN and bcode_value <= DBG_Special_Opcodes_END:
pass
bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
self.bytecodes.append( bcode )
def reload(self):
pass
def get_parameters_size(self):
return self.parameters_size
def get_line_start(self):
return self.line_start
def get_parameter_names(self):
return self.parameter_names
def get_translated_parameter_names(self):
l = []
for i in self.parameter_names:
if i == -1:
l.append( None )
else:
l.append( self.CM.get_string( i ) )
return l
def get_bytecodes(self):
return self.bytecodes
def show(self):
bytecode._PrintSubBanner("Debug Info Item")
bytecode._PrintDefault("line_start=%d parameters_size=%d\n" % (self.line_start, self.parameters_size))
nb = 0
for i in self.parameter_names:
bytecode._PrintDefault("parameter_names[%d]=%s\n" % (nb, self.CM.get_string( i )))
nb += 1
for i in self.bytecodes:
i.show()
def get_raw(self):
        return [ bytecode.Buff( self.offset, writeuleb128( self.line_start ) + \
writeuleb128( self.parameters_size ) + \
''.join(writeuleb128(i) for i in self.parameter_names) + \
''.join(i.get_raw() for i in self.bytecodes) ) ]
def get_off(self):
return self.offset
VALUE_BYTE = 0x00 # (none; must be 0) ubyte[1] signed one-byte integer value
VALUE_SHORT = 0x02 # size - 1 (0..1) ubyte[size] signed two-byte integer value, sign-extended
VALUE_CHAR = 0x03 # size - 1 (0..1) ubyte[size] unsigned two-byte integer value, zero-extended
VALUE_INT = 0x04 # size - 1 (0..3) ubyte[size] signed four-byte integer value, sign-extended
VALUE_LONG = 0x06 # size - 1 (0..7) ubyte[size] signed eight-byte integer value, sign-extended
VALUE_FLOAT = 0x10 # size - 1 (0..3) ubyte[size] four-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 32-bit floating point value
VALUE_DOUBLE = 0x11 # size - 1 (0..7) ubyte[size] eight-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 64-bit floating point value
VALUE_STRING = 0x17 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the string_ids section and representing a string value
VALUE_TYPE = 0x18 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the type_ids section and representing a reflective type/class value
VALUE_FIELD = 0x19 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing a reflective field value
VALUE_METHOD = 0x1a # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the method_ids section and representing a reflective method value
VALUE_ENUM = 0x1b # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing the value of an enumerated type constant
VALUE_ARRAY = 0x1c # (none; must be 0) encoded_array an array of values, in the format specified by "encoded_array Format" below. The size of the value is implicit in the encoding.
VALUE_ANNOTATION = 0x1d # (none; must be 0) encoded_annotation a sub-annotation, in the format specified by "encoded_annotation Format" below. The size of the value is implicit in the encoding.
VALUE_NULL = 0x1e # (none; must be 0) (none) null reference value
VALUE_BOOLEAN = 0x1f # boolean (0..1) (none) one-bit value; 0 for false and 1 for true. The bit is represented in the value_arg.
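# The leading byte of an encoded_value packs both fields: the low 5 bits are
# value_type and the high 3 bits are value_arg (see EncodedValue below).
# Worked example (illustrative): a byte of 0x44 gives value_type = 0x04
# (VALUE_INT) and value_arg = 2, i.e. a sign-extended integer stored in
# value_arg + 1 = 3 following bytes.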
class DebugInfoItemEmpty(object):
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.__buff = buff
self.__raw = ""
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
offset = self.offset
n = self.__CM.get_next_offset_item( offset )
s_idx = self.__buff.get_idx()
self.__buff.set_idx( offset )
self.__raw = self.__buff.read( n - offset )
self.__buff.set_idx( s_idx )
def show(self):
pass
def get_obj(self):
return []
def get_raw(self):
return self.__raw
def get_length(self):
return len(self.__raw)
class EncodedArray(object):
"""
This class can parse an encoded_array of a dex file
:param buff: a string which represents a Buff object of the encoded_array
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.size = readuleb128( buff )
self.values = []
for i in xrange(0, self.size):
self.values.append( EncodedValue(buff, cm) )
def get_size(self):
"""
Return the number of elements in the array
:rtype: int
"""
return self.size
def get_values(self):
"""
Return a series of size encoded_value byte sequences in the format specified by this section,
concatenated sequentially
:rtype: a list of :class:`EncodedValue` objects
"""
return self.values
def show(self):
bytecode._PrintSubBanner("Encoded Array")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.values:
i.show()
def get_obj(self):
return writeuleb128( self.size )
def get_raw(self):
return self.get_obj() + ''.join(i.get_raw() for i in self.values)
def get_length(self):
length = len(self.get_obj())
for i in self.values:
length += i.get_length()
return length
class EncodedValue(object):
"""
This class can parse an encoded_value of a dex file
:param buff: a string which represents a Buff object of the encoded_value
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.val = unpack("=B", buff.read(1))[0]
self.value_arg = self.val >> 5
self.value_type = self.val & 0x1f
self.raw_value = None
self.value = ""
# TODO: parse floats/doubles correctly
if self.value_type >= VALUE_SHORT and self.value_type < VALUE_STRING:
self.value, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
elif self.value_type == VALUE_STRING:
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_raw_string(id)
elif self.value_type == VALUE_TYPE:
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_type(id)
elif self.value_type == VALUE_FIELD:
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_field(id)
elif self.value_type == VALUE_METHOD:
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_method(id)
elif self.value_type == VALUE_ENUM:
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_field(id)
elif self.value_type == VALUE_ARRAY:
self.value = EncodedArray( buff, cm )
elif self.value_type == VALUE_ANNOTATION:
self.value = EncodedAnnotation( buff, cm )
elif self.value_type == VALUE_BYTE:
self.value = buff.read( 1 )
elif self.value_type == VALUE_NULL:
self.value = None
elif self.value_type == VALUE_BOOLEAN:
if self.value_arg:
self.value = True
else:
self.value = False
else:
bytecode.Exit( "Unknown value 0x%x" % self.value_type )
def get_value(self):
"""
Return the bytes representing the value, variable in length and interpreted differently for different value_type bytes,
though always little-endian
:rtype: an object representing the value
"""
return self.value
def get_value_type(self):
return self.value_type
def get_value_arg(self):
return self.value_arg
def _getintvalue(self, buf):
ret = 0
shift = 0
for b in buf:
ret |= ord(b) << shift
shift += 8
return ret, buf
def show(self):
bytecode._PrintSubBanner("Encoded Value")
bytecode._PrintDefault("val=%x value_arg=%x value_type=%x\n" % (self.val, self.value_arg, self.value_type))
def get_obj(self):
if isinstance(self.value, str) == False:
return [ self.value ]
return []
def get_raw(self):
if self.raw_value == None:
return pack("=B", self.val) + bytecode.object_to_str( self.value )
else:
return pack("=B", self.val) + bytecode.object_to_str( self.raw_value )
def get_length(self):
if self.raw_value == None:
return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.value ))
else:
return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.raw_value ))
class AnnotationElement(object):
"""
This class can parse an annotation_element of a dex file
:param buff: a string which represents a Buff object of the annotation_element
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.name_idx = readuleb128( buff )
self.value = EncodedValue( buff, cm )
def get_name_idx(self):
"""
Return the element name, represented as an index into the string_ids section
:rtype: int
"""
return self.name_idx
def get_value(self):
"""
Return the element value (EncodedValue)
:rtype: a :class:`EncodedValue` object
"""
return self.value
def show(self):
bytecode._PrintSubBanner("Annotation Element")
bytecode._PrintDefault("name_idx=%d\n" % self.name_idx)
self.value.show()
def get_obj(self):
return writeuleb128(self.name_idx)
def get_raw(self):
return self.get_obj() + self.value.get_raw()
def get_length(self):
return len(self.get_obj()) + self.value.get_length()
class EncodedAnnotation(object):
"""
This class can parse an encoded_annotation of a dex file
:param buff: a string which represents a Buff object of the encoded_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.type_idx = readuleb128( buff )
self.size = readuleb128( buff )
self.elements = []
for i in xrange(0, self.size):
self.elements.append( AnnotationElement( buff, cm ) )
def get_type_idx(self):
"""
Return the type of the annotation. This must be a class (not array or primitive) type
:rtype: int
"""
return self.type_idx
def get_size(self):
"""
Return the number of name-value mappings in this annotation
:rtype:int
"""
return self.size
def get_elements(self):
"""
Return the elements of the annotation, represented directly in-line (not as offsets)
:rtype: a list of :class:`AnnotationElement` objects
"""
return self.elements
def show(self):
bytecode._PrintSubBanner("Encoded Annotation")
bytecode._PrintDefault("type_idx=%d size=%d\n" % (self.type_idx, self.size))
for i in self.elements:
i.show()
def get_obj(self):
return [ i for i in self.elements ]
def get_raw(self):
return writeuleb128(self.type_idx) + writeuleb128(self.size) + ''.join(i.get_raw() for i in self.elements)
def get_length(self):
length = len(writeuleb128(self.type_idx) + writeuleb128(self.size))
for i in self.elements:
length += i.get_length()
return length
class AnnotationItem(object):
"""
This class can parse an annotation_item of a dex file
:param buff: a string which represents a Buff object of the annotation_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.visibility = unpack("=B", buff.read(1))[0]
self.annotation = EncodedAnnotation(buff, cm)
def get_visibility(self):
"""
Return the intended visibility of this annotation
:rtype: int
"""
return self.visibility
def get_annotation(self):
"""
Return the encoded annotation contents
:rtype: a :class:`EncodedAnnotation` object
"""
return self.annotation
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Annotation Item")
bytecode._PrintDefault("visibility=%d\n" % self.visibility)
self.annotation.show()
def get_obj(self):
return [ self.annotation ]
def get_raw(self):
return pack("=B", self.visibility) + self.annotation.get_raw()
def get_length(self):
length = len(pack("=B", self.visibility))
length += self.annotation.get_length()
return length
class EncodedArrayItem(object):
"""
This class can parse an encoded_array_item of a dex file
:param buff: a string which represents a Buff object of the encoded_array_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.value = EncodedArray( buff, cm )
def get_value(self):
"""
Return the bytes representing the encoded array value
:rtype: a :class:`EncodedArray` object
"""
return self.value
def set_off(self, off):
self.offset = off
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("Encoded Array Item")
self.value.show()
def get_obj(self):
return [ self.value ]
def get_raw(self):
return self.value.get_raw()
def get_length(self):
return self.value.get_length()
def get_off(self):
return self.offset
def utf8_to_string(buff, length):
chars = []
for _ in xrange(length):
first_char = ord(buff.read(1))
value = first_char >> 4
if value in (0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07):
if first_char == 0:
warning('at offset %x: single zero byte illegal' % buff.get_idx())
chars.append(chr(first_char))
elif value in (0x0c, 0x0d):
second_char = ord(buff.read(1))
if (second_char & 0xc0) != 0x80:
warning('bad utf8 at offset: %x' % buff.get_idx())
value = ((first_char & 0x1f) << 6) | (second_char & 0x3f)
if value != 0 and value < 0x80:
warning('at offset %x: utf8 should have been represented with one byte encoding' % buff.get_idx())
chars.append(unichr(value))
elif value == 0x0e:
second_char = ord(buff.read(1))
if second_char & 0xc0 != 0x80:
warning('bad utf8 byte %x at offset %x' % (second_char, buff.get_idx()))
third_char = ord(buff.read(1))
if third_char & 0xc0 != 0x80:
warning('bad utf8 byte %x at offset %x' % (third_char, buff.get_idx()))
value = ((first_char & 0x0f) << 12) | ((second_char & 0x3f) << 6) | (third_char & 0x3f)
if value < 0x800:
warning('at offset %x: utf8 should have been represented with two-byte encoding' % buff.get_idx())
chars.append(unichr(value))
else:
warning('at offset %x: illegal utf8' % buff.get_idx())
return ''.join(chars).encode('utf-8')
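# Note: DEX strings use MUTF-8 (Modified UTF-8), not plain UTF-8; for example
# the embedded NUL character is encoded as the two-byte sequence c0 80, which
# the two-byte branch above decodes back to U+0000 without a warning.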
class StringDataItem(object):
"""
This class can parse a string_data_item of a dex file
:param buff: a string which represents a Buff object of the string_data_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.utf16_size = readuleb128( buff )
self.data = utf8_to_string(buff, self.utf16_size)
expected = buff.read(1)
if expected != '\x00':
            warning('\x00 expected at offset: %x, found: %x' % (buff.get_idx(), ord(expected)))
def get_utf16_size(self):
"""
Return the size of this string, in UTF-16 code units
:rtype:int
"""
return self.utf16_size
def get_data(self):
"""
Return a series of MUTF-8 code units (a.k.a. octets, a.k.a. bytes) followed by a byte of value 0
:rtype: string
"""
return self.data
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
pass
def get(self):
return self.data
def show(self):
bytecode._PrintSubBanner("String Data Item")
bytecode._PrintDefault("utf16_size=%d data=%s\n" % (self.utf16_size, repr( self.data )))
def get_obj(self):
return []
def get_raw(self):
return writeuleb128( self.utf16_size ) + self.data
def get_length(self):
return len(writeuleb128( self.utf16_size )) + len(self.data)
class StringIdItem(object):
"""
This class can parse a string_id_item of a dex file
:param buff: a string which represents a Buff object of the string_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.string_data_off = unpack("=I", buff.read(4))[0]
def get_string_data_off(self):
"""
Return the offset from the start of the file to the string data for this item
:rtype: int
"""
return self.string_data_off
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
pass
def show(self):
bytecode._PrintSubBanner("String Id Item")
bytecode._PrintDefault("string_data_off=%x\n" % self.string_data_off)
def get_obj(self):
if self.string_data_off != 0:
self.string_data_off = self.__CM.get_string_by_offset( self.string_data_off ).get_off()
return pack("=I", self.string_data_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class TypeIdItem(object):
"""
This class can parse a type_id_item of a dex file
:param buff: a string which represents a Buff object of the type_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.descriptor_idx = unpack("=I", buff.read( 4 ) )[0]
self.descriptor_idx_value = None
def get_descriptor_idx(self):
"""
Return the index into the string_ids list for the descriptor string of this type
:rtype: int
"""
return self.descriptor_idx
def get_descriptor_idx_value(self):
"""
Return the string associated to the descriptor
:rtype: string
"""
return self.descriptor_idx_value
def reload(self):
self.descriptor_idx_value = self.__CM.get_string( self.descriptor_idx )
def show(self):
bytecode._PrintSubBanner("Type Id Item")
bytecode._PrintDefault("descriptor_idx=%d descriptor_idx_value=%s\n" % (self.descriptor_idx, self.descriptor_idx_value))
def get_obj(self):
return pack("=I", self.descriptor_idx)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class TypeHIdItem(object):
"""
This class can parse a list of type_id_item of a dex file
:param buff: a string which represents a Buff object of the list of type_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.type = []
for i in xrange(0, size):
self.type.append( TypeIdItem( buff, cm ) )
def get_type(self):
"""
Return the list of type_id_item
:rtype: a list of :class:`TypeIdItem` objects
"""
return self.type
def get(self, idx):
try:
return self.type[ idx ].get_descriptor_idx()
except IndexError:
return -1
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def reload(self):
for i in self.type:
i.reload()
def show(self):
bytecode._PrintSubBanner("Type List Item")
for i in self.type:
i.show()
def get_obj(self):
return [ i for i in self.type ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.type)
def get_length(self):
length = 0
for i in self.type:
length += i.get_length()
return length
class ProtoIdItem(object):
"""
This class can parse a proto_id_item of a dex file
:param buff: a string which represents a Buff object of the proto_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.shorty_idx = unpack("=I", buff.read(4))[0]
self.return_type_idx = unpack("=I", buff.read(4))[0]
self.parameters_off = unpack("=I", buff.read(4))[0]
self.shorty_idx_value = None
self.return_type_idx_value = None
self.parameters_off_value = None
def reload(self):
self.shorty_idx_value = self.__CM.get_string( self.shorty_idx )
self.return_type_idx_value = self.__CM.get_type( self.return_type_idx )
params = self.__CM.get_type_list( self.parameters_off )
self.parameters_off_value = '({})'.format(' '.join(params))
def get_shorty_idx(self):
"""
Return the index into the string_ids list for the short-form descriptor string of this prototype
:rtype: int
"""
return self.shorty_idx
def get_return_type_idx(self):
"""
Return the index into the type_ids list for the return type of this prototype
:rtype: int
"""
return self.return_type_idx
def get_parameters_off(self):
"""
Return the offset from the start of the file to the list of parameter types for this prototype, or 0 if this prototype has no parameters
:rtype: int
"""
return self.parameters_off
def get_shorty_idx_value(self):
"""
Return the string associated to the shorty_idx
:rtype: string
"""
return self.shorty_idx_value
def get_return_type_idx_value(self):
"""
Return the string associated to the return_type_idx
:rtype: string
"""
return self.return_type_idx_value
def get_parameters_off_value(self):
"""
Return the string associated to the parameters_off
:rtype: string
"""
return self.parameters_off_value
def show(self):
bytecode._PrintSubBanner("Proto Item")
bytecode._PrintDefault("shorty_idx=%d return_type_idx=%d parameters_off=%d\n" % (self.shorty_idx, self.return_type_idx, self.parameters_off))
bytecode._PrintDefault("shorty_idx_value=%s return_type_idx_value=%s parameters_off_value=%s\n" %
(self.shorty_idx_value, self.return_type_idx_value, self.parameters_off_value))
def get_obj(self):
if self.parameters_off != 0:
self.parameters_off = self.__CM.get_obj_by_offset( self.parameters_off ).get_off()
return pack("=I", self.shorty_idx) + pack("=I", self.return_type_idx) + pack("=I", self.parameters_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class ProtoHIdItem(object):
"""
This class can parse a list of proto_id_item of a dex file
:param buff: a string which represents a Buff object of the list of proto_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.proto = []
for i in xrange(0, size):
self.proto.append( ProtoIdItem(buff, cm) )
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def get(self, idx):
try:
return self.proto[ idx ]
except IndexError:
return ProtoIdItemInvalid()
def reload(self):
for i in self.proto:
i.reload()
def show(self):
bytecode._PrintSubBanner("Proto List Item")
for i in self.proto:
i.show()
def get_obj(self):
return [ i for i in self.proto ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.proto)
def get_length(self):
length = 0
for i in self.proto:
length += i.get_length()
return length
class FieldIdItem(object):
"""
This class can parse a field_id_item of a dex file
:param buff: a string which represents a Buff object of the field_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.class_idx = unpack("=H", buff.read(2))[0]
self.type_idx = unpack("=H", buff.read(2))[0]
self.name_idx = unpack("=I", buff.read(4))[0]
self.class_idx_value = None
self.type_idx_value = None
self.name_idx_value = None
def reload(self):
self.class_idx_value = self.__CM.get_type( self.class_idx )
self.type_idx_value = self.__CM.get_type( self.type_idx )
self.name_idx_value = self.__CM.get_string( self.name_idx )
def get_class_idx(self):
"""
Return the index into the type_ids list for the definer of this field
:rtype: int
"""
return self.class_idx
def get_type_idx(self):
"""
Return the index into the type_ids list for the type of this field
:rtype: int
"""
return self.type_idx
def get_name_idx(self):
"""
Return the index into the string_ids list for the name of this field
:rtype: int
"""
return self.name_idx
def get_class_name(self):
"""
Return the class name of the field
:rtype: string
"""
return self.class_idx_value
def get_type(self):
"""
Return the type of the field
:rtype: string
"""
return self.type_idx_value
def get_descriptor(self):
"""
Return the descriptor of the field
:rtype: string
"""
return self.type_idx_value
def get_name(self):
"""
Return the name of the field
:rtype: string
"""
return self.name_idx_value
def get_list(self):
return [ self.get_class_name(), self.get_type(), self.get_name() ]
def show(self):
bytecode._PrintSubBanner("Field Id Item")
bytecode._PrintDefault("class_idx=%d type_idx=%d name_idx=%d\n" % (self.class_idx, self.type_idx, self.name_idx))
bytecode._PrintDefault("class_idx_value=%s type_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.type_idx_value, self.name_idx_value))
def get_obj(self):
return pack("=H", self.class_idx) + \
pack("=H", self.type_idx) + \
pack("=I", self.name_idx)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class FieldHIdItem(object):
"""
This class can parse a list of field_id_item of a dex file
:param buff: a string which represents a Buff object of the list of field_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.offset = buff.get_idx()
self.elem = []
for i in xrange(0, size):
self.elem.append( FieldIdItem(buff, cm) )
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def gets(self):
return self.elem
def get(self, idx):
try:
return self.elem[ idx ]
except IndexError:
return FieldIdItemInvalid()
def reload(self):
for i in self.elem:
i.reload()
def show(self):
nb = 0
for i in self.elem:
print nb,
i.show()
nb = nb + 1
def get_obj(self):
return [ i for i in self.elem ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.elem)
def get_length(self):
length = 0
for i in self.elem:
length += i.get_length()
return length
class MethodIdItem(object):
"""
This class can parse a method_id_item of a dex file
:param buff: a string which represents a Buff object of the method_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.class_idx = unpack("=H", buff.read(2))[0]
self.proto_idx = unpack("=H", buff.read(2))[0]
self.name_idx = unpack("=I", buff.read(4))[0]
self.class_idx_value = None
self.proto_idx_value = None
self.name_idx_value = None
def reload(self):
self.class_idx_value = self.__CM.get_type( self.class_idx )
self.proto_idx_value = self.__CM.get_proto( self.proto_idx )
self.name_idx_value = self.__CM.get_string( self.name_idx )
def get_class_idx(self):
"""
Return the index into the type_ids list for the definer of this method
:rtype: int
"""
return self.class_idx
def get_proto_idx(self):
"""
Return the index into the proto_ids list for the prototype of this method
:rtype: int
"""
return self.proto_idx
def get_name_idx(self):
"""
Return the index into the string_ids list for the name of this method
:rtype: int
"""
return self.name_idx
def get_class_name(self):
"""
Return the class name of the method
:rtype: string
"""
return self.class_idx_value
def get_proto(self):
"""
Return the prototype of the method
:rtype: string
"""
return self.proto_idx_value
def get_descriptor(self):
"""
Return the descriptor
:rtype: string
"""
proto = self.get_proto()
return proto[0] + proto[1]
def get_real_descriptor(self):
"""
Return the real descriptor (i.e. without extra spaces)
:rtype: string
"""
proto = self.get_proto()
return proto[0].replace(' ','') + proto[1]
def get_name(self):
"""
Return the name of the method
:rtype: string
"""
return self.name_idx_value
def get_list(self):
return [ self.get_class_name(), self.get_name(), self.get_proto() ]
def get_triple(self):
return self.get_class_name()[1:-1], self.get_name(), self.get_real_descriptor()
def show(self):
bytecode._PrintSubBanner("Method Id Item")
bytecode._PrintDefault("class_idx=%d proto_idx=%d name_idx=%d\n" % (self.class_idx, self.proto_idx, self.name_idx))
bytecode._PrintDefault("class_idx_value=%s proto_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.proto_idx_value, self.name_idx_value))
def get_obj(self):
return pack("H", self.class_idx) + pack("H", self.proto_idx) + pack("I", self.name_idx)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
class MethodHIdItem(object):
"""
This class can parse a list of method_id_item of a dex file
:param buff: a string which represents a Buff object of the list of method_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.methods = []
for i in xrange(0, size):
self.methods.append( MethodIdItem(buff, cm) )
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def get(self, idx):
try:
return self.methods[ idx ]
except IndexError:
return MethodIdItemInvalid()
def reload(self):
for i in self.methods:
i.reload()
def show(self):
print "METHOD_ID_ITEM"
nb = 0
for i in self.methods:
print nb,
i.show()
nb = nb + 1
def get_obj(self):
return [ i for i in self.methods ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.methods)
def get_length(self):
length = 0
for i in self.methods:
length += i.get_length()
return length
class ProtoIdItemInvalid(object):
def get_params(self):
return "AG:IPI:invalid_params;"
def get_shorty(self):
return "(AG:IPI:invalid_shorty)"
def get_return_type(self):
return "(AG:IPI:invalid_return_type)"
def show(self):
print "AG:IPI:invalid_proto_item", self.get_shorty(), self.get_return_type(), self.get_params()
class FieldIdItemInvalid(object):
def get_class_name(self):
return "AG:IFI:invalid_class_name;"
def get_type(self):
return "(AG:IFI:invalid_type)"
def get_descriptor(self):
return "(AG:IFI:invalid_descriptor)"
def get_name(self):
return "AG:IFI:invalid_name"
def get_list(self):
return [ self.get_class_name(), self.get_type(), self.get_name() ]
def show(self):
print "AG:IFI:invalid_field_item"
class MethodIdItemInvalid(object):
def get_class_name(self):
return "AG:IMI:invalid_class_name;"
def get_descriptor(self):
return "(AG:IMI:invalid_descriptor)"
def get_proto(self):
return "()AG:IMI:invalid_proto"
def get_name(self):
return "AG:IMI:invalid_name"
def get_list(self):
return [ self.get_class_name(), self.get_name(), self.get_proto() ]
def show(self):
print "AG:IMI:invalid_method_item"
class EncodedField(object):
"""
This class can parse an encoded_field of a dex file
:param buff: a string which represents a Buff object of the encoded field
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.CM = cm
self.offset = buff.get_idx()
self.field_idx_diff = readuleb128( buff )
self.access_flags = readuleb128( buff )
self.field_idx = 0
self.name = None
self.proto = None
self.class_name = None
self.init_value = None
self.access_flags_string = None
def reload(self):
name = self.CM.get_field( self.field_idx )
self.class_name = name[0]
self.name = name[2]
self.proto = ''.join(i for i in name[1])
def set_init_value(self, value):
"""
Setup the init value object of the field
:param value: the init value
:type value: :class:`EncodedValue`
"""
self.init_value = value
def get_init_value(self):
"""
Return the init value object of the field
:rtype: :class:`EncodedValue`
"""
return self.init_value
def adjust_idx(self, val):
self.field_idx = self.field_idx_diff + val
def get_field_idx_diff(self):
"""
Return the index into the field_ids list for the identity of this field (includes the name and descriptor),
represented as a difference from the index of previous element in the list
:rtype: int
"""
return self.field_idx_diff
def get_field_idx(self):
"""
        Return the real index of the field
:rtype: int
"""
return self.field_idx
def get_access_flags(self):
"""
Return the access flags of the field
:rtype: int
"""
return self.access_flags
def get_class_name(self):
"""
Return the class name of the field
:rtype: string
"""
return self.class_name
def get_descriptor(self):
"""
Return the descriptor of the field
:rtype: string
"""
return self.proto
def get_name(self):
"""
Return the name of the field
:rtype: string
"""
return self.name
def get_access_flags_string(self):
"""
Return the access flags string of the field
:rtype: string
"""
if self.access_flags_string == None:
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "":
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def set_name(self, value):
self.CM.set_hook_field_name(self, value)
self.reload()
def get_obj(self):
return []
def get_raw(self):
return writeuleb128( self.field_idx_diff ) + writeuleb128( self.access_flags )
def get_size(self):
return len(self.get_raw())
def show(self):
"""
Display the information about the field
"""
colors = bytecode.disable_print_colors()
self.pretty_show()
bytecode.enable_print_colors(colors)
def pretty_show(self):
"""
Display the information (with a pretty print) about the field
"""
bytecode._PrintSubBanner("Field Information")
bytecode._PrintDefault("%s->%s %s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))
init_value = self.get_init_value()
if init_value != None:
bytecode._PrintDefault( "\tinit value: %s\n" % str( init_value.get_value() ) )
self.show_dref()
def show_dref(self):
"""
Display where this field is read or written
"""
try:
bytecode._PrintSubBanner("DREF")
bytecode._PrintDRef("R", self.DREFr.items)
bytecode._PrintDRef("W", self.DREFw.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
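# Example (sketch): an EncodedField stores its field_ids index as a delta
# (field_idx_diff); ClassDataItem._load_elements() further down feeds the
# previous absolute index into adjust_idx() so that get_field_idx() becomes
# absolute. `f` and `prev` are assumed to come from that loading loop.
#
#   f.adjust_idx(prev)      # prev: absolute field index of the previous element
#   f.reload()              # resolve class name / descriptor / name via the ClassManager
#   print "%s->%s %s" % (f.get_class_name(), f.get_name(), f.get_descriptor())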
class EncodedMethod(object):
"""
This class can parse an encoded_method of a dex file
:param buff: a string which represents a Buff object of the encoded_method
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.CM = cm
self.offset = buff.get_idx()
self.method_idx_diff = readuleb128( buff ) #: method index diff in the corresponding section
self.access_flags = readuleb128( buff ) #: access flags of the method
self.code_off = readuleb128( buff ) #: offset of the code section
self.method_idx = 0
self.name = None
self.proto = None
self.class_name = None
self.code = None
self.access_flags_string = None
self.notes = []
def adjust_idx(self, val):
self.method_idx = self.method_idx_diff + val
def get_method_idx(self):
"""
Return the real index of the method
:rtype: int
"""
return self.method_idx
def get_method_idx_diff(self):
"""
        Return the index into the method_ids list for the identity of this method (includes the name and descriptor),
        represented as a difference from the index of the previous element in the list
:rtype: int
"""
return self.method_idx_diff
def get_access_flags(self):
"""
Return the access flags of the method
:rtype: int
"""
return self.access_flags
def get_code_off(self):
"""
Return the offset from the start of the file to the code structure for this method,
or 0 if this method is either abstract or native
:rtype: int
"""
return self.code_off
def get_access_flags_string(self):
"""
Return the access flags string of the method
:rtype: string
"""
if self.access_flags_string == None:
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "":
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def reload(self):
v = self.CM.get_method( self.method_idx )
self.class_name = v[0]
self.name = v[1]
self.proto = ''.join(i for i in v[2])
self.code = self.CM.get_code( self.code_off )
def get_locals(self):
ret = self.proto.split(')')
params = ret[0][1:].split()
return self.code.get_registers_size() - len(params) - 1
def get_information(self):
info = {}
if self.code:
nb = self.code.get_registers_size()
proto = self.get_descriptor()
ret = proto.split(')')
params = ret[0][1:].split()
if params:
info["registers"] = (0, nb - len(params) - 1)
j = 0
info["params"] = []
for i in xrange(nb - len(params), nb):
info["params"].append((i, get_type(params[j])))
j += 1
else:
info["registers"] = (0, nb - 1)
info["return"] = get_type(ret[1])
return info
def each_params_by_register(self, nb, proto):
bytecode._PrintSubBanner("Params")
ret = proto.split(')')
params = ret[0][1:].split()
if params:
bytecode._PrintDefault("- local registers: v%d...v%d\n" % (0, nb - len(params) - 1))
j = 0
for i in xrange(nb - len(params), nb):
bytecode._PrintDefault("- v%d: %s\n" % (i, get_type(params[j])))
j += 1
else:
bytecode._PrintDefault("local registers: v%d...v%d\n" % (0, nb - 1))
bytecode._PrintDefault("- return: %s\n" % get_type(ret[1]))
bytecode._PrintSubBanner()
def show_info(self):
"""
Display the basic information about the method
"""
bytecode._PrintSubBanner("Method Information")
bytecode._PrintDefault("%s->%s%s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))
def show(self):
"""
Display the information about the method
"""
colors = bytecode.disable_print_colors()
self.pretty_show()
bytecode.enable_print_colors(colors)
def pretty_show(self):
"""
Display the information (with a pretty print) about the method
"""
self.show_info()
self.show_notes()
if self.code != None:
self.each_params_by_register( self.code.get_registers_size(), self.get_descriptor() )
if self.CM.get_vmanalysis() == None:
self.code.show()
else:
self.code.pretty_show( self.CM.get_vmanalysis().get_method( self ) )
self.show_xref()
def show_xref(self):
"""
Display where the method is called or which method is called
"""
try:
bytecode._PrintSubBanner("XREF")
bytecode._PrintXRef("F", self.XREFfrom.items)
bytecode._PrintXRef("T", self.XREFto.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
def show_notes(self):
"""
Display the notes about the method
"""
if self.notes != []:
bytecode._PrintSubBanner("Notes")
for i in self.notes:
bytecode._PrintNote(i)
bytecode._PrintSubBanner()
def source(self):
"""
        Display the source code of this method, using the decompiler attached to the ClassManager
"""
self.CM.decompiler_ob.display_source(self)
def get_source(self):
return self.CM.decompiler_ob.get_source_method(self)
def get_length(self):
"""
Return the length of the associated code of the method
:rtype: int
"""
if self.code != None:
return self.code.get_length()
return 0
def get_code(self):
"""
Return the code object associated to the method
:rtype: :class:`DalvikCode` object
"""
return self.code
def get_instructions(self):
"""
Get the instructions
:rtype: a generator of each :class:`Instruction` (or a cached list of instructions if you have setup instructions)
"""
if self.code == None:
return []
return self.code.get_bc().get_instructions()
def set_instructions(self, instructions):
"""
Set the instructions
:param instructions: the list of instructions
:type instructions: a list of :class:`Instruction`
"""
if self.code == None:
return []
return self.code.get_bc().set_instructions(instructions)
def get_instruction(self, idx, off=None):
"""
Get a particular instruction by using (default) the index of the address if specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
        if self.code != None:
return self.code.get_bc().get_instruction(idx, off)
return None
def get_debug(self):
"""
Return the debug object associated to this method
:rtype: :class:`DebugInfoItem`
"""
if self.code == None:
return None
return self.code.get_debug()
def get_descriptor(self):
"""
Return the descriptor of the method
:rtype: string
"""
return self.proto
def get_class_name(self):
"""
Return the class name of the method
:rtype: string
"""
return self.class_name
def get_name(self):
"""
Return the name of the method
:rtype: string
"""
return self.name
def get_triple(self):
return self.CM.get_method_ref( self.method_idx ).get_triple()
def add_inote(self, msg, idx, off=None):
"""
Add a message to a specific instruction by using (default) the index of the address if specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if self.code != None:
self.code.add_inote(msg, idx, off)
def add_note(self, msg):
"""
Add a message to this method
:param msg: the message
:type msg: string
"""
self.notes.append( msg )
def set_code_idx(self, idx):
"""
Set the start address of the buffer to disassemble
:param idx: the index
:type idx: int
"""
if self.code != None:
self.code.set_idx( idx )
def set_name(self, value):
self.CM.set_hook_method_name( self, value )
self.reload()
def get_raw(self):
if self.code != None:
self.code_off = self.code.get_off()
return writeuleb128( self.method_idx_diff ) + writeuleb128( self.access_flags ) + writeuleb128( self.code_off )
def get_size(self):
return len(self.get_raw())
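# Example (sketch): a reloaded EncodedMethod exposes its code item, so the
# bytecode can be walked instruction by instruction. `m` is assumed to be an
# EncodedMethod already reloaded by the ClassManager.
#
#   for ins in m.get_instructions():
#       print ins.get_name(), ins.get_output()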
class ClassDataItem(object):
"""
This class can parse a class_data_item of a dex file
:param buff: a string which represents a Buff object of the class_data_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.static_fields_size = readuleb128( buff )
self.instance_fields_size = readuleb128( buff )
self.direct_methods_size = readuleb128( buff )
self.virtual_methods_size = readuleb128( buff )
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
self._load_elements( self.static_fields_size, self.static_fields, EncodedField, buff, cm )
self._load_elements( self.instance_fields_size, self.instance_fields, EncodedField, buff, cm )
self._load_elements( self.direct_methods_size, self.direct_methods, EncodedMethod, buff, cm )
self._load_elements( self.virtual_methods_size, self.virtual_methods, EncodedMethod, buff, cm )
def get_static_fields_size(self):
"""
Return the number of static fields defined in this item
:rtype: int
"""
return self.static_fields_size
def get_instance_fields_size(self):
"""
Return the number of instance fields defined in this item
:rtype: int
"""
return self.instance_fields_size
def get_direct_methods_size(self):
"""
Return the number of direct methods defined in this item
:rtype: int
"""
return self.direct_methods_size
def get_virtual_methods_size(self):
"""
Return the number of virtual methods defined in this item
:rtype: int
"""
return self.virtual_methods_size
def get_static_fields(self):
"""
Return the defined static fields, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedField` objects
"""
return self.static_fields
def get_instance_fields(self):
"""
Return the defined instance fields, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedField` objects
"""
return self.instance_fields
def get_direct_methods(self):
"""
Return the defined direct (any of static, private, or constructor) methods, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedMethod` objects
"""
return self.direct_methods
def get_virtual_methods(self):
"""
Return the defined virtual (none of static, private, or constructor) methods, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedMethod` objects
"""
return self.virtual_methods
def get_methods(self):
"""
Return direct and virtual methods
:rtype: a list of :class:`EncodedMethod` objects
"""
return [ x for x in self.direct_methods ] + [ x for x in self.virtual_methods ]
def get_fields(self):
"""
Return static and instance fields
:rtype: a list of :class:`EncodedField` objects
"""
return [ x for x in self.static_fields ] + [ x for x in self.instance_fields ]
def set_off(self, off):
self.offset = off
def set_static_fields(self, value):
if value != None:
values = value.get_values()
if len(values) <= len(self.static_fields):
for i in xrange(0, len(values)):
self.static_fields[i].set_init_value( values[i] )
def _load_elements(self, size, l, Type, buff, cm):
prev = 0
for i in xrange(0, size):
el = Type(buff, cm)
el.adjust_idx( prev )
if isinstance(el, EncodedField):
prev = el.get_field_idx()
else:
prev = el.get_method_idx()
l.append( el )
def reload(self):
for i in self.static_fields:
i.reload()
for i in self.instance_fields:
i.reload()
for i in self.direct_methods:
i.reload()
for i in self.virtual_methods:
i.reload()
def show(self):
self.pretty_show()
def pretty_show(self):
bytecode._PrintSubBanner("Class Data Item")
bytecode._PrintDefault("static_fields_size=%d instance_fields_size=%d direct_methods_size=%d virtual_methods_size=%d\n" % \
(self.static_fields_size, self.instance_fields_size, self.direct_methods_size, self.virtual_methods_size))
bytecode._PrintSubBanner("Static Fields")
for i in self.static_fields:
i.show()
bytecode._PrintSubBanner("Instance Fields")
for i in self.instance_fields:
i.show()
bytecode._PrintSubBanner("Direct Methods")
for i in self.direct_methods:
i.pretty_show()
bytecode._PrintSubBanner("Virtual Methods")
for i in self.virtual_methods:
i.pretty_show()
def get_obj(self):
return [ i for i in self.static_fields ] + \
[ i for i in self.instance_fields ] + \
[ i for i in self.direct_methods ] + \
[ i for i in self.virtual_methods ]
def get_raw(self):
buff = writeuleb128( self.static_fields_size ) + \
writeuleb128( self.instance_fields_size ) + \
writeuleb128( self.direct_methods_size ) + \
writeuleb128( self.virtual_methods_size ) + \
''.join(i.get_raw() for i in self.static_fields) + \
''.join(i.get_raw() for i in self.instance_fields) + \
''.join(i.get_raw() for i in self.direct_methods) + \
''.join(i.get_raw() for i in self.virtual_methods)
return buff
def get_length(self):
length = len(writeuleb128( self.static_fields_size )) + \
len(writeuleb128( self.instance_fields_size )) + \
len(writeuleb128( self.direct_methods_size )) + \
len(writeuleb128( self.virtual_methods_size ))
for i in self.static_fields:
length += i.get_size()
for i in self.instance_fields:
length += i.get_size()
for i in self.direct_methods:
length += i.get_size()
for i in self.virtual_methods:
length += i.get_size()
return length
def get_off(self):
return self.offset
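# Example (sketch): the four uleb128 sizes read in __init__ drive
# _load_elements(), after which a ClassDataItem can simply be queried.
# `cdi` is assumed to be an already parsed ClassDataItem.
#
#   cdi.reload()
#   for m in cdi.get_methods():     # direct + virtual methods
#       print m.get_name()
#   for f in cdi.get_fields():      # static + instance fields
#       print f.get_name()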
class ClassDefItem(object):
"""
This class can parse a class_def_item of a dex file
:param buff: a string which represents a Buff object of the class_def_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.class_idx = unpack("=I", buff.read(4))[0]
self.access_flags = unpack("=I", buff.read(4))[0]
self.superclass_idx = unpack("=I", buff.read(4))[0]
self.interfaces_off = unpack("=I", buff.read(4))[0]
self.source_file_idx = unpack("=I", buff.read(4))[0]
self.annotations_off = unpack("=I", buff.read(4))[0]
self.class_data_off = unpack("=I", buff.read(4))[0]
self.static_values_off = unpack("=I", buff.read(4))[0]
self.interfaces = []
self.class_data_item = None
self.static_values = None
self.name = None
self.sname = None
self.access_flags_string = None
def reload(self):
self.name = self.__CM.get_type( self.class_idx )
self.sname = self.__CM.get_type( self.superclass_idx )
self.interfaces = self.__CM.get_type_list( self.interfaces_off )
if self.class_data_off != 0:
self.class_data_item = self.__CM.get_class_data_item( self.class_data_off )
self.class_data_item.reload()
if self.static_values_off != 0:
self.static_values = self.__CM.get_encoded_array_item ( self.static_values_off )
if self.class_data_item != None:
self.class_data_item.set_static_fields( self.static_values.get_value() )
def get_methods(self):
"""
Return all methods of this class
:rtype: a list of :class:`EncodedMethod` objects
"""
if self.class_data_item != None:
return self.class_data_item.get_methods()
return []
def get_fields(self):
"""
Return all fields of this class
:rtype: a list of :class:`EncodedField` objects
"""
if self.class_data_item != None:
return self.class_data_item.get_fields()
return []
def get_class_idx(self):
"""
Return the index into the type_ids list for this class
:rtype: int
"""
return self.class_idx
def get_access_flags(self):
"""
Return the access flags for the class (public, final, etc.)
:rtype: int
"""
return self.access_flags
def get_superclass_idx(self):
"""
Return the index into the type_ids list for the superclass
:rtype: int
"""
return self.superclass_idx
def get_interfaces_off(self):
"""
Return the offset from the start of the file to the list of interfaces, or 0 if there are none
:rtype: int
"""
return self.interfaces_off
def get_source_file_idx(self):
"""
Return the index into the string_ids list for the name of the file containing the original
source for (at least most of) this class, or the special value NO_INDEX to represent a lack of this information
:rtype: int
"""
return self.source_file_idx
def get_annotations_off(self):
"""
Return the offset from the start of the file to the annotations structure for this class,
or 0 if there are no annotations on this class.
:rtype: int
"""
return self.annotations_off
def get_class_data_off(self):
"""
Return the offset from the start of the file to the associated class data for this item,
or 0 if there is no class data for this class
:rtype: int
"""
return self.class_data_off
def get_static_values_off(self):
"""
Return the offset from the start of the file to the list of initial values for static fields,
or 0 if there are none (and all static fields are to be initialized with 0 or null)
:rtype: int
"""
return self.static_values_off
def get_class_data(self):
"""
Return the associated class_data_item
:rtype: a :class:`ClassDataItem` object
"""
return self.class_data_item
def get_name(self):
"""
Return the name of this class
        :rtype: string
"""
return self.name
def get_superclassname(self):
"""
Return the name of the super class
:rtype: string
"""
return self.sname
def get_interfaces(self):
"""
        Return the names of the interfaces implemented by this class
        :rtype: a list of strings
"""
return self.interfaces
def get_access_flags_string(self):
"""
Return the access flags string of the class
:rtype: string
"""
if self.access_flags_string == None:
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "":
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def show(self):
bytecode._PrintSubBanner("Class Def Item")
bytecode._PrintDefault("name=%s, sname=%s, interfaces=%s, access_flags=%s\n" %
(self.name,
self.sname,
self.interfaces,
self.get_access_flags_string()))
bytecode._PrintDefault("class_idx=%d, superclass_idx=%d, interfaces_off=%x, source_file_idx=%d, annotations_off=%x, class_data_off=%x, static_values_off=%x\n" %
(self.class_idx,
self.superclass_idx,
self.interfaces_off,
self.source_file_idx,
self.annotations_off,
self.class_data_off,
self.static_values_off))
self.show_xref()
def show_xref(self):
"""
Display where the method is called or which method is called
"""
try:
bytecode._PrintSubBanner("XREF")
bytecode._PrintXRef("F", self.XREFfrom.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
def source(self):
"""
        Display the source code of the entire class, using the decompiler attached to the ClassManager
"""
self.__CM.decompiler_ob.display_all(self)
def get_source(self):
return self.__CM.decompiler_ob.get_source_class(self)
def get_source_ext(self):
return self.__CM.decompiler_ob.get_source_class_ext(self)
def set_name(self, value):
self.__CM.set_hook_class_name( self, value )
def get_obj(self):
if self.interfaces_off != 0:
self.interfaces_off = self.__CM.get_obj_by_offset( self.interfaces_off ).get_off()
if self.annotations_off != 0:
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
if self.class_data_off != 0:
self.class_data_off = self.__CM.get_obj_by_offset( self.class_data_off ).get_off()
if self.static_values_off != 0:
self.static_values_off = self.__CM.get_obj_by_offset( self.static_values_off ).get_off()
return pack("=I", self.class_idx) + \
pack("=I", self.access_flags) + \
pack("=I", self.superclass_idx) + \
pack("=I", self.interfaces_off) + \
pack("=I", self.source_file_idx) + \
pack("=I", self.annotations_off) + \
pack("=I", self.class_data_off) + \
pack("=I", self.static_values_off)
def get_raw(self):
return self.get_obj()
def get_length(self):
return len(self.get_obj())
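# Example (sketch): after reload(), a ClassDefItem ties everything together:
# name, superclass and interfaces are resolved, and the class data (if any)
# gives access to the encoded fields and methods. `cdef` is assumed to be a
# parsed ClassDefItem.
#
#   cdef.reload()
#   print cdef.get_name(), "extends", cdef.get_superclassname()
#   for m in cdef.get_methods():
#       print "   ", m.get_name(), m.get_descriptor()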
class ClassHDefItem(object):
"""
This class can parse a list of class_def_item of a dex file
:param buff: a string which represents a Buff object of the list of class_def_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.class_def = []
for i in xrange(0, size):
idx = buff.get_idx()
class_def = ClassDefItem( buff, cm )
self.class_def.append( class_def )
buff.set_idx( idx + calcsize("=IIIIIIII") )
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def get_class_idx(self, idx):
for i in self.class_def:
if i.get_class_idx() == idx:
return i
return None
def get_method(self, name_class, name_method):
l = []
for i in self.class_def:
if i.get_name() == name_class:
for j in i.get_methods():
if j.get_name() == name_method:
l.append(j)
return l
def get_names(self):
return [ x.get_name() for x in self.class_def ]
def reload(self):
for i in self.class_def:
i.reload()
def show(self):
for i in self.class_def:
i.show()
def get_obj(self):
return [ i for i in self.class_def ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.class_def)
def get_length(self):
length = 0
for i in self.class_def:
length += i.get_length()
return length
class EncodedTypeAddrPair(object):
"""
This class can parse an encoded_type_addr_pair of a dex file
:param buff: a string which represents a Buff object of the encoded_type_addr_pair
:type buff: Buff object
"""
def __init__(self, buff):
self.type_idx = readuleb128( buff )
self.addr = readuleb128( buff )
def get_type_idx(self):
"""
Return the index into the type_ids list for the type of the exception to catch
:rtype: int
"""
return self.type_idx
def get_addr(self):
"""
Return the bytecode address of the associated exception handler
:rtype: int
"""
return self.addr
def get_obj(self):
return []
def show(self):
bytecode._PrintSubBanner("Encoded Type Addr Pair")
bytecode._PrintDefault("type_idx=%d addr=%x\n" % (self.type_idx, self.addr))
def get_raw(self):
return writeuleb128( self.type_idx ) + writeuleb128( self.addr )
def get_length(self):
return len(self.get_raw())
class EncodedCatchHandler(object):
"""
This class can parse an encoded_catch_handler of a dex file
:param buff: a string which represents a Buff object of the encoded_catch_handler
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.size = readsleb128( buff )
self.handlers = []
for i in xrange(0, abs(self.size)):
self.handlers.append( EncodedTypeAddrPair(buff) )
if self.size <= 0:
self.catch_all_addr = readuleb128( buff )
def get_size(self):
"""
Return the number of catch types in this list
:rtype: int
"""
return self.size
def get_handlers(self):
"""
Return the stream of abs(size) encoded items, one for each caught type, in the order that the types should be tested.
:rtype: a list of :class:`EncodedTypeAddrPair` objects
"""
return self.handlers
def get_catch_all_addr(self):
"""
Return the bytecode address of the catch-all handler. This element is only present if size is non-positive.
:rtype: int
"""
return self.catch_all_addr
def get_off(self):
return self.offset
def set_off(self, off):
self.offset = off
def show(self):
bytecode._PrintSubBanner("Encoded Catch Handler")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.handlers:
i.show()
if self.size <= 0:
bytecode._PrintDefault("catch_all_addr=%x\n" % self.catch_all_addr)
def get_raw(self):
buff = writesleb128( self.size ) + ''.join(i.get_raw() for i in self.handlers)
if self.size <= 0:
buff += writeuleb128( self.catch_all_addr )
return buff
def get_length(self):
length = len(writesleb128( self.size ))
for i in self.handlers:
length += i.get_length()
if self.size <= 0:
length += len(writeuleb128( self.catch_all_addr ))
return length
class EncodedCatchHandlerList(object):
"""
This class can parse an encoded_catch_handler_list of a dex file
:param buff: a string which represents a Buff object of the encoded_catch_handler_list
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.size = readuleb128( buff )
self.list = []
for i in xrange(0, self.size):
self.list.append( EncodedCatchHandler(buff, cm) )
def get_size(self):
"""
Return the size of this list, in entries
:rtype: int
"""
return self.size
def get_list(self):
"""
Return the actual list of handler lists, represented directly (not as offsets), and concatenated sequentially
:rtype: a list of :class:`EncodedCatchHandler` objects
"""
return self.list
def show(self):
bytecode._PrintSubBanner("Encoded Catch Handler List")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.list:
i.show()
def get_off(self):
return self.offset
def set_off(self, off):
self.offset = off
def get_obj(self):
return writeuleb128( self.size )
def get_raw(self):
return self.get_obj() + ''.join(i.get_raw() for i in self.list)
def get_length(self):
length = len(self.get_obj())
for i in self.list:
length += i.get_length()
return length
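# Example (sketch): the sleb128 `size` of an encoded_catch_handler is the
# number of typed catches, and a non-positive value additionally signals a
# trailing catch-all address. `handler_list` is assumed to be a parsed
# EncodedCatchHandlerList.
#
#   for handler in handler_list.get_list():        # EncodedCatchHandler objects
#       for pair in handler.get_handlers():        # EncodedTypeAddrPair objects
#           print pair.get_type_idx(), hex(pair.get_addr())
#       if handler.get_size() <= 0:
#           print "catch-all at", hex(handler.get_catch_all_addr())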
KIND_METH = 0
KIND_STRING = 1
KIND_FIELD = 2
KIND_TYPE = 3
VARIES = 4
INLINE_METHOD = 5
VTABLE_OFFSET = 6
FIELD_OFFSET = 7
KIND_RAW_STRING = 8
OPERAND_REGISTER = 0
OPERAND_LITERAL = 1
OPERAND_RAW = 2
OPERAND_OFFSET = 3
OPERAND_KIND = 0x100
def get_kind(cm, kind, value):
"""
Return the value of the 'kind' argument
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
:param kind: the type of the 'kind' argument
:type kind: int
:param value: the value of the 'kind' argument
:type value: int
:rtype: string
"""
if kind == KIND_METH:
method = cm.get_method_ref(value)
class_name = method.get_class_name()
name = method.get_name()
descriptor = method.get_descriptor()
return "%s->%s%s" % (class_name, name, descriptor)
elif kind == KIND_STRING:
return repr(cm.get_string(value))
elif kind == KIND_RAW_STRING:
return cm.get_string(value)
elif kind == KIND_FIELD:
class_name, proto, field_name = cm.get_field(value)
return "%s->%s %s" % (class_name, field_name, proto)
elif kind == KIND_TYPE:
return cm.get_type(value)
elif kind == VTABLE_OFFSET:
return "vtable[0x%x]" % value
elif kind == FIELD_OFFSET:
return "field[0x%x]" % value
elif kind == INLINE_METHOD:
buff = "inline[0x%x]" % value
        # FIXME: depends on the Android version ...
if len(INLINE_METHODS) > value:
elem = INLINE_METHODS[value]
buff += " %s->%s%s" % (elem[0], elem[1], elem[2])
return buff
return None
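# Example (sketch): instructions call get_kind() to turn the raw index stored
# in their 'kind' argument into a printable reference. `cm` is assumed to be
# the ClassManager of the current DEX file and the indexes are illustrative.
#
#   print get_kind(cm, KIND_METH, 3)        # "Lcls;->name(args)ret" style string
#   print get_kind(cm, KIND_STRING, 0)      # repr() of the string at index 0
#   print get_kind(cm, KIND_TYPE, 1)        # name of the type at index 1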
class Instruction(object):
"""
This class represents a dalvik instruction
"""
def get_kind(self):
"""
Return the 'kind' argument of the instruction
:rtype: int
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][1]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][1]
return DALVIK_OPCODES_FORMAT[self.OP][1][1]
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
if self.OP > 0xff:
if self.OP >= 0xf2ff:
return DALVIK_OPCODES_OPTIMIZED[self.OP][1][0]
return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][0]
return DALVIK_OPCODES_FORMAT[self.OP][1][0]
def get_op_value(self):
"""
Return the value of the opcode
:rtype: int
"""
return self.OP
def get_literals(self):
"""
Return the associated literals
:rtype: list of int
"""
return []
def show(self, idx):
"""
Print the instruction
"""
print self.get_name() + " " + self.get_output(idx),
def show_buff(self, idx):
"""
Return the display of the instruction
:rtype: string
"""
return self.get_output(idx)
def get_translated_kind(self):
"""
Return the translated value of the 'kind' argument
:rtype: string
"""
return get_kind(self.cm, self.get_kind(), self.get_ref_kind())
def get_output(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
        raise NotImplementedError("not implemented")
def get_operands(self, idx=-1):
"""
Return all operands
:rtype: list
"""
        raise NotImplementedError("not implemented")
def get_length(self):
"""
Return the length of the instruction
:rtype: int
"""
        raise NotImplementedError("not implemented")
def get_raw(self):
"""
Return the object in a raw format
:rtype: string
"""
        raise NotImplementedError("not implemented")
def get_ref_kind(self):
"""
Return the value of the 'kind' argument
:rtype: value
"""
        raise NotImplementedError("not implemented")
def get_formatted_operands(self):
return None
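# Example (sketch): the concrete subclasses below only decode their byte
# format; name and kind lookups go through the DALVIK_OPCODES_* tables used
# by get_name()/get_kind() above, keyed by self.OP. `cm` and `raw_bytes` are
# assumed inputs.
#
#   ins = Instruction21c(cm, raw_bytes)     # raw_bytes: 4 bytes of bytecode
#   print ins.get_name(), ins.get_output()  # e.g. const-string v0, 'hello'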
class InstructionInvalid(Instruction):
"""
This class represents an invalid instruction
"""
def __init__(self, cm, buff):
super(InstructionInvalid, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
#debug("OP:%x" % (self.OP))
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
return "AG:invalid_instruction"
def get_output(self, idx=-1):
return "(OP:%x)" % self.OP
def get_operands(self, idx=-1):
return []
def get_length(self):
return 2
def get_raw(self):
return pack("=H", self.OP)
class FillArrayData(object):
"""
This class can parse a FillArrayData instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff):
self.notes = []
self.format_general_size = calcsize("=HHI")
self.ident = unpack("=H", buff[0:2])[0]
self.element_width = unpack("=H", buff[2:4])[0]
self.size = unpack("=I", buff[4:8])[0]
self.data = buff[self.format_general_size:self.format_general_size + (self.size * self.element_width) + 1]
def add_note(self, msg):
"""
Add a note to this instruction
:param msg: the message
:type msg: objects (string)
"""
self.notes.append(msg)
def get_notes(self):
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self):
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_data(self):
"""
Return the data of this instruction (the payload)
:rtype: string
"""
return self.data
def get_output(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
buff = ""
data = self.get_data()
buff += repr(data) + " | "
for i in xrange(0, len(data)):
buff += "\\x%02x" % ord(data[i])
return buff
def get_operands(self, idx=-1):
return [(OPERAND_RAW, repr(self.get_data()))]
def get_formatted_operands(self):
return None
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
return "fill-array-data-payload"
def show_buff(self, pos):
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
for i in xrange(0, len(self.data)):
buff += "\\x%02x" % ord(self.data[i])
return buff
def show(self, pos):
"""
Print the instruction
"""
print self.show_buff(pos),
def get_length(self):
"""
Return the length of the instruction
:rtype: int
"""
return ((self.size * self.element_width + 1) / 2 + 4) * 2
def get_raw(self):
return pack("=H", self.ident) + pack("=H", self.element_width) + pack("=I", self.size) + self.data
class SparseSwitch(object):
"""
This class can parse a SparseSwitch instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff):
self.notes = []
self.format_general_size = calcsize("=HH")
self.ident = unpack("=H", buff[0:2])[0]
self.size = unpack("=H", buff[2:4])[0]
self.keys = []
self.targets = []
idx = self.format_general_size
for i in xrange(0, self.size):
self.keys.append(unpack('=l', buff[idx:idx + 4])[0])
idx += 4
for i in xrange(0, self.size):
self.targets.append(unpack('=l', buff[idx:idx + 4])[0])
idx += 4
def add_note(self, msg):
"""
Add a note to this instruction
:param msg: the message
:type msg: objects (string)
"""
self.notes.append(msg)
def get_notes(self):
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self):
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_keys(self):
"""
Return the keys of the instruction
:rtype: a list of long
"""
return self.keys
def get_values(self):
return self.get_keys()
def get_targets(self):
"""
Return the targets (address) of the instruction
:rtype: a list of long
"""
return self.targets
def get_output(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
return " ".join("%x" % i for i in self.keys)
def get_operands(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
return []
def get_formatted_operands(self):
return None
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
return "sparse-switch-payload"
def show_buff(self, pos):
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
for i in xrange(0, len(self.keys)):
buff += "%x:%x " % (self.keys[i], self.targets[i])
return buff
def show(self, pos):
"""
Print the instruction
"""
print self.show_buff(pos),
def get_length(self):
return self.format_general_size + (self.size * calcsize('<L')) * 2
def get_raw(self):
return pack("=H", self.ident) + pack("=H", self.size) + ''.join(pack("=l", i) for i in self.keys) + ''.join(pack("=l", i) for i in self.targets)
class PackedSwitch(object):
"""
This class can parse a PackedSwitch instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff):
self.notes = []
self.format_general_size = calcsize("=HHI")
self.ident = unpack("=H", buff[0:2])[0]
self.size = unpack("=H", buff[2:4])[0]
self.first_key = unpack("=i", buff[4:8])[0]
self.targets = []
idx = self.format_general_size
max_size = self.size
if (max_size * 4) > len(buff):
max_size = len(buff) - idx - 8
for i in xrange(0, max_size):
self.targets.append(unpack('=l', buff[idx:idx + 4])[0])
idx += 4
def add_note(self, msg):
"""
Add a note to this instruction
:param msg: the message
:type msg: objects (string)
"""
self.notes.append(msg)
def get_notes(self):
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self):
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_keys(self):
"""
Return the keys of the instruction
:rtype: a list of long
"""
return [(self.first_key + i) for i in range(0, len(self.targets))]
def get_values(self):
return self.get_keys()
def get_targets(self):
"""
Return the targets (address) of the instruction
:rtype: a list of long
"""
return self.targets
def get_output(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
return " ".join("%x" % (self.first_key + i) for i in range(0, len(self.targets)))
def get_operands(self, idx=-1):
"""
Return an additional output of the instruction
:rtype: string
"""
return []
def get_formatted_operands(self):
return None
def get_name(self):
"""
Return the name of the instruction
:rtype: string
"""
return "packed-switch-payload"
def show_buff(self, pos):
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
buff += "%x:" % self.first_key
for i in self.targets:
buff += " %x" % i
return buff
def show(self, pos):
"""
Print the instruction
"""
print self.show_buff(pos),
def get_length(self):
return self.format_general_size + (self.size * calcsize('=L'))
def get_raw(self):
return pack("=H", self.ident) + pack("=H", self.size) + pack("=i", self.first_key) + ''.join(pack("=l", i) for i in self.targets)
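# Example (sketch): a packed-switch payload stores only first_key plus the
# branch targets, so the effective case keys are rebuilt on the fly by
# get_keys(). `payload` is assumed to be the raw payload buffer.
#
#   ps = PackedSwitch(payload)
#   for key, target in zip(ps.get_keys(), ps.get_targets()):
#       print key, "->", target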
class Instruction35c(Instruction):
"""
This class represents all instructions which have the 35c format
"""
def __init__(self, cm, buff):
super(Instruction35c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 0:
buff += "%s" % (kind)
elif self.A == 1:
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2:
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3:
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4:
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5:
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_operands(self, idx=-1):
l = []
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 0:
l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
elif self.A == 1:
l.extend([(OPERAND_REGISTER, self.C), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 2:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 3:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 4:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 5:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (OPERAND_REGISTER, self.G), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
return l
def get_length(self):
return 6
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
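# Example (sketch): the 35c format packs the argument count in A and up to
# five register nibbles in C..G, which is why get_output() switches on A.
# For an invoke-kind instruction with A == 2 the rendering looks roughly like
#
#   v1, v2, Lcom/example/Foo;->bar(I)V
#
# where the method reference comes from get_kind() on BBBB.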
class Instruction10x(Instruction):
"""
This class represents all instructions which have the 10x format
"""
def __init__(self, cm, buff):
super(Instruction10x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
#log_andro.debug("OP:%x %s" % (self.OP, args[0]))
def get_output(self, idx=-1):
return ""
def get_operands(self, idx=-1):
return []
def get_length(self):
return 2
def get_raw(self):
return pack("=H", self.OP)
class Instruction21h(Instruction):
"""
This class represents all instructions which have the 21h format
"""
def __init__(self, cm, buff):
super(Instruction21h, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
self.formatted_operands = []
if self.OP == 0x15:
self.formatted_operands.append(unpack('=f', '\x00\x00' + pack('=h', self.BBBB))[0])
elif self.OP == 0x19:
self.formatted_operands.append(unpack('=d', '\x00\x00\x00\x00\x00\x00' + pack('=h', self.BBBB))[0])
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBB)
if self.formatted_operands != []:
buff += " # %s" % (str(self.formatted_operands))
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBB)]
def get_formatted_operands(self):
return self.formatted_operands
def get_literals(self):
return [self.BBBB]
def get_raw(self):
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
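# Example (sketch): const/high16 (0x15) and const-wide/high16 (0x19) carry
# only the upper 16 bits of the literal, so the formatted operand is rebuilt
# by left-padding with zero bytes before reinterpreting the value as a
# float/double; e.g. BBBB == 0x3f80 yields the float 1.0 in
# get_formatted_operands(). `raw_bytes` is an assumed 4-byte input.
#
#   ins = Instruction21h(cm, raw_bytes)   # raw_bytes encoding const/high16 v0, 0x3f80
#   print ins.get_output()                # e.g. "v0, 16256 # [1.0]"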
class Instruction11n(Instruction):
"""
This class represents all instructions which have the 11n format
"""
def __init__(self, cm, buff):
super(Instruction11n, self).__init__()
i16 = unpack("=h", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12)
#log_andro.debug("OP:%x %s A:%x B:%x" % (self.OP, args[0], self.A, self.B))
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %d" % (self.A, self.B)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.A), (OPERAND_LITERAL, self.B)]
def get_literals(self):
return [self.B]
def get_length(self):
return 2
def get_raw(self):
return pack("=h", (self.B << 12) | (self.A << 8) | self.OP)
class Instruction21c(Instruction):
"""
This class represents all instructions which have the 21c format
"""
def __init__(self, cm, buff):
super(Instruction21c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
buff += "v%d, %s" % (self.AA, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
return [(OPERAND_REGISTER, self.AA), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
def get_ref_kind(self):
return self.BBBB
def get_string(self):
return get_kind(self.cm, self.get_kind(), self.BBBB)
def get_raw_string(self):
return get_kind(self.cm, KIND_RAW_STRING, self.BBBB)
def get_raw(self):
return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction21s(Instruction):
"""
This class represents all instructions which have the 21s format
"""
def __init__(self, cm, buff):
super(Instruction21s, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
self.formatted_operands = []
if self.OP == 0x16:
self.formatted_operands.append(unpack('=d', pack('=d', self.BBBB))[0])
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBB)
if self.formatted_operands != []:
buff += " # %s" % str(self.formatted_operands)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBB)]
def get_literals(self):
return [self.BBBB]
def get_formatted_operands(self):
return self.formatted_operands
def get_raw(self):
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction22c(Instruction):
"""
This class represents all instructions which have the 22c format
"""
def __init__(self, cm, buff):
super(Instruction22c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
buff += "v%d, v%d, %s" % (self.A, self.B, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B), (self.get_kind() + OPERAND_KIND, self.CCCC, kind)]
def get_ref_kind(self):
return self.CCCC
def get_raw(self):
return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction22cs(Instruction):
"""
This class represents all instructions which have the 22cs format
"""
def __init__(self, cm, buff):
super(Instruction22cs, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
buff += "v%d, v%d, %s" % (self.A, self.B, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B), (self.get_kind() + OPERAND_KIND, self.CCCC, kind)]
def get_ref_kind(self):
return self.CCCC
def get_raw(self):
return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction31t(Instruction):
"""
This class represents all instructions which have the 31t format
"""
def __init__(self, cm, buff):
super(Instruction31t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=i", buff[2:6])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
buff += "v%d, +%x (0x%x)" % (self.AA, self.BBBBBBBB, self.BBBBBBBB * 2 + idx)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBB)]
def get_ref_off(self):
return self.BBBBBBBB
def get_raw(self):
return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction31c(Instruction):
"""
This class represents all instructions which have the 31c format
"""
def __init__(self, cm, buff):
super(Instruction31c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff += "v%d, %s" % (self.AA, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
return [(OPERAND_REGISTER, self.AA), (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind)]
def get_ref_kind(self):
return self.BBBBBBBB
def get_string(self):
"""
Return the string associated to the 'kind' argument
:rtype: string
"""
return get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
def get_raw_string(self):
return get_kind(self.cm, KIND_RAW_STRING, self.BBBBBBBB)
def get_raw(self):
return pack("=HI", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction12x(Instruction):
"""
This class represents all instructions which have the 12x format
"""
def __init__(self, cm, buff):
super(Instruction12x, self).__init__()
i16 = unpack("=h", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s A:%x B:%x" % (self.OP, args[0], self.A, self.B))
def get_length(self):
return 2
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d" % (self.A, self.B)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B)]
def get_raw(self):
return pack("=H", (self.B << 12) | (self.A << 8) | (self.OP))
class Instruction11x(Instruction):
"""
This class represents all instructions which have the 11x format
"""
def __init__(self, cm, buff):
super(Instruction11x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
#log_andro.debug("OP:%x %s AA:%x" % (self.OP, args[0], self.AA))
def get_length(self):
return 2
def get_output(self, idx=-1):
buff = ""
buff += "v%d" % (self.AA)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA)]
def get_raw(self):
return pack("=H", (self.AA << 8) | self.OP)
class Instruction51l(Instruction):
"""
This class represents all instructions which have the 51l format
"""
def __init__(self, cm, buff):
super(Instruction51l, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBBBBBBBBBB = unpack("=q", buff[2:10])[0]
self.formatted_operands = []
if self.OP == 0x18:
self.formatted_operands.append(unpack('=d', pack('=q', self.BBBBBBBBBBBBBBBB))[0])
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBBBBBBBBBB))
def get_length(self):
return 10
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBBBBBBBBBBBBBB)
if self.formatted_operands:
buff += " # %s" % str(self.formatted_operands)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBBBBBBBBBB)]
def get_formatted_operands(self):
return self.formatted_operands
def get_literals(self):
return [self.BBBBBBBBBBBBBBBB]
def get_raw(self):
return pack("=Hq", (self.AA << 8) | self.OP, self.BBBBBBBBBBBBBBBB)
class Instruction31i(Instruction):
"""
    This class represents all instructions which have the 31i format
"""
def __init__(self, cm, buff):
super(Instruction31i, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=i", buff[2:6])[0]
self.formatted_operands = []
if self.OP == 0x14:
self.formatted_operands.append(unpack("=f", pack("=i", self.BBBBBBBB))[0])
elif self.OP == 0x17:
self.formatted_operands.append(unpack('=d', pack('=d', self.BBBBBBBB))[0])
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBBBBBB)
if self.formatted_operands:
buff += " # %s" % str(self.formatted_operands)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_LITERAL, self.BBBBBBBB)]
def get_formatted_operands(self):
return self.formatted_operands
def get_literals(self):
return [self.BBBBBBBB]
def get_raw(self):
return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction22x(Instruction):
"""
This class represents all instructions which have the 22x format
"""
def __init__(self, cm, buff):
super(Instruction22x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d" % (self.AA, self.BBBB)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_REGISTER, self.BBBB)]
def get_raw(self):
return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction23x(Instruction):
"""
This class represents all instructions which have the 23x format
"""
def __init__(self, cm, buff):
super(Instruction23x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
i16 = unpack("=H", buff[2:4])[0]
self.BB = i16 & 0xff
self.CC = (i16 >> 8) & 0xff
#log_andro.debug("OP:%x %s AA:%x BB:%x CC:%x" % (self.OP, args[0], self.AA, self.BB, self.CC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d, v%d" % (self.AA, self.BB, self.CC)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_REGISTER, self.BB), (OPERAND_REGISTER, self.CC)]
def get_raw(self):
return pack("=HH", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction20t(Instruction):
"""
This class represents all instructions which have the 20t format
"""
def __init__(self, cm, buff):
super(Instruction20t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAA = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AAAA:%x" % (self.OP, args[0], self.AAAA))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "%+x" % (self.AAAA)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_OFFSET, self.AAAA)]
def get_ref_off(self):
return self.AAAA
def get_raw(self):
return pack("=Hh", self.OP, self.AAAA)
class Instruction21t(Instruction):
"""
This class represents all instructions which have the 21t format
"""
def __init__(self, cm, buff):
super(Instruction21t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, %+x" % (self.AA, self.BBBB)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_OFFSET, self.BBBB)]
def get_ref_off(self):
return self.BBBB
def get_raw(self):
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction10t(Instruction):
"""
This class represents all instructions which have the 10t format
"""
def __init__(self, cm, buff):
super(Instruction10t, self).__init__()
self.OP = unpack("=B", buff[0:1])[0]
self.AA = unpack("=b", buff[1:2])[0]
#log_andro.debug("OP:%x %s AA:%x" % (self.OP, args[0], self.AA))
def get_length(self):
return 2
def get_output(self, idx=-1):
buff = ""
buff += "%+x" % (self.AA)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_OFFSET, self.AA)]
def get_ref_off(self):
return self.AA
def get_raw(self):
return pack("=Bb", self.OP, self.AA)
class Instruction22t(Instruction):
"""
This class represents all instructions which have the 22t format
"""
def __init__(self, cm, buff):
super(Instruction22t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d, %+x" % (self.A, self.B, self.CCCC)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B), (OPERAND_OFFSET, self.CCCC)]
def get_ref_off(self):
return self.CCCC
def get_raw(self):
return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22s(Instruction):
"""
This class represents all instructions which have the 22s format
"""
def __init__(self, cm, buff):
super(Instruction22s, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d, %d" % (self.A, self.B, self.CCCC)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.A), (OPERAND_REGISTER, self.B), (OPERAND_LITERAL, self.CCCC)]
def get_literals(self):
return [self.CCCC]
def get_raw(self):
return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22b(Instruction):
"""
This class represents all instructions which have the 22b format
"""
def __init__(self, cm, buff):
super(Instruction22b, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BB = unpack("=B", buff[2:3])[0]
self.CC = unpack("=b", buff[3:4])[0]
#log_andro.debug("OP:%x %s AA:%x BB:%x CC:%x" % (self.OP, args[0], self.AA, self.BB, self.CC))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d, %d" % (self.AA, self.BB, self.CC)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AA), (OPERAND_REGISTER, self.BB), (OPERAND_LITERAL, self.CC)]
def get_literals(self):
return [self.CC]
def get_raw(self):
return pack("=Hh", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction30t(Instruction):
"""
This class represents all instructions which have the 30t format
"""
def __init__(self, cm, buff):
super(Instruction30t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAAAAAA = unpack("=i", buff[2:6])[0]
#log_andro.debug("OP:%x %s AAAAAAAA:%x" % (self.OP, args[0], self.AAAAAAAA))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
buff += "%+x" % (self.AAAAAAAA)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_OFFSET, self.AAAAAAAA)]
def get_ref_off(self):
return self.AAAAAAAA
def get_raw(self):
return pack("=Hi", self.OP, self.AAAAAAAA)
class Instruction3rc(Instruction):
"""
This class represents all instructions which have the 3rc format
"""
def __init__(self, cm, buff):
super(Instruction3rc, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
buff += "v%d, %s" % (self.CCCC, kind)
else:
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
else:
l = []
for i in range(self.CCCC, self.NNNN + 1):  # NNNN is the last register of the window (inclusive)
l.append((OPERAND_REGISTER, i))
l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
return l
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction32x(Instruction):
"""
This class represents all instructions which have the 32x format
"""
def __init__(self, cm, buff):
super(Instruction32x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAA = unpack("=H", buff[2:4])[0]
self.BBBB = unpack("=H", buff[4:6])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBB))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
buff += "v%d, v%d" % (self.AAAA, self.BBBB)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_REGISTER, self.AAAA), (OPERAND_REGISTER, self.BBBB)]
def get_raw(self):
return pack("=HHH", self.OP, self.AAAA, self.BBBB)
class Instruction20bc(Instruction):
"""
This class represents all instructions which have the 20bc format
"""
def __init__(self, cm, buff):
super(Instruction20bc, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self):
return 4
def get_output(self, idx=-1):
buff = ""
buff += "%d, %d" % (self.AA, self.BBBB)
return buff
def get_operands(self, idx=-1):
return [(OPERAND_LITERAL, self.AA), (OPERAND_LITERAL, self.BBBB)]
def get_raw(self):
return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction35mi(Instruction):
"""
This class represents all instructions which have the 35mi format
"""
def __init__(self, cm, buff):
super(Instruction35mi, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1:
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2:
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3:
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4:
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5:
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_operands(self, idx=-1):
l = []
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1:
l.extend([(OPERAND_REGISTER, self.C), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 2:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 3:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 4:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 5:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (OPERAND_REGISTER, self.G), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
return l
def get_length(self):
return 6
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction35ms(Instruction):
"""
This class represents all instructions which have the 35ms format
"""
def __init__(self, cm, buff):
super(Instruction35ms, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1:
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2:
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3:
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4:
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5:
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_operands(self, idx=-1):
l = []
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1:
l.extend([(OPERAND_REGISTER, self.C), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 2:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 3:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 4:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
elif self.A == 5:
l.extend([(OPERAND_REGISTER, self.C), (OPERAND_REGISTER, self.D), (OPERAND_REGISTER, self.E), (OPERAND_REGISTER, self.F), (OPERAND_REGISTER, self.G), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)])
return l
def get_length(self):
return 6
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction3rmi(Instruction):
"""
This class represents all instructions which have the 3rmi format
"""
def __init__(self, cm, buff):
super(Instruction3rmi, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
buff += "v%d, %s" % (self.CCCC, kind)
else:
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
else:
l = []
for i in range(self.CCCC, self.NNNN + 1):  # NNNN is the last register of the window (inclusive)
l.append((OPERAND_REGISTER, i))
l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
return l
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction3rms(Instruction):
"""
This class represents all instructions which have the 3rms format
"""
def __init__(self, cm, buff):
super(Instruction3rms, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self):
return 6
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
buff += "v%d, %s" % (self.CCCC, kind)
else:
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN:
return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBB, kind)]
else:
l = []
for i in range(self.CCCC, self.NNNN + 1):  # NNNN is the last register of the window (inclusive)
l.append((OPERAND_REGISTER, i))
l.append((self.get_kind() + OPERAND_KIND, self.BBBB, kind))
return l
def get_ref_kind(self):
return self.BBBB
def get_raw(self):
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction41c(Instruction):
"""
This class represents all instructions which have the 41c format
"""
def __init__(self, cm, buff):
super(Instruction41c, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBBBBBB))
def get_length(self):
return 8
def get_output(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff = ""
buff += "v%d, %s" % (self.AAAA, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
return [(OPERAND_REGISTER, self.AAAA), (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind)]
def get_ref_kind(self):
return self.BBBBBBBB
def get_raw(self):
return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction40sc(Instruction):
"""
This class represents all instructions which have the 40sc format
"""
def __init__(self, cm, buff):
super(Instruction40sc, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBBBBBB))
def get_length(self):
return 8
def get_output(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff = ""
buff += "%d, %s" % (self.AAAA, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
return [(OPERAND_LITERAL, self.AAAA), (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind)]
def get_ref_kind(self):
return self.BBBBBBBB
def get_raw(self):
return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction52c(Instruction):
"""
This class represents all instructions which have the 52c format
"""
def __init__(self, cm, buff):
super(Instruction52c, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.CCCCCCCC = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
self.BBBB = unpack("=H", buff[8:10])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBB))
def get_length(self):
return 10
def get_output(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.CCCCCCCC)
buff = ""
buff += "v%d, v%d, %s" % (self.AAAA, self.BBBB, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.CCCCCCCC)
return [(OPERAND_REGISTER, self.AAAA), (OPERAND_REGISTER, self.BBBB), (self.get_kind() + OPERAND_KIND, self.CCCCCCCC, kind)]
def get_ref_kind(self):
return self.CCCCCCCC
def get_raw(self):
return pack("=HIHH", self.OP, self.CCCCCCCC, self.AAAA, self.BBBB)
class Instruction5rc(Instruction):
"""
This class represents all instructions which have the 5rc format
"""
def __init__(self, cm, buff):
super(Instruction5rc, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
self.CCCC = unpack("=H", buff[8:10])[0]
self.NNNN = self.CCCC + self.AAAA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AAAA, self.BBBBBBBB, self.CCCC, self.NNNN))
def get_length(self):
return 10
def get_output(self, idx=-1):
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
if self.CCCC == self.NNNN:
buff += "v%d, %s" % (self.CCCC, kind)
else:
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_operands(self, idx=-1):
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
if self.CCCC == self.NNNN:
return [(OPERAND_REGISTER, self.CCCC), (self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind)]
else:
l = []
for i in range(self.CCCC, self.NNNN + 1):  # NNNN is the last register of the window (inclusive)
l.append((OPERAND_REGISTER, i))
l.append((self.get_kind() + OPERAND_KIND, self.BBBBBBBB, kind))
return l
def get_ref_kind(self):
return self.BBBBBBBB
def get_raw(self):
return pack("=HIHH", self.OP, self.BBBBBBBB, self.AAAA, self.CCCC)
DALVIK_OPCODES_FORMAT = {
0x00 : [Instruction10x, [ "nop" ] ],
0x01 : [Instruction12x, [ "move" ] ],
0x02 : [Instruction22x, [ "move/from16" ] ],
0x03 : [Instruction32x, [ "move/16" ] ],
0x04 : [Instruction12x, [ "move-wide" ] ],
0x05 : [Instruction22x, [ "move-wide/from16" ] ],
0x06 : [Instruction32x, [ "move-wide/16" ] ],
0x07 : [Instruction12x, [ "move-object" ] ],
0x08 : [Instruction22x, [ "move-object/from16" ] ],
0x09 : [Instruction32x, [ "move-object/16" ] ],
0x0a : [Instruction11x, [ "move-result" ] ],
0x0b : [Instruction11x, [ "move-result-wide" ] ],
0x0c : [Instruction11x, [ "move-result-object" ] ],
0x0d : [Instruction11x, [ "move-exception" ] ],
0x0e : [Instruction10x, [ "return-void" ] ],
0x0f : [Instruction11x, [ "return" ] ],
0x10 : [Instruction11x, [ "return-wide" ] ],
0x11 : [Instruction11x, [ "return-object" ] ],
0x12 : [Instruction11n, [ "const/4" ] ],
0x13 : [Instruction21s, [ "const/16" ] ],
0x14 : [Instruction31i, [ "const" ] ],
0x15 : [Instruction21h, [ "const/high16" ] ],
0x16 : [Instruction21s, [ "const-wide/16" ] ],
0x17 : [Instruction31i, [ "const-wide/32" ] ],
0x18 : [Instruction51l, [ "const-wide" ] ],
0x19 : [Instruction21h, [ "const-wide/high16" ] ],
0x1a : [Instruction21c, [ "const-string", KIND_STRING ] ],
0x1b : [Instruction31c, [ "const-string/jumbo", KIND_STRING ] ],
0x1c : [Instruction21c, [ "const-class", KIND_TYPE ] ],
0x1d : [Instruction11x, [ "monitor-enter" ] ],
0x1e : [Instruction11x, [ "monitor-exit" ] ],
0x1f : [Instruction21c, [ "check-cast", KIND_TYPE ] ],
0x20 : [Instruction22c, [ "instance-of", KIND_TYPE ] ],
0x21 : [Instruction12x, [ "array-length", KIND_TYPE ] ],
0x22 : [Instruction21c, [ "new-instance", KIND_TYPE ] ],
0x23 : [Instruction22c, [ "new-array", KIND_TYPE ] ],
0x24 : [Instruction35c, [ "filled-new-array", KIND_TYPE ] ],
0x25 : [Instruction3rc, [ "filled-new-array/range", KIND_TYPE ] ],
0x26 : [Instruction31t, [ "fill-array-data" ] ],
0x27 : [Instruction11x, [ "throw" ] ],
0x28 : [Instruction10t, [ "goto" ] ],
0x29 : [Instruction20t, [ "goto/16" ] ],
0x2a : [Instruction30t, [ "goto/32" ] ],
0x2b : [Instruction31t, [ "packed-switch" ] ],
0x2c : [Instruction31t, [ "sparse-switch" ] ],
0x2d : [Instruction23x, [ "cmpl-float" ] ],
0x2e : [Instruction23x, [ "cmpg-float" ] ],
0x2f : [Instruction23x, [ "cmpl-double" ] ],
0x30 : [Instruction23x, [ "cmpg-double" ] ],
0x31 : [Instruction23x, [ "cmp-long" ] ],
0x32 : [Instruction22t, [ "if-eq" ] ],
0x33 : [Instruction22t, [ "if-ne" ] ],
0x34 : [Instruction22t, [ "if-lt" ] ],
0x35 : [Instruction22t, [ "if-ge" ] ],
0x36 : [Instruction22t, [ "if-gt" ] ],
0x37 : [Instruction22t, [ "if-le" ] ],
0x38 : [Instruction21t, [ "if-eqz" ] ],
0x39 : [Instruction21t, [ "if-nez" ] ],
0x3a : [Instruction21t, [ "if-ltz" ] ],
0x3b : [Instruction21t, [ "if-gez" ] ],
0x3c : [Instruction21t, [ "if-gtz" ] ],
0x3d : [Instruction21t, [ "if-lez" ] ],
#unused
0x3e : [Instruction10x, [ "nop" ] ],
0x3f : [Instruction10x, [ "nop" ] ],
0x40 : [Instruction10x, [ "nop" ] ],
0x41 : [Instruction10x, [ "nop" ] ],
0x42 : [Instruction10x, [ "nop" ] ],
0x43 : [Instruction10x, [ "nop" ] ],
0x44 : [Instruction23x, [ "aget" ] ],
0x45 : [Instruction23x, [ "aget-wide" ] ],
0x46 : [Instruction23x, [ "aget-object" ] ],
0x47 : [Instruction23x, [ "aget-boolean" ] ],
0x48 : [Instruction23x, [ "aget-byte" ] ],
0x49 : [Instruction23x, [ "aget-char" ] ],
0x4a : [Instruction23x, [ "aget-short" ] ],
0x4b : [Instruction23x, [ "aput" ] ],
0x4c : [Instruction23x, [ "aput-wide" ] ],
0x4d : [Instruction23x, [ "aput-object" ] ],
0x4e : [Instruction23x, [ "aput-boolean" ] ],
0x4f : [Instruction23x, [ "aput-byte" ] ],
0x50 : [Instruction23x, [ "aput-char" ] ],
0x51 : [Instruction23x, [ "aput-short" ] ],
0x52 : [Instruction22c, [ "iget", KIND_FIELD ] ],
0x53 : [Instruction22c, [ "iget-wide", KIND_FIELD ] ],
0x54 : [Instruction22c, [ "iget-object", KIND_FIELD ] ],
0x55 : [Instruction22c, [ "iget-boolean", KIND_FIELD ] ],
0x56 : [Instruction22c, [ "iget-byte", KIND_FIELD ] ],
0x57 : [Instruction22c, [ "iget-char", KIND_FIELD ] ],
0x58 : [Instruction22c, [ "iget-short", KIND_FIELD ] ],
0x59 : [Instruction22c, [ "iput", KIND_FIELD ] ],
0x5a : [Instruction22c, [ "iput-wide", KIND_FIELD ] ],
0x5b : [Instruction22c, [ "iput-object", KIND_FIELD ] ],
0x5c : [Instruction22c, [ "iput-boolean", KIND_FIELD ] ],
0x5d : [Instruction22c, [ "iput-byte", KIND_FIELD ] ],
0x5e : [Instruction22c, [ "iput-char", KIND_FIELD ] ],
0x5f : [Instruction22c, [ "iput-short", KIND_FIELD ] ],
0x60 : [Instruction21c, [ "sget", KIND_FIELD ] ],
0x61 : [Instruction21c, [ "sget-wide", KIND_FIELD ] ],
0x62 : [Instruction21c, [ "sget-object", KIND_FIELD ] ],
0x63 : [Instruction21c, [ "sget-boolean", KIND_FIELD ] ],
0x64 : [Instruction21c, [ "sget-byte", KIND_FIELD ] ],
0x65 : [Instruction21c, [ "sget-char", KIND_FIELD ] ],
0x66 : [Instruction21c, [ "sget-short", KIND_FIELD ] ],
0x67 : [Instruction21c, [ "sput", KIND_FIELD ] ],
0x68 : [Instruction21c, [ "sput-wide", KIND_FIELD ] ],
0x69 : [Instruction21c, [ "sput-object", KIND_FIELD ] ],
0x6a : [Instruction21c, [ "sput-boolean", KIND_FIELD ] ],
0x6b : [Instruction21c, [ "sput-byte", KIND_FIELD ] ],
0x6c : [Instruction21c, [ "sput-char", KIND_FIELD ] ],
0x6d : [Instruction21c, [ "sput-short", KIND_FIELD ] ],
0x6e : [Instruction35c, [ "invoke-virtual", KIND_METH ] ],
0x6f : [Instruction35c, [ "invoke-super", KIND_METH ] ],
0x70 : [Instruction35c, [ "invoke-direct", KIND_METH ] ],
0x71 : [Instruction35c, [ "invoke-static", KIND_METH ] ],
0x72 : [Instruction35c, [ "invoke-interface", KIND_METH ] ],
# unused
0x73 : [Instruction10x, [ "nop" ] ],
0x74 : [Instruction3rc, [ "invoke-virtual/range", KIND_METH ] ],
0x75 : [Instruction3rc, [ "invoke-super/range", KIND_METH ] ],
0x76 : [Instruction3rc, [ "invoke-direct/range", KIND_METH ] ],
0x77 : [Instruction3rc, [ "invoke-static/range", KIND_METH ] ],
0x78 : [Instruction3rc, [ "invoke-interface/range", KIND_METH ] ],
# unused
0x79 : [Instruction10x, [ "nop" ] ],
0x7a : [Instruction10x, [ "nop" ] ],
0x7b : [Instruction12x, [ "neg-int" ] ],
0x7c : [Instruction12x, [ "not-int" ] ],
0x7d : [Instruction12x, [ "neg-long" ] ],
0x7e : [Instruction12x, [ "not-long" ] ],
0x7f : [Instruction12x, [ "neg-float" ] ],
0x80 : [Instruction12x, [ "neg-double" ] ],
0x81 : [Instruction12x, [ "int-to-long" ] ],
0x82 : [Instruction12x, [ "int-to-float" ] ],
0x83 : [Instruction12x, [ "int-to-double" ] ],
0x84 : [Instruction12x, [ "long-to-int" ] ],
0x85 : [Instruction12x, [ "long-to-float" ] ],
0x86 : [Instruction12x, [ "long-to-double" ] ],
0x87 : [Instruction12x, [ "float-to-int" ] ],
0x88 : [Instruction12x, [ "float-to-long" ] ],
0x89 : [Instruction12x, [ "float-to-double" ] ],
0x8a : [Instruction12x, [ "double-to-int" ] ],
0x8b : [Instruction12x, [ "double-to-long" ] ],
0x8c : [Instruction12x, [ "double-to-float" ] ],
0x8d : [Instruction12x, [ "int-to-byte" ] ],
0x8e : [Instruction12x, [ "int-to-char" ] ],
0x8f : [Instruction12x, [ "int-to-short" ] ],
0x90 : [Instruction23x, [ "add-int" ] ],
0x91 : [Instruction23x, [ "sub-int" ] ],
0x92 : [Instruction23x, [ "mul-int" ] ],
0x93 : [Instruction23x, [ "div-int" ] ],
0x94 : [Instruction23x, [ "rem-int" ] ],
0x95 : [Instruction23x, [ "and-int" ] ],
0x96 : [Instruction23x, [ "or-int" ] ],
0x97 : [Instruction23x, [ "xor-int" ] ],
0x98 : [Instruction23x, [ "shl-int" ] ],
0x99 : [Instruction23x, [ "shr-int" ] ],
0x9a : [Instruction23x, [ "ushr-int" ] ],
0x9b : [Instruction23x, [ "add-long" ] ],
0x9c : [Instruction23x, [ "sub-long" ] ],
0x9d : [Instruction23x, [ "mul-long" ] ],
0x9e : [Instruction23x, [ "div-long" ] ],
0x9f : [Instruction23x, [ "rem-long" ] ],
0xa0 : [Instruction23x, [ "and-long" ] ],
0xa1 : [Instruction23x, [ "or-long" ] ],
0xa2 : [Instruction23x, [ "xor-long" ] ],
0xa3 : [Instruction23x, [ "shl-long" ] ],
0xa4 : [Instruction23x, [ "shr-long" ] ],
0xa5 : [Instruction23x, [ "ushr-long" ] ],
0xa6 : [Instruction23x, [ "add-float" ] ],
0xa7 : [Instruction23x, [ "sub-float" ] ],
0xa8 : [Instruction23x, [ "mul-float" ] ],
0xa9 : [Instruction23x, [ "div-float" ] ],
0xaa : [Instruction23x, [ "rem-float" ] ],
0xab : [Instruction23x, [ "add-double" ] ],
0xac : [Instruction23x, [ "sub-double" ] ],
0xad : [Instruction23x, [ "mul-double" ] ],
0xae : [Instruction23x, [ "div-double" ] ],
0xaf : [Instruction23x, [ "rem-double" ] ],
0xb0 : [Instruction12x, [ "add-int/2addr" ] ],
0xb1 : [Instruction12x, [ "sub-int/2addr" ] ],
0xb2 : [Instruction12x, [ "mul-int/2addr" ] ],
0xb3 : [Instruction12x, [ "div-int/2addr" ] ],
0xb4 : [Instruction12x, [ "rem-int/2addr" ] ],
0xb5 : [Instruction12x, [ "and-int/2addr" ] ],
0xb6 : [Instruction12x, [ "or-int/2addr" ] ],
0xb7 : [Instruction12x, [ "xor-int/2addr" ] ],
0xb8 : [Instruction12x, [ "shl-int/2addr" ] ],
0xb9 : [Instruction12x, [ "shr-int/2addr" ] ],
0xba : [Instruction12x, [ "ushr-int/2addr" ] ],
0xbb : [Instruction12x, [ "add-long/2addr" ] ],
0xbc : [Instruction12x, [ "sub-long/2addr" ] ],
0xbd : [Instruction12x, [ "mul-long/2addr" ] ],
0xbe : [Instruction12x, [ "div-long/2addr" ] ],
0xbf : [Instruction12x, [ "rem-long/2addr" ] ],
0xc0 : [Instruction12x, [ "and-long/2addr" ] ],
0xc1 : [Instruction12x, [ "or-long/2addr" ] ],
0xc2 : [Instruction12x, [ "xor-long/2addr" ] ],
0xc3 : [Instruction12x, [ "shl-long/2addr" ] ],
0xc4 : [Instruction12x, [ "shr-long/2addr" ] ],
0xc5 : [Instruction12x, [ "ushr-long/2addr" ] ],
0xc6 : [Instruction12x, [ "add-float/2addr" ] ],
0xc7 : [Instruction12x, [ "sub-float/2addr" ] ],
0xc8 : [Instruction12x, [ "mul-float/2addr" ] ],
0xc9 : [Instruction12x, [ "div-float/2addr" ] ],
0xca : [Instruction12x, [ "rem-float/2addr" ] ],
0xcb : [Instruction12x, [ "add-double/2addr" ] ],
0xcc : [Instruction12x, [ "sub-double/2addr" ] ],
0xcd : [Instruction12x, [ "mul-double/2addr" ] ],
0xce : [Instruction12x, [ "div-double/2addr" ] ],
0xcf : [Instruction12x, [ "rem-double/2addr" ] ],
0xd0 : [Instruction22s, [ "add-int/lit16" ] ],
0xd1 : [Instruction22s, [ "rsub-int" ] ],
0xd2 : [Instruction22s, [ "mul-int/lit16" ] ],
0xd3 : [Instruction22s, [ "div-int/lit16" ] ],
0xd4 : [Instruction22s, [ "rem-int/lit16" ] ],
0xd5 : [Instruction22s, [ "and-int/lit16" ] ],
0xd6 : [Instruction22s, [ "or-int/lit16" ] ],
0xd7 : [Instruction22s, [ "xor-int/lit16" ] ],
0xd8 : [Instruction22b, [ "add-int/lit8" ] ],
0xd9 : [Instruction22b, [ "rsub-int/lit8" ] ],
0xda : [Instruction22b, [ "mul-int/lit8" ] ],
0xdb : [Instruction22b, [ "div-int/lit8" ] ],
0xdc : [Instruction22b, [ "rem-int/lit8" ] ],
0xdd : [Instruction22b, [ "and-int/lit8" ] ],
0xde : [Instruction22b, [ "or-int/lit8" ] ],
0xdf : [Instruction22b, [ "xor-int/lit8" ] ],
0xe0 : [Instruction22b, [ "shl-int/lit8" ] ],
0xe1 : [Instruction22b, [ "shr-int/lit8" ] ],
0xe2 : [Instruction22b, [ "ushr-int/lit8" ] ],
# expanded opcodes
0xe3 : [Instruction22c, [ "iget-volatile", KIND_FIELD ] ],
0xe4 : [Instruction22c, [ "iput-volatile", KIND_FIELD ] ],
0xe5 : [Instruction21c, [ "sget-volatile", KIND_FIELD ] ],
0xe6 : [Instruction21c, [ "sput-volatile", KIND_FIELD ] ],
0xe7 : [Instruction22c, [ "iget-object-volatile", KIND_FIELD ] ],
0xe8 : [Instruction22c, [ "iget-wide-volatile", KIND_FIELD ] ],
0xe9 : [Instruction22c, [ "iput-wide-volatile", KIND_FIELD ] ],
0xea : [Instruction21c, [ "sget-wide-volatile", KIND_FIELD ] ],
0xeb : [Instruction21c, [ "sput-wide-volatile", KIND_FIELD ] ],
0xec : [Instruction10x, [ "breakpoint" ] ],
0xed : [Instruction20bc, [ "throw-verification-error", VARIES ] ],
0xee : [Instruction35mi, [ "execute-inline", INLINE_METHOD ] ],
0xef : [Instruction3rmi, [ "execute-inline/range", INLINE_METHOD ] ],
0xf0 : [Instruction35c, [ "invoke-object-init/range", KIND_METH ] ],
0xf1 : [Instruction10x, [ "return-void-barrier" ] ],
0xf2 : [Instruction22cs, [ "iget-quick", FIELD_OFFSET ] ],
0xf3 : [Instruction22cs, [ "iget-wide-quick", FIELD_OFFSET ] ],
0xf4 : [Instruction22cs, [ "iget-object-quick", FIELD_OFFSET ] ],
0xf5 : [Instruction22cs, [ "iput-quick", FIELD_OFFSET ] ],
0xf6 : [Instruction22cs, [ "iput-wide-quick", FIELD_OFFSET ] ],
0xf7 : [Instruction22cs, [ "iput-object-quick", FIELD_OFFSET ] ],
0xf8 : [Instruction35ms, [ "invoke-virtual-quick", VTABLE_OFFSET ] ],
0xf9 : [Instruction3rms, [ "invoke-virtual-quick/range", VTABLE_OFFSET ] ],
0xfa : [Instruction35ms, [ "invoke-super-quick", VTABLE_OFFSET ] ],
0xfb : [Instruction3rms, [ "invoke-super-quick/range", VTABLE_OFFSET ] ],
0xfc : [Instruction22c, [ "iput-object-volatile", KIND_FIELD ] ],
0xfd : [Instruction21c, [ "sget-object-volatile", KIND_FIELD ] ],
0xfe : [Instruction21c, [ "sput-object-volatile", KIND_FIELD ] ],
}
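# Illustrative sketch, not part of the original API: each entry of
# DALVIK_OPCODES_FORMAT pairs a format class with its mnemonic and, for
# reference-bearing opcodes, a kind constant (KIND_STRING, KIND_METH, ...).
# Decoding "if-eqz v0, +5" by hand therefore looks like this; Instruction21t
# never touches the ClassManager, so None can stand in for it here.
def _example_decode_if_eqz():
    fmt_class, info = DALVIK_OPCODES_FORMAT[0x38]    # Instruction21t, ["if-eqz"]
    ins = fmt_class(None, pack("=Hh", 0x38, 5))      # AA=0 (v0), BBBB=+5
    return info[0], ins.get_output()                 # ("if-eqz", "v0, +5")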
DALVIK_OPCODES_PAYLOAD = {
0x0100 : [PackedSwitch],
0x0200 : [SparseSwitch],
0x0300 : [FillArrayData],
}
INLINE_METHODS = [
[ "Lorg/apache/harmony/dalvik/NativeTestTarget;", "emptyInlineMethod", "()V" ],
[ "Ljava/lang/String;", "charAt", "(I)C" ],
[ "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" ],
[ "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" ],
[ "Ljava/lang/String;", "fastIndexOf", "(II)I" ],
[ "Ljava/lang/String;", "isEmpty", "()Z" ],
[ "Ljava/lang/String;", "length", "()I" ],
[ "Ljava/lang/Math;", "abs", "(I)I" ],
[ "Ljava/lang/Math;", "abs", "(J)J" ],
[ "Ljava/lang/Math;", "abs", "(F)F" ],
[ "Ljava/lang/Math;", "abs", "(D)D" ],
[ "Ljava/lang/Math;", "min", "(II)I" ],
[ "Ljava/lang/Math;", "max", "(II)I" ],
[ "Ljava/lang/Math;", "sqrt", "(D)D" ],
[ "Ljava/lang/Math;", "cos", "(D)D" ],
[ "Ljava/lang/Math;", "sin", "(D)D" ],
[ "Ljava/lang/Float;", "floatToIntBits", "(F)I" ],
[ "Ljava/lang/Float;", "floatToRawIntBits", "(F)I" ],
[ "Ljava/lang/Float;", "intBitsToFloat", "(I)F" ],
[ "Ljava/lang/Double;", "doubleToLongBits", "(D)J" ],
[ "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J" ],
[ "Ljava/lang/Double;", "longBitsToDouble", "(J)D" ],
]
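# Sketch under an assumption (not part of the original API): the BBBB index
# carried by the execute-inline opcodes above is taken here to index
# INLINE_METHODS directly, mirroring dalvik's gDvmInlineOpsTable.
def _inline_method_name(idx):
    cls, name, proto = INLINE_METHODS[idx]
    return "%s->%s%s" % (cls, name, proto)    # e.g. idx 1 -> "Ljava/lang/String;->charAt(I)C"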
DALVIK_OPCODES_EXTENDED_WIDTH = {
0x00ff: [ Instruction41c, ["const-class/jumbo", KIND_TYPE ] ],
0x01ff: [ Instruction41c, ["check-cast/jumbo", KIND_TYPE ] ],
0x02ff: [ Instruction52c, ["instance-of/jumbo", KIND_TYPE ] ],
0x03ff: [ Instruction41c, ["new-instance/jumbo", KIND_TYPE ] ],
0x04ff: [ Instruction52c, ["new-array/jumbo", KIND_TYPE ] ],
0x05ff: [ Instruction5rc, ["filled-new-array/jumbo", KIND_TYPE ] ],
0x06ff: [ Instruction52c, ["iget/jumbo", KIND_FIELD ] ],
0x07ff: [ Instruction52c, ["iget-wide/jumbo", KIND_FIELD ] ],
0x08ff: [ Instruction52c, ["iget-object/jumbo", KIND_FIELD ] ],
0x09ff: [ Instruction52c, ["iget-boolean/jumbo", KIND_FIELD ] ],
0x0aff: [ Instruction52c, ["iget-byte/jumbo", KIND_FIELD ] ],
0x0bff: [ Instruction52c, ["iget-char/jumbo", KIND_FIELD ] ],
0x0cff: [ Instruction52c, ["iget-short/jumbo", KIND_FIELD ] ],
0x0dff: [ Instruction52c, ["iput/jumbo", KIND_FIELD ] ],
0x0eff: [ Instruction52c, ["iput-wide/jumbo", KIND_FIELD ] ],
0x0fff: [ Instruction52c, ["iput-object/jumbo", KIND_FIELD ] ],
0x10ff: [ Instruction52c, ["iput-boolean/jumbo", KIND_FIELD ] ],
0x11ff: [ Instruction52c, ["iput-byte/jumbo", KIND_FIELD ] ],
0x12ff: [ Instruction52c, ["iput-char/jumbo", KIND_FIELD ] ],
0x13ff: [ Instruction52c, ["iput-short/jumbo", KIND_FIELD ] ],
0x14ff: [ Instruction41c, ["sget/jumbo", KIND_FIELD ] ],
0x15ff: [ Instruction41c, ["sget-wide/jumbo", KIND_FIELD ] ],
0x16ff: [ Instruction41c, ["sget-object/jumbo", KIND_FIELD ] ],
0x17ff: [ Instruction41c, ["sget-boolean/jumbo", KIND_FIELD ] ],
0x18ff: [ Instruction41c, ["sget-byte/jumbo", KIND_FIELD ] ],
0x19ff: [ Instruction41c, ["sget-char/jumbo", KIND_FIELD ] ],
0x1aff: [ Instruction41c, ["sget-short/jumbo", KIND_FIELD ] ],
0x1bff: [ Instruction41c, ["sput/jumbo", KIND_FIELD ] ],
0x1cff: [ Instruction41c, ["sput-wide/jumbo", KIND_FIELD ] ],
0x1dff: [ Instruction41c, ["sput-object/jumbo", KIND_FIELD ] ],
0x1eff: [ Instruction41c, ["sput-boolean/jumbo", KIND_FIELD ] ],
0x1fff: [ Instruction41c, ["sput-byte/jumbo", KIND_FIELD ] ],
0x20ff: [ Instruction41c, ["sput-char/jumbo", KIND_FIELD ] ],
0x21ff: [ Instruction41c, ["sput-short/jumbo", KIND_FIELD ] ],
0x22ff: [ Instruction5rc, ["invoke-virtual/jumbo", KIND_METH ] ],
0x23ff: [ Instruction5rc, ["invoke-super/jumbo", KIND_METH ] ],
0x24ff: [ Instruction5rc, ["invoke-direct/jumbo", KIND_METH ] ],
0x25ff: [ Instruction5rc, ["invoke-static/jumbo", KIND_METH ] ],
0x26ff: [ Instruction5rc, ["invoke-interface/jumbo", KIND_METH ] ],
}
DALVIK_OPCODES_OPTIMIZED = {
0xf2ff : [ Instruction5rc, ["invoke-object-init/jumbo", KIND_METH ] ],
0xf3ff : [ Instruction52c, ["iget-volatile/jumbo", KIND_FIELD ] ],
0xf4ff : [ Instruction52c, ["iget-wide-volatile/jumbo", KIND_FIELD ] ],
0xf5ff : [ Instruction52c, ["iget-object-volatile/jumbo ", KIND_FIELD ] ],
0xf6ff : [ Instruction52c, ["iput-volatile/jumbo", KIND_FIELD ] ],
0xf7ff : [ Instruction52c, ["iput-wide-volatile/jumbo", KIND_FIELD ] ],
0xf8ff : [ Instruction52c, ["iput-object-volatile/jumbo", KIND_FIELD ] ],
0xf9ff : [ Instruction41c, ["sget-volatile/jumbo", KIND_FIELD ] ],
0xfaff : [ Instruction41c, ["sget-wide-volatile/jumbo", KIND_FIELD ] ],
0xfbff : [ Instruction41c, ["sget-object-volatile/jumbo", KIND_FIELD ] ],
0xfcff : [ Instruction41c, ["sput-volatile/jumbo", KIND_FIELD ] ],
0xfdff : [ Instruction41c, ["sput-wide-volatile/jumbo", KIND_FIELD ] ],
0xfeff : [ Instruction41c, ["sput-object-volatile/jumbo", KIND_FIELD ] ],
0xffff : [ Instruction40sc, ["throw-verification-error/jumbo", VARIES ] ],
}
class Unresolved(Instruction):
def __init__(self, cm, data):
self.cm = cm
self.data = data
def get_name(self):
return "unresolved"
def get_operands(self, idx=-1):
return [(OPERAND_KIND + KIND_STRING, -1, "AG:OP: invalid opcode " + repr(self.data))]
def get_op_value(self):
return -1
def get_output(self, idx=-1):
return repr(self.data)
def get_length(self):
return len(self.data)
def get_raw(self):
return self.data
def get_instruction(cm, op_value, buff, odex=False):
try:
if not odex and (op_value >= 0xe3 and op_value <= 0xfe):
return InstructionInvalid(cm, buff)
try:
return DALVIK_OPCODES_FORMAT[op_value][0](cm, buff)
except KeyError:
return InstructionInvalid(cm, buff)
except:
return Unresolved(cm, buff)
def get_extented_instruction(cm, op_value, buff):
return DALVIK_OPCODES_EXTENDED_WIDTH[op_value][0]( cm, buff )
def get_optimized_instruction(cm, op_value, buff):
return DALVIK_OPCODES_OPTIMIZED[op_value][0]( cm, buff )
def get_instruction_payload(op_value, buff):
return DALVIK_OPCODES_PAYLOAD[op_value][0]( buff )
class LinearSweepAlgorithm(object):
"""
This class is used to disassemble a method. The algorithm used by this class is linear sweep.
"""
def get_instructions(self, cm, size, insn, idx):
"""
:param cm: a ClassManager object
:type cm: :class:`ClassManager` object
:param size: the total size of the buffer
:type size: int
:param insn: a raw buffer containing the instructions
:type insn: string
:param idx: a start address in the buffer
:type idx: int
:rtype: a generator of :class:`Instruction` objects
"""
self.odex = cm.get_odex_format()
max_idx = size * calcsize('=H')
if max_idx > len(insn):
max_idx = len(insn)
# Get instructions
while idx < max_idx:
obj = None
classic_instruction = True
op_value = unpack('=B', insn[idx])[0]
#print "%x %x" % (op_value, idx)
# payload instructions or extended/optimized instructions
if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx):
op_value = unpack('=H', insn[idx:idx + 2])[0]
# payload instructions ?
if op_value in DALVIK_OPCODES_PAYLOAD:
try:
obj = get_instruction_payload(op_value, insn[idx:])
classic_instruction = False
except struct.error:
warning("error while decoding instruction ...")
elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH:
try:
obj = get_extented_instruction(cm, op_value, insn[idx:])
classic_instruction = False
except struct.error, why:
warning("error while decoding instruction ..." + why.__str__())
# optimized instructions ?
elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED):
obj = get_optimized_instruction(cm, op_value, insn[idx:])
classic_instruction = False
# classical instructions
if classic_instruction:
op_value = unpack('=B', insn[idx])[0]
obj = get_instruction(cm, op_value, insn[idx:], self.odex)
# emit instruction
yield obj
idx = idx + obj.get_length()
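# Usage sketch, not part of the original API: the sweep only needs the
# ClassManager for get_odex_format() (and for instructions that resolve a kind),
# so a stub is enough to disassemble plain opcodes.  16-bit opcodes from the
# extended/optimized tables are only considered when the low byte is 0x00/0xff,
# and the optimized ones only when the code comes from an odex file.
def _example_linear_sweep():
    class _StubCM(object):
        def get_odex_format(self):
            return False
    insn = "\x12\x10\x0e\x00"    # const/4 v0, 1 ; return-void
    lsa = LinearSweepAlgorithm()
    # roughly ["v0, 1", ""] -- one output string per decoded instruction
    return [i.get_output() for i in lsa.get_instructions(_StubCM(), 2, insn, 0)]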
class DCode(object):
"""
This class represents the instructions of a method
:param class_manager: the ClassManager
:type class_manager: :class:`ClassManager` object
:param offset: the offset of the buffer
:type offset: int
:param size: the total size of the buffer
:type size: int
:param buff: a raw buffer containing the instructions
:type buff: string
"""
def __init__(self, class_manager, offset, size, buff):
self.CM = class_manager
self.insn = buff
self.offset = offset
self.size = size
self.notes = {}
self.cached_instructions = []
self.rcache = 0
self.idx = 0
def get_insn(self):
"""
Get the insn buffer
:rtype: string
"""
return self.insn
def set_insn(self, insn):
"""
Set a new raw buffer to disassemble
:param insn: the buffer
:type insn: string
"""
self.insn = insn
self.size = len(self.insn)
def set_idx(self, idx):
"""
Set the start address of the buffer
:param idx: the index
:type idx: int
"""
self.idx = idx
def set_instructions(self, instructions):
"""
Set the instructions
:param instructions: the list of instructions
:type instructions: a list of :class:`Instruction`
"""
self.cached_instructions = instructions
def get_instructions(self):
"""
Get the instructions
:rtype: a generator of each :class:`Instruction` (or a cached list of instructions if they have been set with set_instructions)
"""
# it is possible to use a cache of instructions (avoids a new disassembly)
if self.cached_instructions:
for i in self.cached_instructions:
yield i
else:
if self.rcache >= 5:
lsa = LinearSweepAlgorithm()
for i in lsa.get_instructions(self.CM, self.size, self.insn, self.idx):
self.cached_instructions.append(i)
for i in self.cached_instructions:
yield i
else:
self.rcache += 1
if self.size >= 1000:
self.rcache = 5
lsa = LinearSweepAlgorithm()
for i in lsa.get_instructions(self.CM, self.size, self.insn, self.idx):
yield i
def reload(self):
pass
def add_inote(self, msg, idx, off=None):
"""
Add a message to a specific instruction, selected by its index (default) or by its address if off is specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if off != None:
idx = self.off_to_pos(off)
if idx not in self.notes:
self.notes[idx] = []
self.notes[idx].append(msg)
def get_instruction(self, idx, off=None):
"""
Get a particular instruction, selected by its index (default) or by its address if off is specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if off != None:
idx = self.off_to_pos(off)
return [i for i in self.get_instructions()][idx]
def off_to_pos(self, off):
"""
Get the position of an instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: int
"""
idx = 0
nb = 0
for i in self.get_instructions():
if idx == off:
return nb
nb += 1
idx += i.get_length()
return -1
def get_ins_off(self, off):
"""
Get a particular instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
idx = 0
for i in self.get_instructions():
if idx == off:
return i
idx += i.get_length()
return None
def show(self):
"""
Display this object
"""
nb = 0
idx = 0
for i in self.get_instructions():
print "%-8d(%08x)" % (nb, idx),
i.show(nb)
print
idx += i.get_length()
nb += 1
def pretty_show(self, m_a):
"""
Display (with a pretty print) this object
:param m_a: :class:`MethodAnalysis` object
"""
bytecode.PrettyShow(m_a, m_a.basic_blocks.gets(), self.notes)
bytecode.PrettyShowEx(m_a.exceptions.gets())
def get_raw(self):
"""
Return the raw buffer of this object
:rtype: string
"""
return ''.join(i.get_raw() for i in self.get_instructions())
def get_length(self):
"""
Return the length of this object
:rtype: int
"""
return len(self.get_raw())
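# Sketch (hypothetical helper, not part of the original API): DCode addresses an
# instruction either by its position in the stream or by its byte offset;
# off_to_pos() converts an offset into a position and get_ins_off() returns the
# instruction starting exactly at that offset.
def _instruction_at(dcode, off):
    return dcode.off_to_pos(off), dcode.get_ins_off(off)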
class TryItem(object):
"""
This class represents the try_item format
:param buff: a raw buffer containing the try_item format
:type buff: string
:param cm: the ClassManager
:type cm: :class:`ClassManager` object
"""
def __init__(self, buff, cm):
self.offset = buff.get_idx()
self.__CM = cm
self.start_addr = unpack("=I", buff.read(4))[0]
self.insn_count = unpack("=H", buff.read(2))[0]
self.handler_off = unpack("=H", buff.read(2))[0]
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def get_start_addr(self):
"""
Get the start address of the block of code covered by this entry. The address is a count of 16-bit code units to the start of the first covered instruction.
:rtype: int
"""
return self.start_addr
def get_insn_count(self):
"""
Get the number of 16-bit code units covered by this entry
:rtype: int
"""
return self.insn_count
def get_handler_off(self):
"""
Get the offset in bytes from the start of the associated :class:`EncodedCatchHandlerList` to the :class:`EncodedCatchHandler` for this entry.
:rtype: int
"""
return self.handler_off
def get_raw(self):
return pack("=I", self.start_addr) + pack("=H", self.insn_count) + pack("=H", self.handler_off)
def get_length(self):
return len(self.get_raw())
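# Small sketch, not part of the original API: a try_item covers 16-bit code
# units, so turning it into a byte range inside the bytecode buffer only means
# scaling by the size of one code unit.
def _try_item_byte_range(try_item):
    start = try_item.get_start_addr() * calcsize('=H')
    return start, start + try_item.get_insn_count() * calcsize('=H')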
class DalvikCode(object):
"""
This class represents the instructions of a method
:param buff: a raw buffer containing the instructions
:type buff: string
:param cm: the ClassManager
:type cm: :class:`ClassManager` object
"""
def __init__(self, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.int_padding = ""
off = buff.get_idx()
while off % 4 != 0:
self.int_padding += '\00'
off += 1
buff.set_idx(off)
self.__off = buff.get_idx()
self.registers_size = unpack("=H", buff.read(2))[0]
self.ins_size = unpack("=H", buff.read(2))[0]
self.outs_size = unpack("=H", buff.read(2))[0]
self.tries_size = unpack("=H", buff.read(2))[0]
self.debug_info_off = unpack("=I", buff.read(4))[0]
self.insns_size = unpack("=I", buff.read(4))[0]
ushort = calcsize('=H')
self.code = DCode(self.__CM, buff.get_idx(), self.insns_size, buff.read(self.insns_size * ushort))
if (self.insns_size % 2 == 1):
self.padding = unpack("=H", buff.read(2))[0]
self.tries = []
self.handlers = None
if self.tries_size > 0:
for i in xrange(0, self.tries_size):
self.tries.append(TryItem(buff, self.__CM))
self.handlers = EncodedCatchHandlerList(buff, self.__CM)
def get_registers_size(self):
"""
Get the number of registers used by this code
:rtype: int
"""
return self.registers_size
def get_ins_size(self):
"""
Get the number of words of incoming arguments to the method that this code is for
:rtype: int
"""
return self.ins_size
def get_outs_size(self):
"""
Get the number of words of outgoing argument space required by this code for method invocation
:rtype: int
"""
return self.outs_size
def get_tries_size(self):
"""
Get the number of :class:`TryItem` for this instance
:rtype: int
"""
return self.tries_size
def get_debug_info_off(self):
"""
Get the offset from the start of the file to the debug info (line numbers + local variable info) sequence for this code, or 0 if there simply is no information
:rtype: int
"""
return self.debug_info_off
def get_insns_size(self):
"""
Get the size of the instructions list, in 16-bit code units
:rtype: int
"""
return self.insns_size
def get_handlers(self):
"""
Get the bytes representing a list of lists of catch types and associated handler addresses.
:rtype: :class:`EncodedCatchHandlerList`
"""
return self.handlers
def get_tries(self):
"""
Get the array indicating where in the code exceptions are caught and how to handle them
:rtype: a list of :class:`TryItem` objects
"""
return self.tries
def get_debug(self):
"""
Return the associated debug object
:rtype: :class:`DebugInfoItem`
"""
return self.__CM.get_debug_off(self.debug_info_off)
def get_bc(self):
"""
Return the associated code object
:rtype: :class:`DCode`
"""
return self.code
def set_idx(self, idx):
self.code.set_idx(idx)
def reload(self):
self.code.reload()
def get_length(self):
return self.insns_size
def _begin_show(self):
debug("registers_size: %d" % self.registers_size)
debug("ins_size: %d" % self.ins_size)
debug("outs_size: %d" % self.outs_size)
debug("tries_size: %d" % self.tries_size)
debug("debug_info_off: %d" % self.debug_info_off)
debug("insns_size: %d" % self.insns_size)
bytecode._PrintBanner()
def show(self):
self._begin_show()
self.code.show()
self._end_show()
def _end_show(self):
bytecode._PrintBanner()
def pretty_show(self, m_a):
self._begin_show()
self.code.pretty_show(m_a)
self._end_show()
def get_obj(self):
return [self.code, self.tries, self.handlers]
def get_raw(self):
code_raw = self.code.get_raw()
self.insns_size = (len(code_raw) / 2) + (len(code_raw) % 2)
buff = self.int_padding
buff += pack("=H", self.registers_size) + \
pack("=H", self.ins_size) + \
pack("=H", self.outs_size) + \
pack("=H", self.tries_size) + \
pack("=I", self.debug_info_off) + \
pack("=I", self.insns_size) + \
code_raw
# if (self.insns_size % 2 == 1):
# buff += pack("=H", self.padding)
if self.tries_size > 0:
buff += ''.join(i.get_raw() for i in self.tries)
buff += self.handlers.get_raw()
return buff
def add_inote(self, msg, idx, off=None):
"""
Add a message to a specific instruction, selected by its index (default) or by its address if off is specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if self.code:
return self.code.add_inote(msg, idx, off)
def get_instruction(self, idx, off=None):
if self.code:
return self.code.get_instruction(idx, off)
def get_size(self):
length = len(self.int_padding)
length += len( pack("=H", self.registers_size) + \
pack("=H", self.ins_size) + \
pack("=H", self.outs_size) + \
pack("=H", self.tries_size) + \
pack("=I", self.debug_info_off) + \
pack("=I", self.insns_size) )
length += self.code.get_length()
if (self.insns_size % 2 == 1):
length += len(pack("=H", self.padding))
if self.tries_size > 0:
for i in self.tries:
length += i.get_length()
length += self.handlers.get_length()
return length
def get_off(self):
return self.__off
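# Usage sketch (hypothetical helper, not part of the original API): the
# instructions of a method are reached through the DCode object returned by
# get_bc(); addresses are rebuilt by accumulating instruction lengths, exactly
# as DCode.show() does above.
def _dump_dalvik_code(dalvik_code):
    idx = 0
    for ins in dalvik_code.get_bc().get_instructions():
        print "0x%04x %s" % (idx, ins.get_output())
        idx += ins.get_length()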
class CodeItem(object):
def __init__(self, size, buff, cm):
self.__CM = cm
self.offset = buff.get_idx()
self.code = []
self.__code_off = {}
for i in xrange(0, size):
x = DalvikCode( buff, cm )
self.code.append( x )
self.__code_off[ x.get_off() ] = x
def set_off(self, off):
self.offset = off
def get_off(self):
return self.offset
def get_code(self, off):
try:
return self.__code_off[off]
except KeyError:
return None
def reload(self):
for i in self.code:
i.reload()
def show(self):
print "CODE_ITEM"
for i in self.code:
i.show()
def get_obj(self):
return [ i for i in self.code ]
def get_raw(self):
return ''.join(i.get_raw() for i in self.code)
def get_length(self):
length = 0
for i in self.code:
length += i.get_size()
return length
class MapItem(object):
def __init__(self, buff, cm):
self.__CM = cm
self.off = buff.get_idx()
self.type = unpack("=H", buff.read(2))[0]
self.unused = unpack("=H", buff.read(2))[0]
self.size = unpack("=I", buff.read(4))[0]
self.offset = unpack("=I", buff.read(4))[0]
self.item = None
buff.set_idx( self.offset )
lazy_analysis = self.__CM.get_lazy_analysis()
if lazy_analysis:
self.next_lazy(buff, cm)
else:
self.next(buff, cm)
def get_off(self):
return self.off
def get_offset(self):
return self.offset
def get_type(self):
return self.type
def get_size(self):
return self.size
def next(self, buff, cm):
debug("%s @ 0x%x(%d) %x %x" % (TYPE_MAP_ITEM[self.type], buff.get_idx(), buff.get_idx(), self.size, self.offset))
if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM":
self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM":
self.item = CodeItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM":
self.item = TypeHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM":
self.item = ProtoHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM":
self.item = FieldHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM":
self.item = MethodHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM":
self.item = ClassHDefItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM":
self.item = HeaderItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_ITEM":
self.item = [ AnnotationItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_ITEM":
self.item = [ AnnotationSetItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATIONS_DIRECTORY_ITEM":
self.item = [ AnnotationsDirectoryItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_REF_LIST":
self.item = [ AnnotationSetRefList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST":
self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM":
self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM":
self.item = DebugInfoItemEmpty( buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM":
self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM":
self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST":
pass # this entry describes the map_list itself, which is the object currently being parsed
else:
bytecode.Exit( "Map item %d @ 0x%x(%d) is unknown" % (self.type, buff.get_idx(), buff.get_idx()) )
def next_lazy(self, buff, cm):
if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM":
self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM":
self.item = CodeItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM":
self.item = TypeIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM":
self.item = ProtoIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM":
self.item = FieldIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM":
self.item = MethodIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM":
self.item = ClassDefItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM":
self.item = HeaderItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST":
self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM":
self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM":
self.item = DebugInfoItemEmpty( buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM":
self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM":
self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST":
pass # this entry describes the map_list itself, which is the object currently being parsed
def reload(self):
if self.item != None:
if isinstance( self.item, list ):
for i in self.item:
i.reload()
else:
self.item.reload()
def show(self):
bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])
if self.item != None:
if isinstance( self.item, list ):
for i in self.item:
i.show()
else:
self.item.show()
def pretty_show(self):
bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])
if self.item != None:
if isinstance( self.item, list ):
for i in self.item:
if isinstance(i, ClassDataItem):
i.pretty_show()
else:
i.show()
else:
self.item.show()
def get_obj(self):
return self.item
def get_raw(self):
if isinstance(self.item, list):
self.offset = self.item[0].get_off()
else:
self.offset = self.item.get_off()
return pack("=H", self.type) + pack("=H", self.unused) + pack("=I", self.size) + pack("=I", self.offset)
def get_length(self):
return calcsize( "=HHII" )
def get_item(self):
return self.item
def set_item(self, item):
self.item = item
class OffObj(object):
def __init__(self, o):
self.off = o
class ClassManager(object):
"""
This class is used to access all elements (strings, types, protos ...) of the dex format
"""
def __init__(self, vm, config):
self.vm = vm
self.buff = vm
self.decompiler_ob = None
self.vmanalysis_ob = None
self.gvmanalysis_ob = None
self.__manage_item = {}
self.__manage_item_off = []
self.__strings_off = {}
self.__obj_offset = {}
self.__item_offset = {}
self.__cached_proto = {}
self.recode_ascii_string = config["RECODE_ASCII_STRING"]
self.recode_ascii_string_meth = None
if config["RECODE_ASCII_STRING_METH"]:
self.recode_ascii_string_meth = config["RECODE_ASCII_STRING_METH"]
self.lazy_analysis = config["LAZY_ANALYSIS"]
self.hook_strings = {}
self.engine = []
self.engine.append("python")
if self.vm != None:
self.odex_format = self.vm.get_format_type() == "ODEX"
def get_ascii_string(self, s):
try:
return s.decode("ascii")
except UnicodeDecodeError:
d = ""
for i in s:
if ord(i) < 128:
d += i
else:
d += "%x" % ord(i)
return d
def get_odex_format(self):
return self.odex_format
def get_obj_by_offset(self, offset):
return self.__obj_offset[ offset ]
def get_item_by_offset(self, offset):
return self.__item_offset[ offset ]
def get_string_by_offset(self, offset):
return self.__strings_off[ offset ]
def get_lazy_analysis(self):
return self.lazy_analysis
def get_vmanalysis(self):
return self.vmanalysis_ob
def set_vmanalysis(self, vmanalysis):
self.vmanalysis_ob = vmanalysis
def get_gvmanalysis(self):
return self.gvmanalysis_ob
def set_gvmanalysis(self, gvmanalysis):
self.gvmanalysis_ob = gvmanalysis
def set_decompiler(self, decompiler):
self.decompiler_ob = decompiler
def get_engine(self):
return self.engine[0]
def get_all_engine(self):
return self.engine
def add_type_item(self, type_item, c_item, item):
self.__manage_item[ type_item ] = item
self.__obj_offset[ c_item.get_off() ] = c_item
self.__item_offset[ c_item.get_offset() ] = item
sdi = False
if type_item == "TYPE_STRING_DATA_ITEM":
sdi = True
if item != None:
if isinstance(item, list):
for i in item:
goff = i.offset
self.__manage_item_off.append( goff )
self.__obj_offset[ i.get_off() ] = i
if sdi == True:
self.__strings_off[ goff ] = i
else:
self.__manage_item_off.append( c_item.get_offset() )
def get_code(self, idx):
try:
return self.__manage_item[ "TYPE_CODE_ITEM" ].get_code( idx )
except KeyError:
return None
def get_class_data_item(self, off):
for i in self.__manage_item[ "TYPE_CLASS_DATA_ITEM" ]:
if i.get_off() == off:
return i
bytecode.Exit( "unknown class data item @ 0x%x" % off )
def get_encoded_array_item(self, off):
for i in self.__manage_item["TYPE_ENCODED_ARRAY_ITEM" ]:
if i.get_off() == off:
return i
def get_string(self, idx):
if idx in self.hook_strings:
return self.hook_strings[ idx ]
try:
off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
except IndexError:
bytecode.Warning( "unknown string item @ %d" % (idx) )
return "AG:IS: invalid string"
try:
if self.recode_ascii_string:
if self.recode_ascii_string_meth:
return self.recode_ascii_string_meth(self.__strings_off[off].get())
return self.get_ascii_string(self.__strings_off[off].get())
return self.__strings_off[off].get()
except KeyError:
bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
return "AG:IS: invalid string"
def get_raw_string(self, idx):
try:
off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
except IndexError:
bytecode.Warning( "unknown string item @ %d" % (idx) )
return "AG:IS: invalid string"
try:
return self.__strings_off[off].get()
except KeyError:
bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
return "AG:IS: invalid string"
def get_type_list(self, off):
if off == 0:
return []
for i in self.__manage_item[ "TYPE_TYPE_LIST" ]:
if i.get_type_list_off() == off:
return [type_.get_string() for type_ in i.get_list()]
def get_type(self, idx):
_type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )
if _type == -1:
return "AG:ITI: invalid type"
return self.get_string( _type )
def get_type_ref(self, idx):
return self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )
def get_proto(self, idx):
try:
proto = self.__cached_proto[ idx ]
except KeyError:
proto = self.__manage_item[ "TYPE_PROTO_ID_ITEM" ].get( idx )
self.__cached_proto[ idx ] = proto
return [ proto.get_parameters_off_value(), proto.get_return_type_idx_value() ]
def get_field(self, idx):
field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )
return [ field.get_class_name(), field.get_type(), field.get_name() ]
def get_field_ref(self, idx):
return self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )
def get_method(self, idx):
method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )
return method.get_list()
def get_method_ref(self, idx):
return self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )
def set_hook_class_name(self, class_def, value):
_type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( class_def.get_class_idx() )
self.set_hook_string( _type, value )
self.vm._delete_python_export_class( class_def )
class_def.reload()
# FIXME
self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].reload()
for i in class_def.get_methods():
i.reload()
for i in class_def.get_fields():
i.reload()
self.vm._create_python_export_class( class_def )
def set_hook_method_name(self, encoded_method, value):
python_export = True
method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( encoded_method.get_method_idx() )
self.set_hook_string( method.get_name_idx(), value )
class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( method.get_class_idx() )
if class_def != None:
try:
name = "METHOD_" + bytecode.FormatNameToPython( encoded_method.get_name() )
except AttributeError:
name += "_" + bytecode.FormatDescriptorToPython(encoded_method.get_descriptor())
debug("try deleting old name in python...")
try:
delattr(class_def, name)
debug("success with regular name")
except AttributeError:
debug("WARNING: fail with regular name")
#python_export = False
try:
name = "METHOD_" + bytecode.FormatNameToPython( encoded_method.get_name() + '_' + encoded_method.proto.replace(' ','').replace('(','').replace('[','').replace(')','').replace('/','_').replace(';','') )
except AttributeError:
name += "_" + bytecode.FormatDescriptorToPython(encoded_method.get_descriptor())
try:
delattr(class_def, name)
debug("success with name containing prototype")
except AttributeError:
debug("WARNING: fail with name containing prototype")
python_export = False
if python_export:
name = "METHOD_" + bytecode.FormatNameToPython(value)
setattr(class_def, name, encoded_method)
debug("new name in python: created: %s." % name)
else:
debug("skipping creating new name in python")
method.reload()
def set_hook_field_name(self, encoded_field, value):
python_export = True
field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( encoded_field.get_field_idx() )
self.set_hook_string( field.get_name_idx(), value )
class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( field.get_class_idx() )
if class_def != None:
try:
name = "FIELD_" + bytecode.FormatNameToPython( encoded_field.get_name() )
except AttributeError:
name += "_" + bytecode.FormatDescriptorToPython( encoded_field.get_descriptor() )
try:
delattr( class_def, name )
except AttributeError:
python_export = False
if python_export:
name = "FIELD_" + bytecode.FormatNameToPython( value )
setattr( class_def, name, encoded_field )
field.reload()
def set_hook_string(self, idx, value):
self.hook_strings[ idx ] = value
def get_next_offset_item(self, idx):
for i in self.__manage_item_off:
if i > idx:
return i
return idx
def get_debug_off(self, off):
self.buff.set_idx( off )
return DebugInfoItem( self.buff, self )
class MapList(object):
"""
This class can parse the "map_list" of the dex format
"""
def __init__(self, cm, off, buff):
self.CM = cm
buff.set_idx( off )
self.offset = off
self.size = unpack("=I", buff.read( 4 ) )[0]
self.map_item = []
for i in xrange(0, self.size):
idx = buff.get_idx()
mi = MapItem( buff, self.CM )
self.map_item.append( mi )
buff.set_idx( idx + mi.get_length() )
c_item = mi.get_item()
if c_item == None:
mi.set_item( self )
c_item = mi.get_item()
self.CM.add_type_item( TYPE_MAP_ITEM[ mi.get_type() ], mi, c_item )
for i in self.map_item:
i.reload()
def reload(self):
pass
def get_off(self):
return self.offset
def set_off(self, off):
self.offset = off
def get_item_type(self, ttype):
"""
Get a particular item type
:param ttype: a string which represents the desired type
:rtype: None or the item object
"""
for i in self.map_item:
if TYPE_MAP_ITEM[ i.get_type() ] == ttype:
return i.get_item()
return None
def show(self):
"""
Print the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item:
if i.item != self:
i.show()
def pretty_show(self):
"""
Print with a pretty display the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item:
if i.item != self:
i.pretty_show()
def get_obj(self):
return [ x.get_obj() for x in self.map_item ]
def get_raw(self):
return pack("=I", self.size) + ''.join(x.get_raw() for x in self.map_item)
def get_class_manager(self):
return self.CM
def get_length(self):
return len(self.get_raw())
class XREF(object):
def __init__(self):
self.items = []
def add(self, x, y):
self.items.append((x, y))
class DREF(object):
def __init__(self):
self.items = []
def add(self, x, y):
self.items.append((x, y))
class DalvikVMFormat(bytecode._Bytecode):
"""
This class can parse a classes.dex file of an Android application (APK).
:param buff: a string which represents the classes.dex file
:param decompiler: associate a decompiler object to display the java source code
:type buff: string
:type decompiler: object
:Example:
DalvikVMFormat( read("classes.dex") )
"""
def __init__(self, buff, decompiler=None, config=None):
super(DalvikVMFormat, self).__init__(buff)
self.config = config
if not self.config:
self.config = {"RECODE_ASCII_STRING": CONF["RECODE_ASCII_STRING"],
"RECODE_ASCII_STRING_METH": CONF["RECODE_ASCII_STRING_METH"],
"LAZY_ANALYSIS": CONF["LAZY_ANALYSIS"]}
self.CM = ClassManager(self, self.config)
self.CM.set_decompiler(decompiler)
self._preload(buff)
self._load(buff)
def _preload(self, buff):
pass
def _load(self, buff):
self.__header = HeaderItem(0, self, ClassManager(None, self.config))
if self.__header.map_off == 0:
bytecode.Warning("no map list ...")
else:
self.map_list = MapList( self.CM, self.__header.map_off, self )
self.classes = self.map_list.get_item_type( "TYPE_CLASS_DEF_ITEM" )
self.methods = self.map_list.get_item_type( "TYPE_METHOD_ID_ITEM" )
self.fields = self.map_list.get_item_type( "TYPE_FIELD_ID_ITEM" )
self.codes = self.map_list.get_item_type( "TYPE_CODE_ITEM" )
self.strings = self.map_list.get_item_type( "TYPE_STRING_DATA_ITEM" )
self.debug = self.map_list.get_item_type( "TYPE_DEBUG_INFO_ITEM" )
self.header = self.map_list.get_item_type( "TYPE_HEADER_ITEM" )
self.classes_names = None
self.__cache_methods = None
self.__cached_methods_idx = None
def get_classes_def_item(self):
"""
This function returns the class def item
:rtype: :class:`ClassDefItem` object
"""
return self.classes
def get_methods_id_item(self):
"""
This function returns the method id item
:rtype: :class:`MethodIdItem` object
"""
return self.methods
def get_fields_id_item(self):
"""
This function returns the field id item
:rtype: :class:`FieldIdItem` object
"""
return self.fields
def get_codes_item(self):
"""
This function returns the code item
:rtype: :class:`CodeItem` object
"""
return self.codes
def get_string_data_item(self):
"""
This function returns the string data item
:rtype: :class:`StringDataItem` object
"""
return self.strings
def get_debug_info_item(self):
"""
This function returns the debug info item
:rtype: :class:`DebugInfoItem` object
"""
return self.debug
def get_header_item(self):
"""
This function returns the header item
:rtype: :class:`HeaderItem` object
"""
return self.header
def get_class_manager(self):
"""
        This function returns a ClassManager object which allows you to get
access to all index references (strings, methods, fields, ....)
:rtype: :class:`ClassManager` object
"""
return self.CM
def show(self):
"""
        Show all the information in the object
"""
self.map_list.show()
def pretty_show(self):
"""
        Show (but pretty!) all the information in the object
"""
self.map_list.pretty_show()
def save(self):
"""
        Return the dex (with the modifications) in raw format (fixes checksums)
        (beta: do not use!)
:rtype: string
"""
l = []
h = {}
s = {}
h_r = {}
idx = 0
for i in self.map_list.get_obj():
length = 0
if isinstance(i, list):
for j in i:
if isinstance(j, AnnotationsDirectoryItem):
if idx % 4 != 0:
idx = idx + (4 - (idx % 4))
l.append( j )
c_length = j.get_length()
h[ j ] = idx + length
h_r[ idx + length ] = j
s[ idx + length ] = c_length
length += c_length
#debug("SAVE" + str(j) + " @ 0x%x" % (idx+length))
debug("SAVE " + str(i[0]) + " @0x%x (%x)" % (idx, length))
else:
if isinstance(i, MapList):
if idx % 4 != 0:
idx = idx + (4 - (idx % 4))
l.append( i )
h[ i ] = idx
h_r[ idx ] = i
length = i.get_length()
s[idx] = length
debug("SAVE " + str(i) + " @0x%x (%x)" % (idx, length))
idx += length
self.header.file_size = idx
last_idx = 0
for i in l:
idx = h[ i ]
i.set_off( h[ i ] )
# print i, hex(h[ i ])
last_idx = idx + s[ idx ]
last_idx = 0
buff = ""
for i in l:
idx = h[ i ]
if idx != last_idx:
debug( "Adjust alignment @%x with 00 %x" % (idx, idx - last_idx) )
buff += "\x00" * (idx - last_idx)
buff += i.get_raw()
last_idx = idx + s[ idx ]
debug("GLOBAL SIZE %d" % len(buff))
return self.fix_checksums(buff)
def fix_checksums(self, buff):
"""
Fix a dex format buffer by setting all checksums
:rtype: string
"""
import zlib
import hashlib
signature = hashlib.sha1(buff[32:]).digest()
buff = buff[:12] + signature + buff[32:]
checksum = zlib.adler32(buff[12:])
buff = buff[:8] + pack("=i", checksum) + buff[12:]
debug("NEW SIGNATURE %s" % repr(signature))
debug("NEW CHECKSUM %x" % checksum)
return buff
def get_cm_field(self, idx):
"""
Get a specific field by using an index
:param idx: index of the field
:type idx: int
"""
return self.CM.get_field(idx)
def get_cm_method(self, idx):
"""
Get a specific method by using an index
:param idx: index of the method
:type idx: int
"""
return self.CM.get_method(idx)
def get_cm_string(self, idx):
"""
Get a specific string by using an index
:param idx: index of the string
:type idx: int
"""
return self.CM.get_raw_string( idx )
def get_cm_type(self, idx):
"""
Get a specific type by using an index
:param idx: index of the type
:type idx: int
"""
return self.CM.get_type( idx )
def get_classes_names(self, update=False):
"""
Return the names of classes
:param update: True indicates to recompute the list.
                       May be needed after using a MyClass.set_name().
:rtype: a list of string
"""
if self.classes_names == None or update:
self.classes_names = [ i.get_name() for i in self.classes.class_def ]
return self.classes_names
def get_classes(self):
"""
Return all classes
:rtype: a list of :class:`ClassDefItem` objects
"""
return self.classes.class_def
def get_class(self, name):
"""
Return a specific class
:param name: the name of the class
:rtype: a :class:`ClassDefItem` object
"""
for i in self.classes.class_def:
if i.get_name() == name:
return i
return None
def get_method(self, name):
"""
        Return a list of all methods whose name matches the regexp
:param name: the name of the method (a python regexp)
:rtype: a list with all :class:`EncodedMethod` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def:
for j in i.get_methods():
if prog.match( j.get_name() ):
l.append( j )
return l
def get_field(self, name):
"""
        Return a list of all fields whose name matches the regexp
:param name: the name of the field (a python regexp)
:rtype: a list with all :class:`EncodedField` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def:
for j in i.get_fields():
if prog.match( j.get_name() ):
l.append( j )
return l
def get_all_fields(self):
"""
Return a list of field items
:rtype: a list of :class:`FieldIdItem` objects
"""
try:
return self.fields.gets()
except AttributeError:
return []
def get_fields(self):
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def:
for j in i.get_fields():
l.append( j )
return l
def get_methods(self):
"""
Return all method objects
:rtype: a list of :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def:
for j in i.get_methods():
l.append( j )
return l
def get_len_methods(self):
"""
Return the number of methods
:rtype: int
"""
return len( self.get_methods() )
def get_method_by_idx(self, idx):
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None:
self.__cached_methods_idx = {}
for i in self.classes.class_def:
for j in i.get_methods():
self.__cached_methods_idx[ j.get_method_idx() ] = j
try:
return self.__cached_methods_idx[ idx ]
except KeyError:
return None
def get_method_descriptor(self, class_name, method_name, descriptor):
"""
Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object
"""
key = class_name + method_name + descriptor
if self.__cache_methods == None:
self.__cache_methods = {}
for i in self.classes.class_def:
for j in i.get_methods():
self.__cache_methods[ j.get_class_name() + j.get_name() + j.get_descriptor() ] = j
try:
return self.__cache_methods[ key ]
except KeyError:
return None
def get_methods_descriptor(self, class_name, method_name):
"""
Return the specific methods of the class
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:rtype: None or a :class:`EncodedMethod` object
"""
l = []
for i in self.classes.class_def:
if i.get_name() == class_name:
for j in i.get_methods():
if j.get_name() == method_name:
l.append(j)
return l
def get_methods_class(self, class_name):
"""
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def:
for j in i.get_methods():
if class_name == j.get_class_name():
l.append( j )
return l
def get_fields_class(self, class_name):
"""
Return all fields of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def:
for j in i.get_fields():
if class_name == j.get_class_name():
l.append( j )
return l
def get_field_descriptor(self, class_name, field_name, descriptor):
"""
Return the specific field
:param class_name: the class name of the field
:type class_name: string
:param field_name: the name of the field
:type field_name: string
:param descriptor: the descriptor of the field
:type descriptor: string
:rtype: None or a :class:`EncodedField` object
"""
for i in self.classes.class_def:
if class_name == i.get_name():
for j in i.get_fields():
if field_name == j.get_name() and descriptor == j.get_descriptor():
return j
return None
def get_strings(self):
"""
Return all strings
:rtype: a list with all strings used in the format (types, names ...)
"""
return [i.get() for i in self.strings]
def get_regex_strings(self, regular_expressions):
"""
        Return all strings matching the regex
:param regular_expressions: the python regex
:type regular_expressions: string
:rtype: a list of strings matching the regex expression
"""
str_list = []
if regular_expressions.count is None:
return None
for i in self.get_strings():
if re.match(regular_expressions, i):
str_list.append(i)
return str_list
def get_format_type(self):
"""
Return the type
:rtype: a string
"""
return "DEX"
def create_xref(self, python_export=True):
"""
Create XREF for this object
:param python_export (boolean): export xref in each method
"""
gvm = self.CM.get_gvmanalysis()
for _class in self.get_classes():
key = _class.get_name()
if key in gvm.nodes:
_class.XREFfrom = XREF()
for i in gvm.GI.successors(gvm.nodes[key].id):
xref = gvm.nodes_id[i]
xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
if python_export == True:
name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
setattr(_class.XREFfrom, name, xref_meth)
_class.XREFfrom.add(xref_meth, xref.edges[gvm.nodes[key]])
for method in _class.get_methods():
method.XREFfrom = XREF()
method.XREFto = XREF()
key = "%s %s %s" % (method.get_class_name(), method.get_name(), method.get_descriptor())
if key in gvm.nodes:
for i in gvm.G.predecessors(gvm.nodes[key].id):
xref = gvm.nodes_id[i]
xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
if xref_meth != None:
name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
if python_export == True:
setattr(method.XREFfrom, name, xref_meth)
method.XREFfrom.add(xref_meth, xref.edges[gvm.nodes[key]])
for i in gvm.G.successors(gvm.nodes[key].id):
xref = gvm.nodes_id[i]
xref_meth = self.get_method_descriptor(xref.class_name, xref.method_name, xref.descriptor)
if xref_meth != None:
name = bytecode.FormatClassToPython(xref_meth.get_class_name()) + "__" + \
bytecode.FormatNameToPython(xref_meth.get_name()) + "__" + \
bytecode.FormatDescriptorToPython(xref_meth.get_descriptor())
if python_export == True:
setattr(method.XREFto, name, xref_meth)
method.XREFto.add(xref_meth, gvm.nodes[key].edges[xref])
def create_dref(self, python_export=True):
"""
Create DREF for this object
:param python_export (boolean): export dref in each field
"""
vmx = self.CM.get_vmanalysis()
for _class in self.get_classes():
for field in _class.get_fields():
field.DREFr = DREF()
field.DREFw = DREF()
paths = vmx.tainted_variables.get_field( field.get_class_name(), field.get_name(), field.get_descriptor() )
if paths != None:
access = {}
access["R"] = {}
access["W"] = {}
for path in paths.get_paths():
access_val, idx = path[0]
m_idx = path[1]
if access_val == 'R':
dref_meth = self.get_method_by_idx( m_idx )
name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
if python_export == True:
setattr( field.DREFr, name, dref_meth )
try:
access["R"][ dref_meth ].append( idx )
except KeyError:
access["R"][ dref_meth ] = []
access["R"][ dref_meth ].append( idx )
else:
dref_meth = self.get_method_by_idx( m_idx )
name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
if python_export == True:
setattr( field.DREFw, name, dref_meth )
try:
access["W"][ dref_meth ].append( idx )
except KeyError:
access["W"][ dref_meth ] = []
access["W"][ dref_meth ].append( idx )
for i in access["R"]:
field.DREFr.add( i, access["R"][i] )
for i in access["W"]:
field.DREFw.add( i, access["W"][i] )
def create_python_export(self):
"""
Export classes/methods/fields' names in the python namespace
"""
for _class in self.get_classes():
self._create_python_export_class(_class)
def _delete_python_export_class(self, _class):
self._create_python_export_class( _class, True)
def _create_python_export_class(self, _class, delete=False):
if _class != None:
### Class
name = "CLASS_" + bytecode.FormatClassToPython( _class.get_name() )
if delete:
delattr( self, name )
return
else:
setattr( self, name, _class )
### Methods
m = {}
for method in _class.get_methods():
if method.get_name() not in m:
m[ method.get_name() ] = []
m[ method.get_name() ].append( method )
for i in m:
if len(m[i]) == 1:
j = m[i][0]
name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() )
setattr( _class, name, j )
else:
for j in m[i]:
name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
setattr( _class, name, j )
### Fields
f = {}
for field in _class.get_fields():
if field.get_name() not in f:
f[ field.get_name() ] = []
f[ field.get_name() ].append( field )
for i in f:
if len(f[i]) == 1:
j = f[i][0]
name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() )
setattr( _class, name, j )
else:
for j in f[i]:
name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
setattr( _class, name, j )
def get_BRANCH_DVM_OPCODES(self):
return BRANCH_DVM_OPCODES
def get_determineNext(self):
return determineNext
def get_determineException(self):
return determineException
def get_DVM_TOSTRING(self):
return DVM_TOSTRING()
def set_decompiler(self, decompiler):
self.CM.set_decompiler(decompiler)
def set_vmanalysis(self, vmanalysis):
self.CM.set_vmanalysis(vmanalysis)
def set_gvmanalysis(self, gvmanalysis):
self.CM.set_gvmanalysis(gvmanalysis)
def disassemble(self, offset, size):
"""
Disassembles a given offset in the DEX file
        :param offset: offset to disassemble in the file (from the beginning of the file)
        :type offset: int
        :param size: number of bytes to disassemble
        :type size: int
"""
for i in DCode(self.CM, offset, size, self.get_buff()[offset:offset + size]).get_instructions():
yield i
def _get_class_hierarchy(self):
ids = {}
present = {}
r_ids = {}
to_add = {}
els = []
for current_class in self.get_classes():
s_name = current_class.get_superclassname()[1:-1]
c_name = current_class.get_name()[1:-1]
if s_name not in ids:
ids[s_name] = len(ids) + 1
r_ids[ids[s_name]] = s_name
if c_name not in ids:
ids[c_name] = len(ids) + 1
els.append([ids[c_name], ids[s_name], c_name])
present[ids[c_name]] = True
for i in els:
if i[1] not in present:
to_add[i[1]] = r_ids[i[1]]
for i in to_add:
els.append([i, 0, to_add[i]])
treeMap = {}
Root = bytecode.Node(0, "Root")
treeMap[Root.id] = Root
for element in els:
nodeId, parentId, title = element
if not nodeId in treeMap:
treeMap[nodeId] = bytecode.Node(nodeId, title)
else:
treeMap[nodeId].id = nodeId
treeMap[nodeId].title = title
if not parentId in treeMap:
treeMap[parentId] = bytecode.Node(0, '')
treeMap[parentId].children.append(treeMap[nodeId])
return Root
def print_classes_hierarchy(self):
def print_map(node, l, lvl=0):
for n in node.children:
if lvl == 0:
l.append("%s" % (n.title))
else:
l.append("%s %s" % ('\t' * lvl, n.title))
if len(n.children) > 0:
print_map(n, l, lvl + 1)
l = []
print_map(self._get_class_hierarchy(), l)
return l
def list_classes_hierarchy(self):
def print_map(node, l):
if node.title not in l:
l[node.title] = []
for n in node.children:
if len(n.children) > 0:
w = {}
w[n.title] = []
l[node.title].append(w)
print_map(n, w)
else:
l[node.title].append(n.title)
l = {}
print_map(self._get_class_hierarchy(), l)
return l
def get_format(self):
objs = self.map_list.get_obj()
h = {}
index = {}
self._get_objs(h, index, objs)
return h, index
def _get_objs(self, h, index, objs):
for i in objs:
if isinstance(i, list):
self._get_objs(h, index, i)
else:
try:
if i != None:
h[i] = {}
index[i] = i.offset
except AttributeError:
pass
try:
if not isinstance(i, MapList):
next_objs = i.get_obj()
if isinstance(next_objs, list):
self._get_objs(h[i], index, next_objs)
except AttributeError:
pass
def colorize_operands(self, operands, colors):
for operand in operands:
if operand[0] == OPERAND_REGISTER:
yield "%sv%d%s" % (colors["registers"], operand[1], colors["normal"])
elif operand[0] == OPERAND_LITERAL:
yield "%s%d%s" % (colors["literal"], operand[1], colors["normal"])
elif operand[0] == OPERAND_RAW:
yield "%s%s%s" % (colors["raw"], operand[1], colors["normal"])
elif operand[0] == OPERAND_OFFSET:
yield "%s%d%s" % (colors["offset"], operand[1], colors["normal"])
elif operand[0] & OPERAND_KIND:
if operand[0] == (OPERAND_KIND + KIND_STRING):
yield "%s%s%s" % (colors["string"], operand[2], colors["normal"])
elif operand[0] == (OPERAND_KIND + KIND_METH):
yield "%s%s%s" % (colors["meth"], operand[2], colors["normal"])
elif operand[0] == (OPERAND_KIND + KIND_FIELD):
yield "%s%s%s" % (colors["field"], operand[2], colors["normal"])
elif operand[0] == (OPERAND_KIND + KIND_TYPE):
yield "%s%s%s" % (colors["type"], operand[2], colors["normal"])
else:
yield "%s" % repr(operands[2])
else:
yield "%s" % repr(operands[1])
def get_operand_html(self, operand, registers_colors, colors, escape_fct, wrap_fct):
if operand[0] == OPERAND_REGISTER:
return "<FONT color=\"%s\">v%s</FONT>" % (registers_colors[operand[1]], operand[1])
elif operand[0] == OPERAND_LITERAL:
return "<FONT color=\"%s\">0x%x</FONT>" % (colors["literal"], operand[1])
elif operand[0] == OPERAND_RAW:
if len(operand[1]) > 32:
wrapped = wrap_fct(operand[1], 32)
wrapped_adjust = "<br/>" + "<br/>".join(escape_fct(repr(i)[1:-1]) for i in wrapped)
return "<FONT color=\"%s\">%s</FONT>" % (colors["raw"], wrapped_adjust)
return "<FONT color=\"%s\">%s</FONT>" % (colors["raw"], escape_fct(repr(operand[1])[1:-1]))
elif operand[0] == OPERAND_OFFSET:
return "<FONT FACE=\"Times-Italic\" color=\"%s\">0x%x</FONT>" % (colors["offset"], operand[1])
elif operand[0] & OPERAND_KIND:
if operand[0] == (OPERAND_KIND + KIND_STRING):
if len(operand[2]) > 32:
wrapped = wrap_fct(operand[2], 32)
wrapped_adjust = "<br/>" + "<br/>".join(escape_fct(i) for i in wrapped)
return "<FONT color=\"%s\">%s</FONT>" % (colors["string"], wrapped_adjust)
return "<FONT color=\"%s\">%s</FONT>" % (colors["string"], escape_fct(operand[2]))
elif operand[0] == (OPERAND_KIND + KIND_METH):
return "<FONT color=\"%s\">%s</FONT>" % (colors["method"], escape_fct(operand[2]))
elif operand[0] == (OPERAND_KIND + KIND_FIELD):
return "<FONT color=\"%s\">%s</FONT>" % (colors["field"], escape_fct(operand[2]))
elif operand[0] == (OPERAND_KIND + KIND_TYPE):
return "<FONT color=\"%s\">%s</FONT>" % (colors["type"], escape_fct(operand[2]))
return escape_fct(str(operand[2]))
return escape_fct(str(operand[1]))
class OdexHeaderItem(object):
"""
This class can parse the odex header
    :param buff: a Buff object string which represents the odex header
"""
def __init__(self, buff):
buff.set_idx(8)
self.dex_offset = unpack("=I", buff.read(4))[0]
self.dex_length = unpack("=I", buff.read(4))[0]
self.deps_offset = unpack("=I", buff.read(4))[0]
self.deps_length = unpack("=I", buff.read(4))[0]
self.aux_offset = unpack("=I", buff.read(4))[0]
self.aux_length = unpack("=I", buff.read(4))[0]
self.flags = unpack("=I", buff.read(4))[0]
self.padding = unpack("=I", buff.read(4))[0]
def show(self):
print "dex_offset:%x dex_length:%x deps_offset:%x deps_length:%x aux_offset:%x aux_length:%x flags:%x" % (self.dex_offset,
self.dex_length,
self.deps_offset,
self.deps_length,
self.aux_offset,
self.aux_length,
self.flags)
def get_raw(self):
return pack("=I", self.dex_offset) + \
pack("=I", self.dex_length) + \
pack("=I", self.deps_offset) + \
pack("=I", self.deps_length) + \
pack("=I", self.aux_offset) + \
pack("=I", self.aux_length) + \
pack("=I", self.flags) + \
pack("=I", self.padding)
class OdexDependencies(object):
"""
This class can parse the odex dependencies
:param buff: a Buff object string which represents the odex dependencies
"""
def __init__(self, buff):
self.modification_time = unpack("=I", buff.read(4))[0]
self.crc = unpack("=I", buff.read(4))[0]
self.dalvik_build = unpack("=I", buff.read(4))[0]
self.dependency_count = unpack("=I", buff.read(4))[0]
self.dependencies = []
self.dependency_checksums = []
for i in range(0, self.dependency_count):
string_length = unpack("=I", buff.read(4))[0]
name_dependency = buff.read(string_length)
self.dependencies.append(name_dependency)
self.dependency_checksums.append(buff.read(20))
def get_dependencies(self):
"""
Return the list of dependencies
:rtype: a list of strings
"""
return self.dependencies
def get_raw(self):
dependencies = ""
for idx, value in enumerate(self.dependencies):
dependencies += pack("=I", len(value)) + \
pack("=%ds" % len(value), value) + \
pack("=20s", self.dependency_checksums[idx])
return pack("=I", self.modification_time) + \
pack("=I", self.crc) + \
pack("=I", self.dalvik_build) + \
pack("=I", self.dependency_count) + \
dependencies
class DalvikOdexVMFormat(DalvikVMFormat):
"""
This class can parse an odex file
:param buff: a string which represents the odex file
:param decompiler: associate a decompiler object to display the java source code
:type buff: string
:type decompiler: object
:Example:
DalvikOdexVMFormat( read("classes.odex") )
"""
def _preload(self, buff):
self.orig_buff = buff
self.magic = buff[:8]
if self.magic == ODEX_FILE_MAGIC_35 or self.magic == ODEX_FILE_MAGIC_36:
self.odex_header = OdexHeaderItem(self)
self.set_idx(self.odex_header.deps_offset)
self.dependencies = OdexDependencies(self)
self.padding = buff[self.odex_header.deps_offset + self.odex_header.deps_length:]
self.set_idx(self.odex_header.dex_offset)
self.set_buff(self.read(self.odex_header.dex_length))
self.set_idx(0)
def save(self):
"""
Do not use !
"""
dex_raw = super(DalvikOdexVMFormat, self).save()
return self.magic + self.odex_header.get_raw() + dex_raw + self.dependencies.get_raw() + self.padding
def get_buff(self):
return self.magic + self.odex_header.get_raw() + super(DalvikOdexVMFormat, self).get_buff() + self.dependencies.get_raw() + self.padding
def get_dependencies(self):
"""
Return the odex dependencies object
:rtype: an OdexDependencies object
"""
return self.dependencies
def get_format_type(self):
"""
Return the type
:rtype: a string
"""
return "ODEX"
def get_params_info(nb, proto):
i_buffer = "# Parameters:\n"
ret = proto.split(')')
params = ret[0][1:].split()
if params:
i_buffer += "# - local registers: v%d...v%d\n" % (0, nb - len(params) - 1)
j = 0
for i in xrange(nb - len(params), nb):
i_buffer += "# - v%d:%s\n" % (i, get_type(params[j]))
j += 1
else:
i_buffer += "# local registers: v%d...v%d\n" % (0, nb - 1)
i_buffer += "#\n# - return:%s\n\n" % get_type(ret[1])
return i_buffer
def get_bytecodes_method(dex_object, ana_object, method):
mx = ana_object.get_method(method)
return get_bytecodes_methodx(method, mx)
def get_bytecodes_methodx(method, mx):
basic_blocks = mx.basic_blocks.gets()
i_buffer = ""
idx = 0
nb = 0
i_buffer += "# %s->%s%s [access_flags=%s]\n#\n" % (method.get_class_name(), method.get_name(), method.get_descriptor(), method.get_access_flags_string())
if method.code != None:
i_buffer += get_params_info(method.code.get_registers_size(), method.get_descriptor())
for i in basic_blocks:
bb_buffer = ""
ins_buffer = ""
bb_buffer += "%s : " % (i.name)
instructions = i.get_instructions()
for ins in instructions:
ins_buffer += "\t%-8d(%08x) " % (nb, idx)
ins_buffer += "%-20s %s" % (ins.get_name(), ins.get_output(idx))
op_value = ins.get_op_value()
if ins == instructions[-1] and i.childs != []:
# packed/sparse-switch
if (op_value == 0x2b or op_value == 0x2c) and len(i.childs) > 1:
values = i.get_special_ins(idx).get_values()
bb_buffer += "[ D:%s " % (i.childs[0][2].name)
bb_buffer += ' '.join("%d:%s" % (values[j], i.childs[j + 1][2].name) for j in range(0, len(i.childs) - 1)) + " ]"
else:
#if len(i.childs) == 2:
# i_buffer += "%s[ %s%s " % (branch_false_color, i.childs[0][2].name, branch_true_color))
# print_fct(' '.join("%s" % c[2].name for c in i.childs[1:]) + " ]%s" % normal_color)
#else:
bb_buffer += "[ " + ' '.join("%s" % c[2].name for c in i.childs) + " ]"
idx += ins.get_length()
nb += 1
ins_buffer += "\n"
if i.get_exception_analysis() != None:
ins_buffer += "\t%s\n" % (i.exception_analysis.show_buff())
i_buffer += bb_buffer + "\n" + ins_buffer + "\n"
return i_buffer
def auto(filename, raw=None):
"""
:param filename:
:param raw:
:type filename:
:type raw:
"""
data_raw = raw
if raw == None:
data_raw = read(filename)
ret_type = is_android_raw(data_raw[:10])
if ret_type == "DEX":
return DalvikVMFormat(data_raw)
elif ret_type == "ODEX":
return DalvikOdexVMFormat(data_raw)
return None
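# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). "classes.dex"
# is a placeholder file name; only helpers defined above (auto, DalvikVMFormat
# and friends) are used, and the block is guarded so importing the module
# stays side-effect free.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    d = auto("classes.dex")
    if d is not None:
        print d.get_format_type()              # "DEX" or "ODEX"
        for current_class in d.get_classes():
            print current_class.get_name()     # e.g. "Lcom/example/MainActivity;"
        print "%d methods, %d strings" % (d.get_len_methods(), len(d.get_strings()))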
|
py | b4085f6edc32a903264f9367cbbfae03a439ad7b | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_phase
from .rsp_fixpeaks import _rsp_fixpeaks_retrieve
def rsp_phase(peaks, troughs=None, desired_length=None):
"""Compute respiratory phase (inspiration and expiration).
Finds the respiratory phase, labelled as 1 for inspiration and 0 for expiration.
Parameters
----------
peaks : list or array or DataFrame or Series or dict
The samples at which the inhalation peaks occur. If a dict or a DataFrame is passed, it is
assumed that these containers were obtained with `rsp_findpeaks()`.
troughs : list or array or DataFrame or Series or dict
The samples at which the inhalation troughs occur. If a dict or a DataFrame is passed, it is
assumed that these containers were obtained with `rsp_findpeaks()`.
desired_length : int
        By default, the returned respiratory phase has the same number of elements as `peaks`. If set
        to an integer, the returned phase will be computed over `desired_length`
        samples. Has no effect if a DataFrame is passed in as the `peaks` argument.
Returns
-------
signals : DataFrame
A DataFrame of same length as `rsp_signal` containing the following columns:
- *"RSP_Inspiration"*: breathing phase, marked by "1" for inspiration and "0" for expiration.
- *"RSP_Phase_Completion"*: breathing phase completion, expressed in percentage (from 0 to 1),
representing the stage of the current respiratory phase.
See Also
--------
rsp_clean, rsp_peaks, rsp_amplitude, rsp_process, rsp_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)
>>> cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
>>> peak_signal, info = nk.rsp_peaks(cleaned)
>>>
>>> phase = nk.rsp_phase(peak_signal)
>>> fig = nk.signal_plot([rsp, phase], standardize=True)
>>> fig #doctest: +SKIP
"""
# Format input.
peaks, troughs, desired_length = _rsp_fixpeaks_retrieve(peaks, troughs, desired_length)
# Phase
inspiration = np.full(desired_length, np.nan)
inspiration[peaks] = 0.0
inspiration[troughs] = 1.0
last_element = np.where(~np.isnan(inspiration))[0][-1] # Avoid filling beyond the last peak/trough
inspiration[0:last_element] = pd.Series(inspiration).fillna(method="pad").values[0:last_element]
# Phase Completion
completion = signal_phase(inspiration, method="percent")
out = pd.DataFrame({"RSP_Phase": inspiration, "RSP_Phase_Completion": completion})
return out
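# Minimal illustration (not part of NeuroKit2): a hand-built peaks/troughs dict
# in the layout assumed from `rsp_findpeaks()` ("RSP_Peaks"/"RSP_Troughs" keys),
# with arbitrary toy indices, just to show the forward-filled phase column.
if __name__ == "__main__":
    toy_info = {"RSP_Peaks": np.array([4, 12]), "RSP_Troughs": np.array([0, 8])}
    print(rsp_phase(toy_info, desired_length=16))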
|
py | b4085f96452008e81a70d84886aa92d820e16e7a | import gibson2
from gibson2.envs.igibson_env import iGibsonEnv
from time import time
import os
from gibson2.utils.assets_utils import download_assets, download_demo_data
def test_env():
download_assets()
download_demo_data()
config_filename = os.path.join(
gibson2.root_path, 'test', 'test_house.yaml')
env = iGibsonEnv(config_file=config_filename, mode='headless')
try:
for j in range(2):
env.reset()
for i in range(300): # 300 steps, 30s world time
s = time()
action = env.action_space.sample()
ts = env.step(action)
print('ts', 1 / (time() - s))
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
finally:
env.close()
def test_env_reload():
download_assets()
download_demo_data()
config_filename = os.path.join(
gibson2.root_path, 'test', 'test_house.yaml')
env = iGibsonEnv(config_file=config_filename, mode='headless')
try:
for i in range(3):
env.reload(config_filename)
env.reset()
for i in range(300): # 300 steps, 30s world time
s = time()
action = env.action_space.sample()
ts = env.step(action)
print('ts', 1 / (time() - s))
if ts[2]:
print("Episode finished after {} timesteps".format(i + 1))
break
finally:
env.close()
def test_env_reset():
download_assets()
download_demo_data()
config_filename = os.path.join(
gibson2.root_path, 'test', 'test_house.yaml')
env = iGibsonEnv(config_file=config_filename, mode='headless')
class DummyTask(object):
def __init__(self):
self.reset_scene_called = False
self.reset_agent_called = False
self.get_task_obs_called = False
def get_task_obs(self, env):
self.get_task_obs_called = True
def reset_scene(self, env):
self.reset_scene_called = True
def reset_agent(self, env):
self.reset_agent_called = True
env.task = DummyTask()
env.reset()
assert env.task.reset_scene_called
assert env.task.reset_agent_called
assert env.task.get_task_obs_called
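if __name__ == "__main__":
    # Convenience runner (illustrative addition): these tests are normally
    # collected by pytest, but they can also be executed directly.
    test_env()
    test_env_reload()
    test_env_reset()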
|
py | b408611446079ce00148aca97f8d908987dfafdb | from __future__ import annotations
import asyncio
from datetime import datetime, timedelta
from typing import Generator, cast
import discord
from redbot.core import checks, commands
from redbot.core.utils.antispam import AntiSpam
from redbot.core.utils.chat_formatting import pagify
from .abcs import MixedMeta
from .checks import aa_active
try:
from redbot.core.commands import GuildContext
except ImportError:
from redbot.core.commands import Context as GuildContext # type: ignore
class AutoRooms(MixedMeta):
"""
Automagical Discord Voice Channels
"""
async def ar_cleanup(self, guild: discord.Guild):
if await self.bot.cog_disabled_in_guild_raw(self.qualified_name, guild.id):
return
for channel in guild.voice_channels:
conf = self.ar_config.channel(channel)
if not await conf.clone():
continue
if (not channel.members) and (
channel.created_at + timedelta(seconds=0.5)
) < datetime.utcnow():
try:
await channel.delete(reason="autoroom cleaning")
except discord.Forbidden:
pass
except discord.HTTPException:
pass
else:
await conf.clear()
@commands.Cog.listener("on_voice_state_update")
async def on_voice_state_update_ar(
self,
member: discord.Member,
before: discord.VoiceState,
after: discord.VoiceState,
):
"""
handles logic
"""
if before.channel == after.channel:
return
if member.id not in self._antispam:
self._antispam[member.id] = AntiSpam(self.antispam_intervals)
if (
(not self._antispam[member.id].spammy)
and after.channel
and (await self.ar_config.guild(after.channel.guild).active())
):
conf = self.ar_config.channel(after.channel)
if await conf.autoroom() or await conf.gameroom():
await self.generate_room_for(who=member, source=after.channel)
if before.channel:
await self.ar_cleanup(before.channel.guild)
@staticmethod
def _ar_get_overwrites(
source: discord.VoiceChannel, *, who: discord.Member, ownership: bool
) -> dict:
overwrites = dict(source.overwrites)
if ownership:
if who in overwrites:
overwrites[who].update(manage_channels=True, manage_roles=True)
else:
overwrites.update(
{
who: discord.PermissionOverwrite(
manage_channels=True, manage_roles=True
)
}
)
# Note: Connect is not optional. Even with manage_channels,
# the bot cannot edit or delete the channel
# if it does not have this. This is *not* documented, and was discovered by trial
# and error with a weird edge case someone had.
if source.guild.me in overwrites:
overwrites[source.guild.me].update(
manage_channels=True, manage_roles=True, connect=True
)
else:
overwrites.update(
{
source.guild.me: discord.PermissionOverwrite(
manage_channels=True, manage_roles=True, connect=True
)
}
)
return overwrites
async def generate_room_for(
self, *, who: discord.Member, source: discord.VoiceChannel
):
"""
makes autorooms
"""
# avoid object creation for comparison, it's slower
# manage_channels + move_members + connect
        # i.e. 16 | 16777216 | 1048576 = 17825808
if not source.guild.me.guild_permissions.value & 17825808 == 17825808:
return
if await self.bot.cog_disabled_in_guild_raw(
self.qualified_name, source.guild.id
):
return
cdata = await self.ar_config.channel(source).all(acquire_lock=False)
ownership = cdata["ownership"]
if ownership is None:
ownership = await self.ar_config.guild(source.guild).ownership()
category = source.category
overwrites: dict = self._ar_get_overwrites(source, who=who, ownership=ownership)
if cdata["gameroom"]:
cname = "???"
if activity := discord.utils.get(
who.activities, type=discord.ActivityType.playing
):
assert activity is not None, "mypy" # nosec # future remove
cname = activity.name
elif cdata["creatorname"]:
cname = f"{source.name} {who.name}"
# Stuff here might warrant a schema change to do this better.
# Don't add this yet.
# elif cdata["personalnamed"]:
# cname = f"{who}'s room"
# elif cdata["randomname"]:
# pass # TODO
else:
cname = source.name
try:
chan = await source.guild.create_voice_channel(
cname, category=category, overwrites=overwrites
)
except discord.Forbidden:
await self.ar_config.guild(source.guild).active.set(False)
return
except discord.HTTPException:
pass
else:
await self.ar_config.channel(chan).clone.set(True)
if who.id not in self._antispam:
self._antispam[who.id] = AntiSpam(self.antispam_intervals)
self._antispam[who.id].stamp()
await who.move_to(chan, reason="autoroom")
await asyncio.sleep(0.5)
await chan.edit(bitrate=source.bitrate, user_limit=source.user_limit)
# TODO:
# discord.HTTP to avoid needing the edit
# This extra API call is avoidable when working with the lower level tools.
@commands.bot_has_permissions(manage_channels=True)
@checks.admin_or_permissions(manage_channels=True)
@commands.group(autohelp=True)
async def autoroomset(self, ctx: GuildContext):
"""
Commands for configuring autorooms
"""
pass
@aa_active()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="channelsettings")
async def setchannelsettings(
self, ctx: GuildContext, channel: discord.VoiceChannel
):
"""
Interactive prompt for editing the autoroom behavior for specific
channels
"""
conf = self.ar_config.channel(channel)
if not await conf.autoroom():
return await ctx.send("That isn't an autoroom")
await ctx.send(
(
"Game rooms require the user joining to be playing "
"a game, but get a base name of the game discord "
"detects them playing. Game rooms also do not get"
"anything prepended to their name."
"\nIs this a game room?(y/n)"
)
)
def mcheck(m: discord.Message):
return m.author == ctx.author and m.channel == ctx.channel
try:
message = await self.bot.wait_for("message", check=mcheck, timeout=30)
except asyncio.TimeoutError:
await ctx.send("I can't wait forever, lets get to the next question.")
else:
if message.clean_content.lower()[:1] == "y":
await conf.gameroom.set(True)
else:
await conf.gameroom.set(False)
await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
await ctx.send(
(
"There are three options for channel ownership\n"
"1. Use the server default\n"
"2. Override the default granting ownership\n"
"3. Override the default denying ownership\n"
"Please respond with the corresponding number to "
"the desired behavior"
)
)
try:
message = await self.bot.wait_for("message", check=mcheck, timeout=30)
except asyncio.TimeoutError:
await ctx.send("I can't wait forever, lets get to the next question.")
else:
to_set = {"1": None, "2": True, "3": False}.get(
message.clean_content[:1], None
)
await conf.ownership.set(to_set)
await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="toggleactive")
async def autoroomtoggle(self, ctx: GuildContext, val: bool = None):
"""
turns autorooms on and off
"""
if val is None:
val = not await self.ar_config.guild(ctx.guild).active()
await self.ar_config.guild(ctx.guild).active.set(val)
message = (
"Autorooms are now activated" if val else "Autorooms are now deactivated"
)
await ctx.send(message)
@aa_active()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="makeclone")
async def makeclone(self, ctx: GuildContext, channel: discord.VoiceChannel):
"""Takes a channel, turns that voice channel into an autoroom"""
await self.ar_config.channel(channel).autoroom.set(True)
await ctx.tick()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="remclone")
async def remclone(self, ctx, channel: discord.VoiceChannel):
"""Takes a channel, removes that channel from the clone list"""
await self.ar_config.channel(channel).clear()
await ctx.tick()
@aa_active()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="listautorooms")
async def listclones(self, ctx: GuildContext):
"""Lists the current autorooms"""
clist = []
for c in ctx.guild.voice_channels:
if await self.ar_config.channel(c).autoroom():
clist.append("({0.id}) {0.name}".format(c))
output = ", ".join(clist)
page_gen = cast(Generator[str, None, None], pagify(output))
try:
for page in page_gen:
await ctx.send(page)
finally:
page_gen.close()
@aa_active()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="toggleowner")
async def toggleowner(self, ctx: GuildContext, val: bool = None):
"""toggles if the creator of the autoroom owns it
requires the "Manage Channels" permission
Defaults to false"""
if val is None:
val = not await self.ar_config.guild(ctx.guild).ownership()
await self.ar_config.guild(ctx.guild).ownership.set(val)
message = (
"Autorooms are now owned be their creator"
if val
else "Autorooms are no longer owned by their creator"
)
await ctx.send(message)
@aa_active()
@checks.admin_or_permissions(manage_channels=True)
@autoroomset.command(name="creatorname")
async def togglecreatorname(
self, ctx: GuildContext, channel: discord.VoiceChannel, val: bool = None
):
"""Toggles if an autoroom will get the owner name after the channel name."""
if val is None:
val = not await self.ar_config.channel(channel).creatorname()
await self.ar_config.channel(channel).creatorname.set(val)
message = (
"This channel will be generated by appending the creator's name"
if val
else "This channel will not be generated by appending the creator's name"
)
await ctx.send(message)
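# Illustrative sketch (not part of the original cog): the magic number checked
# in generate_room_for() is simply the OR of the three permissions the bot
# needs, which discord.py can compute directly.
_REQUIRED_AUTOROOM_PERMS = discord.Permissions(
    manage_channels=True,  # 1 << 4  = 16
    move_members=True,     # 1 << 24 = 16777216
    connect=True,          # 1 << 20 = 1048576
)
assert _REQUIRED_AUTOROOM_PERMS.value == 17825808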
|
py | b40862463603d05811d8943001e976233c70a992 | """climsoftweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
#from django.contrib import admin
from django.conf import settings
from django.urls import path, include
from accounts.forms import LoginForm
from accounts.views import LoginUser, LogoutUser
from main import views
urlpatterns = [
#path('admin/', admin.site.urls),
path('', views.mainmenu, name='mainmenu'),
path('login/', name='login', view=LoginUser.as_view(
authentication_form=LoginForm,
template_name='accounts/login.html',
# extra_context={'databases': ['inam_climsoftweb_db']},
)),
path('logout/', name='logout_user', view=LogoutUser.as_view(
next_page='/login',
)),
# main
path('user-admin/', views.user_admin, name='user_admin'),
path('user-profile/', views.user_profile, name='user_profile'),
path('change-password/', views.change_password, name='change_password'),
path('language/', views.language, name='language'),
# apps
path('keyentry/', include('keyentry.urls')),
path('metadata/', include('metadata.urls')),
]
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += [path('__debug__/', include(debug_toolbar.urls))]
|
py | b40862c77348e35da39b34f8f67c3416d3537c7f | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-12-08 13:19
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dengue_linebot', '0015_auto_20161207_1544'),
]
operations = [
migrations.RemoveField(
model_name='responsetounrecogmsg',
name='unrecognized_msg',
),
migrations.AddField(
model_name='responsetounrecogmsg',
name='id',
field=models.AutoField(auto_created=True, default=None, primary_key=True, serialize=False, verbose_name='ID'),
preserve_default=False,
),
migrations.AddField(
model_name='responsetounrecogmsg',
name='unrecognized_msg_content',
field=models.TextField(default='NULL', unique=True),
preserve_default=False,
),
migrations.AlterField(
model_name='botreplylog',
name='receiver',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bot_reply_log', to='dengue_linebot.LineUser'),
),
migrations.AlterField(
model_name='messagelog',
name='speaker',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='message_log', to='dengue_linebot.LineUser'),
),
]
|
py | b40863b3a988414c857fa7bd82ae2d31b1ef6b72 | import json
import peewee
import config
from typing import List
from api.view.curd import BaseCrudView, BaseCrudUserView
from app import app
from crud.schemas.user import User
from lib import mail
from model import db
from model.manage_log import ManageLogModel, MANAGE_OPERATION as MOP
from model.notif import UserNotifLastInfo
from model._post import POST_TYPES
from model.post_stats import post_stats_new
from model.user_token import UserToken
from slim.retcode import RETCODE
from model.user_model import UserModel
from slim.utils import to_bin
from api import cooldown, same_user, get_fuzz_ip, run_in_thread
from api.validate.user import ValidatePasswordResetPostDataModel, ChangePasswordDataModel, SignupDirectDataModel, \
SignupConfirmByEmailDataModel, SignupRequestByEmailDataModel, SigninDataModel, ChangeNicknameDataModel, \
RequestResetPasswordDataModel
async def same_email_post(view):
post = await view.post_data()
if 'email' in post:
return post['email'].lower().encode('utf-8')
@app.route.view('user')
class UserView(BaseCrudUserView):
model = User
@app.route.interface('POST', va_post=RequestResetPasswordDataModel)
@cooldown(config.USER_REQUEST_PASSWORD_RESET_COOLDOWN_BY_IP, b'ic_cd_user_request_reset_password_%b')
@cooldown(config.USER_REQUEST_PASSWORD_RESET_COOLDOWN_BY_ACCOUNT, b'ic_cd_user_request_reset_password_account_%b', unique_id_func=same_email_post)
async def request_password_reset(self):
"""
        Request a password reset (forgot password)
:return:
"""
vpost: RequestResetPasswordDataModel = self._.validated_post
try:
user: UserModel = UserModel.get(UserModel.nickname == vpost.nickname, UserModel.email == vpost.email)
except UserModel.DoesNotExist:
user = None
if user:
if await user.can_request_reset_password():
key = user.gen_reset_key()
user.reset_key = key
user.save()
await mail.send_password_reset(user)
return self.finish(RETCODE.SUCCESS, {'id': user.id, 'nickname': user.nickname})
self.finish(RETCODE.FAILED)
@app.route.interface('POST', summary='密码重置验证', va_post=ValidatePasswordResetPostDataModel)
async def validate_password_reset(self):
"""
        After forgetting the password, submit the verification code and the new password through this interface during the reset flow
:return:
"""
vpost: ValidatePasswordResetPostDataModel = self._.validated_post
user = await UserModel.check_reset_key(vpost.uid, vpost.code)
if user:
info = UserModel.gen_password_and_salt(vpost.password)
user.password = info['password']
user.salt = info['salt']
user.reset_key = None
user.save()
UserToken.clear_by_user_id(user.id)
t: UserToken = await self.setup_user_token(user.id)
self.finish(RETCODE.SUCCESS, {'id': user.id, 'nickname': user.nickname, 'access_token': t.get_token()})
else:
self.finish(RETCODE.FAILED)
@app.route.interface('POST')
async def check_in(self):
""" 签到 """
if self.current_user:
data = self.current_user.check_in()
self.finish(RETCODE.SUCCESS, data)
else:
self.finish(RETCODE.FAILED)
@app.route.interface('POST', va_post=ChangePasswordDataModel)
@cooldown(config.USER_CHANGE_PASSWORD_COOLDOWN_BY_ACCOUNT, b'ic_cd_user_change_password_account_%b', unique_id_func=same_user)
async def change_password(self):
if self.current_user:
vpost: ChangePasswordDataModel = self._.validated_post
u: UserModel = self.current_user
if UserModel.auth_by_mail(u.email, vpost.old_password):
u.set_password(vpost.password)
k = u.refresh_key()
self.finish(RETCODE.SUCCESS, k['key'])
else:
self.finish(RETCODE.FAILED, {'old_password': ['旧密码不正确']})
else:
self.finish(RETCODE.PERMISSION_DENIED)
@app.route.interface('POST', summary='登出')
async def signout(self):
if self.current_user:
self.teardown_user_token(self.current_user)
self.finish(RETCODE.SUCCESS)
else:
self.finish(RETCODE.FAILED)
@app.route.interface('POST', va_post=SigninDataModel)
# @cooldown(config.USER_SIGNIN_COOLDOWN_BY_IP, b'ic_cd_user_signin_%b')
# @cooldown(config.USER_SIGNIN_COOLDOWN_BY_ACCOUNT, b'ic_cd_user_signin_account_%b', unique_id_func=same_email_post)
async def signin(self):
vpost: SigninDataModel = self._.validated_post
# check auth method
if vpost.email:
field_value = vpost.email
auth_method = UserModel.auth_by_mail
elif vpost.username:
field_value = vpost.username
auth_method = UserModel.auth_by_username
else:
return self.finish(RETCODE.FAILED, msg='必须提交用户名或邮箱中的一个作为登录凭据')
# auth and generate access token
user, success = await run_in_thread(auth_method, field_value, vpost.password)
if user:
# expires = 30 if 'remember' in data else None
t: UserToken = await self.setup_user_token(user.id)
self.finish(RETCODE.SUCCESS, {'id': user.id, 'access_token': t.get_token()})
else:
self.finish(RETCODE.FAILED, '登录失败!')
async def get(self):
await super().get()
uid = self.params.get('id', None)
        # Conditions: the request carries a user id, the current user is logged in, and the queried user is the current user
if uid and self.current_user and (uid == self.current_user.id.hex()):
if self.ret_val['code'] == RETCODE.SUCCESS:
data = self.ret_val['data']
data['roles'] = self.roles
data['main_role'] = self.current_user.main_role
self.finish(RETCODE.SUCCESS, data)
async def update(self):
post = await self.post_data()
if 'password' in post:
            # A direct password reset would not pass validation, so hack around it
self.new_pass = post['password']
post['password'] = '00'
await super().set()
async def after_update(self, values: 'SQLValuesToWrite', old_records: List['DataRecord'],
new_records: List['DataRecord']):
raw_post = await self.post_data()
for old_record, record in zip(old_records, new_records):
manage_try_add = lambda column, op: ManageLogModel.add_by_post_changed(
self, column, op, POST_TYPES.USER, values, old_record, record
)
            # Manage log: access token reset
ManageLogModel.add_by_post_changed(self, 'key', MOP.USER_KEY_RESET, POST_TYPES.USER,
values, old_record, record, value=None)
            # Manage log: password reset
ManageLogModel.add_by_post_changed(self, 'password', MOP.USER_PASSWORD_CHANGE, POST_TYPES.USER,
values, old_record, record, value=None)
manage_try_add('state', MOP.POST_STATE_CHANGE)
manage_try_add('visible', MOP.POST_VISIBLE_CHANGE)
manage_try_add('group', MOP.USER_GROUP_CHANGE)
manage_try_add('exp', MOP.USER_EXP_CHANGE)
def manage_try_add_resource(column, op):
if column not in values: return
uid = self.current_user.id
src = json.loads(raw_post['$src'])
                # TODO: check whether it actually exists
def func(info):
info['related_type'] = src['type']
info['related_id'] = to_bin(src['id'])
info['related_user_id'] = uid
ManageLogModel.add_by_post_changed(self, column, op, POST_TYPES.USER, values, old_record, record, cb=func)
manage_try_add_resource('credit', MOP.USER_CREDIT_CHANGE)
manage_try_add_resource('repute', MOP.USER_REPUTE_CHANGE)
@cooldown(config.USER_SIGNUP_COOLDOWN_BY_IP, b'ic_cd_user_signup_%b', cd_if_unsuccessed=10)
async def new(self):
return self.finish(RETCODE.FAILED)
if config.EMAIL_ACTIVATION_ENABLE:
return self.finish(RETCODE.FAILED, '此接口未开放')
return await super().new()
@app.route.interface('POST', va_post=SignupRequestByEmailDataModel, summary='注册申请(邮箱)')
@cooldown(config.USER_SIGNUP_COOLDOWN_BY_IP, b'ic_cd_user_signup_%b', cd_if_unsuccessed=10)
async def signup_request_by_email(self):
"""
        Submit an email signup request
:return:
"""
if not config.USER_ALLOW_SIGNUP:
return self.finish(RETCODE.FAILED, '注册未开放')
        # Send the registration email
if config.EMAIL_ACTIVATION_ENABLE:
vpost: SignupRequestByEmailDataModel = self._.validated_post
code = await UserModel.gen_reg_code_by_email(vpost.email, vpost.password)
await mail.send_reg_code_email(vpost.email, code)
self.finish(RETCODE.SUCCESS)
else:
self.finish(RETCODE.FAILED, '此接口未开放')
@app.route.interface('GET', va_query=SignupConfirmByEmailDataModel, summary='检查注册码和邮箱是否匹配')
async def check_reg_code_by_email(self):
""" 检查与邮件关联的激活码是否可用 """
vquery: SignupConfirmByEmailDataModel = self._.validated_query
pw = await UserModel.check_reg_code_by_email(vquery.email, vquery.code)
self.finish(RETCODE.SUCCESS if pw else RETCODE.FAILED)
@app.route.interface('POST', va_post=SignupConfirmByEmailDataModel, summary='注册确认(邮箱)')
async def signup_confirm_by_email(self):
""" 确认并创建账户 """
vpost: SignupConfirmByEmailDataModel = self._.validated_post
password = await UserModel.check_reg_code_by_email(vpost.email, vpost.code)
if not password:
return self.finish(RETCODE.FAILED, '验证码不正确')
u = UserModel.new(None, password, {'email': vpost.email}, auto_nickname=True)
await self.signup_cleanup(u)
@app.route.interface('POST', va_post=SignupDirectDataModel, summary='注册(直接形式)')
async def signup_by_direct(self):
if self.current_user:
            return self.finish(RETCODE.PERMISSION_DENIED)  # logged-in users have no business here
vpost: SignupDirectDataModel = self._.validated_post
extra_values = {
'email': vpost.email,
'ip_registered': await get_fuzz_ip(self)
}
u = UserModel.new(vpost.nickname, vpost.password, extra_values=extra_values, is_for_tests=False, auto_nickname=False)
await self.signup_cleanup(u)
async def signup_cleanup(self, u):
if u:
            # Add a statistics record
post_stats_new(POST_TYPES.USER, u.id)
UserNotifLastInfo.new(u.id)
if u.email:
await UserModel.reg_code_cleanup(u.email)
t: UserToken = await self.setup_user_token(u.id)
self.finish(RETCODE.SUCCESS, {'id': u.id, 'access_token': t.get_token()})
else:
self.finish(RETCODE.FAILED)
@app.route.interface('POST', va_post=ChangeNicknameDataModel, summary='使用改名卡修改昵称')
async def change_nickname(self):
u: UserModel = self.current_user
if not u:
return self.finish(RETCODE.PERMISSION_DENIED)
vpost: ChangeNicknameDataModel = self._.validated_post
if u.change_nickname_chance > 0:
try:
old_nickname = u.nickname
u.nickname = vpost.nickname
u.change_nickname_chance -= 1
u.is_new_user = False
u.save()
self.finish(RETCODE.SUCCESS, {'nickname': u.nickname, 'change_nickname_chance': u.change_nickname_chance})
                # note: looks a bit odd, but the statement below is actually fine 18.11.13
ManageLogModel.add_by_post_changed(self, 'nickname', MOP.USER_NICKNAME_CHANGE, POST_TYPES.USER,
True, {'nickname': old_nickname}, u)
return
except peewee.DatabaseError:
db.rollback()
self.finish(RETCODE.FAILED)
|
py | b40864631112ec29dc682a56e61a60c58ba7968c | """SCons.Platform.os2
Platform-specific initialization for OS/2 systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/os2.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import win32
def generate(env):
if 'ENV' not in env:
env['ENV'] = {}
env['OBJPREFIX'] = ''
env['OBJSUFFIX'] = '.obj'
env['SHOBJPREFIX'] = '$OBJPREFIX'
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
env['PROGPREFIX'] = ''
env['PROGSUFFIX'] = '.exe'
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
env['SHLIBPREFIX'] = ''
env['SHLIBSUFFIX'] = '.dll'
env['LIBPREFIXES'] = '$LIBPREFIX'
env['LIBSUFFIXES'] = [ '$LIBSUFFIX', '$SHLIBSUFFIX' ]
env['HOST_OS'] = 'os2'
env['HOST_ARCH'] = win32.get_architecture().arch
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b408647c81f38669dd796b1461e9d4101c93542e | from .Template import Template, TemplateError, TemplateVarNotFoundError, TemplateRequiredPathNotFoundError, TemplateProcedureNotFoundError
from . import procedures
|
py | b4086526ead6ef089530a040fdd30f155705dd48 | from __future__ import print_function
import logging
from time import sleep
import boto3
from crhelper import CfnResource
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=True, log_level='DEBUG',
boto_level='CRITICAL')
try:
ec2_client = boto3.client('ec2')
ssm_client = boto3.client('ssm')
except Exception as e:
helper.init_failure(e)
def ssm_ready(instance_id):
try:
response = ssm_client.describe_instance_information(Filters=[
{'Key': 'InstanceIds', 'Values': [instance_id]}
])
logger.debug(response)
return True
except ssm_client.exceptions.InvalidInstanceId:
return False
@helper.create
def create(event, context):
logger.debug("Got Create")
response = ec2_client.describe_instances(Filters=[{
'Name': 'tag:aws:cloud9:environment', 'Values': [event['ResourceProperties']['Cloud9Environment']]
}])
instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
ec2_client.associate_iam_instance_profile(
IamInstanceProfile={
'Name': event['ResourceProperties']['InstanceProfile']},
InstanceId=instance_id)
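    # Poll until the instance registers with SSM, giving up roughly 20s before
    # the Lambda deadline so CloudFormation gets a clean failure, not a timeout.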
while not ssm_ready(instance_id):
if context.get_remaining_time_in_millis() < 20000:
raise Exception(
"Timed out waiting for instance to register with SSM")
sleep(15)
return instance_id
@helper.update
@helper.delete
def no_op(_, __):
return
def handler(event, context):
helper(event, context)
|
py | b40865f17d1a3344e91bb5bc50a429242c0407f2 | from AbstractNode import AbstractNode
from board import Board
class Node(AbstractNode):
"""Implementation of node when solving n-puzzle"""
def __init__(self, state, parent = None, action = None, path_cost = 0, goal = None):
super(AbstractNode, self).__init__()
self.board = Board(state) # board configuration
self.parent = parent # Parent node
self.action = action # What brought us to this state
self.path_cost = path_cost # g in the lecture
        if goal is None:
self.heuristics = 0
else:
self.heuristics = self.board.manhattanDist(goal)
self.depth = 0 # depth of this node
if parent:
self.depth = parent.depth + 1
def neighbours(self):
""" extract all possible neighbours for this state"""
possibleMoves = self.board.validMoves()
nbrs = []
cbr = self.board
for move in possibleMoves:
newConfig = cbr.swap(move)
neigh = Node(newConfig, self, move, 1)
nbrs.append(neigh)
return nbrs
def belongs(self, listOfNodes):
for node in listOfNodes:
if self.testEqual(node):
return True
return False
def testEqual(self, goal):
return self.board.isEqual(goal.board.values)
def __str__(self):
v = self.board.values
string = '-------\n' + \
'|' + str(v[0]) + '|' + str(v[1]) + '|' + str(v[2]) + '|\n' + \
'-------\n' + \
'|' + str(v[3]) + '|' + str(v[4]) + '|' + str(v[5]) + '|\n' + \
'_______\n' + \
'|' + str(v[6]) + '|' + str(v[7]) + '|' + str(v[8]) + '|\n' + \
'-------'
return string
def __lt__(self, other):
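        # Order nodes by f = g + h (path cost plus heuristic) so a priority-queue
        # based search such as A* pops the lowest estimated total cost first.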
return (self.path_cost + self.heuristics) < (other.path_cost + other.heuristics)
|
py | b40866432554205ee57567afdf2527d666cba5c2 | from .converter import Converter, Converter_2022
|
py | b4086684b4839d113e8c6d3eda1b85f847f77402 | # Copyright (c) 2021 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_policy import policy as base_policy
from neutron import policy
from neutron.tests.unit.conf.policies import test_base as base
class FlavorAPITestCase(base.PolicyBaseTestCase):
def setUp(self):
super(FlavorAPITestCase, self).setUp()
self.target = {'project_id': self.project_id}
class SystemAdminTests(FlavorAPITestCase):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.context = self.system_admin_ctx
def test_create_flavor(self):
self.assertTrue(
policy.enforce(self.context,
'create_flavor', self.target))
def test_get_flavor(self):
self.assertTrue(
policy.enforce(self.context, 'get_flavor', self.target))
def test_update_flavor(self):
self.assertTrue(
policy.enforce(self.context,
'update_flavor', self.target))
def test_delete_flavor(self):
self.assertTrue(
policy.enforce(self.context,
'delete_flavor', self.target))
def test_create_service_profile(self):
self.assertTrue(
policy.enforce(self.context,
'create_service_profile', self.target))
def test_get_service_profile(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_profile', self.target))
def test_update_service_profile(self):
self.assertTrue(
policy.enforce(self.context,
'update_service_profile', self.target))
def test_delete_service_profile(self):
self.assertTrue(
policy.enforce(self.context,
'delete_service_profile', self.target))
def test_create_flavor_service_profile(self):
self.assertTrue(
policy.enforce(self.context,
'create_flavor_service_profile', self.target))
def test_delete_flavor_service_profile(self):
self.assertTrue(
policy.enforce(self.context,
'delete_flavor_service_profile', self.target))
class SystemMemberTests(FlavorAPITestCase):
def setUp(self):
self.skipTest("SYSTEM_MEMBER persona isn't supported in phase1 of the "
"community goal")
super(SystemMemberTests, self).setUp()
self.context = self.system_member_ctx
def test_create_flavor(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_flavor', self.target)
def test_get_flavor(self):
self.assertTrue(
policy.enforce(self.context, 'get_flavor', self.target))
def test_update_flavor(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_flavor', self.target)
def test_delete_flavor(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_flavor', self.target)
def test_create_service_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_service_profile', self.target)
def test_get_service_profile(self):
self.assertTrue(
policy.enforce(self.context, 'get_service_profile', self.target))
def test_update_service_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'update_service_profile', self.target)
def test_delete_service_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_service_profile', self.target)
def test_create_flavor_service_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'create_flavor_service_profile',
self.target)
def test_delete_flavor_service_profile(self):
self.assertRaises(
base_policy.PolicyNotAuthorized,
policy.enforce,
self.context, 'delete_flavor_service_profile',
self.target)
class SystemReaderTests(SystemMemberTests):
def setUp(self):
self.skipTest("SYSTEM_READER persona isn't supported in phase1 of the "
"community goal")
super(SystemReaderTests, self).setUp()
self.context = self.system_reader_ctx
class ProjectAdminTests(FlavorAPITestCase):
def setUp(self):
super(ProjectAdminTests, self).setUp()
self.context = self.project_admin_ctx
def test_create_flavor(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_flavor', self.target)
def test_update_flavor(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_flavor', self.target)
def test_delete_flavor(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'delete_flavor', self.target)
def test_create_service_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_service_profile', self.target)
def test_update_service_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'update_service_profile', self.target)
def test_delete_service_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'delete_service_profile', self.target)
def test_create_flavor_service_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'create_flavor_service_profile',
self.target)
def test_delete_flavor_service_profile(self):
self.assertRaises(
base_policy.InvalidScope,
policy.enforce,
self.context, 'delete_flavor_service_profile',
self.target)
class ProjectMemberTests(ProjectAdminTests):
def setUp(self):
super(ProjectMemberTests, self).setUp()
self.context = self.project_member_ctx
class ProjectReaderTests(ProjectMemberTests):
def setUp(self):
super(ProjectReaderTests, self).setUp()
self.context = self.project_reader_ctx
|
py | b4086697cfeda75887150084022a80623c826427 | from __future__ import print_function
from invoke import task, Collection
import os
import yaml
from subprocess import check_output
from shutil import which
import shutil
env_name = 'observer_geospatial'
notebookfolder = 'observer-geospatial-notebooks'
source = '' if os.name == 'nt' else 'source'
def rmdir(dirname):
"""Safely remove a directory, cross-platform
"""
if not os.path.exists(dirname):
return
if os.name == 'nt':
check_output('rmdir {0!s} /S /Q'.format(dirname), shell=True)
else:
check_output(['rm', '-rf', dirname])
@task
def environment(ctx, clean=False, env_name=env_name):
'''
Creates environment for Observer Geospatial Notebooks
Args:
clean: deletes environment prior to reinstallation
env_name: name of environment to install
'''
if clean:
print('deleting environment')
ctx.run('{0!s} deactivate; conda remove -n {1!s} --all'.format(source, env_name))
# Create a new environment
print('creating environment {0!s}'.format(env_name))
ctx.run("conda env create -f binder/environment.yml -n {0!s}".format(env_name))
build(ctx, env_name=env_name)
@task
def build(ctx, env_name=env_name, kernel=True):
'''
Builds an environment with appropriate extensions.
'''
ctx.run("""
{0!s} activate {1!s} &&
jupyter labextension install @jupyter-widgets/jupyterlab-manager --no-build &&
jupyter labextension install @jupyter-voila/jupyterlab-preview --no-build &&
jupyter lab clean && jupyter lab build --dev-build=False --minimize=False
""".format(source, env_name).strip().replace('\n', ''))
if kernel:
ctx.run("{0!s} activate {1!s} && ipython kernel install --name {1!s} --display-name {1!s} --sys-prefix".format(source, env_name))
@task
def notebookfiles(ctx, clean=False, notebookfolder=notebookfolder):
'''
Clones Observer Geospatial Notebooks into folder
Args:
clean: deletes Observer Geospatial Notebooks from notebookfolder prior to installation
notebookfolder: name of notebookfolder
'''
print('cleaning notebookfiles')
if clean:
rmdir(notebookfolder)
print('creating notebookfolder')
if not os.path.exists(notebookfolder):
os.makedirs(notebookfolder)
os.chdir(notebookfolder)
# list of repos used in demo
print('cloning repos into folder {}'.format(notebookfolder))
reponames = [
'Piphi5/MHM-Groups-Dashboard',
'Piphi5/Landcover-Notebook',
'IGES-Geospatial/Mosquito-Habitat-Scripts'
]
for repo in reponames:
if not os.path.isdir(repo.split('/')[1]):
ctx.run('git clone --depth 1 https://github.com/{}.git'.format(repo))
assert os.path.isdir(repo.split('/')[1]), '{} failed download'.format(repo)
# This empty file and empty folder are for showing drag and drop in jupyterlab
# ctx.run('touch move_this_file.txt; mkdir move_it_here')
@task
def clean(ctx, env_name=env_name, notebookfolder=notebookfolder):
'''
Deletes both environment and notebookfolder
Args:
env_name: name of conda environment
notebookfolder: path to folder with notebookfiles
'''
cmd = '{0!s} deactivate && conda remove --name {1!s} --all'
ctx.run(cmd.format(source, env_name))
with open("talks.yml", 'r') as stream:
talks = yaml.load(stream)
for t in talks:
rmdir(t)
rmdir(notebookfolder)
@task
def r(ctx, env_name=env_name):
'''
Installs the r kernel and associated libs.
'''
cmd = '{0!s} activate {1!s} && conda install -c conda-forge r-irkernel r-ggplot2'
ctx.run(cmd.format(source, env_name))
@task
def talk(ctx, talk_name, clean=False):
'''
Reads yaml file talks.yml and
moves files and folders specified
    in yaml file to a folder
matching the name of the talk
Args:
talk_name: name of talk in talks.yml
Note: yaml file is assumed to be
a dict of dicts of lists and
dict with the following python format:
        {'talk_name':
            {'folders':
                {'src0': 'dest0', 'src1': 'dest1'},
             'files':
                ['file0', 'file1'],
             'rename':
                {'oldname': 'newname'}
            }
        }
or in yaml format:
talk_name:
folders:
src0: dest0
src1: dest1
files:
- file0
- file1
rename:
oldname: newname
'''
with open("talks.yml", 'r') as stream:
talks = yaml.load(stream)
if clean:
rmdir(talk_name)
if not os.path.exists(talk_name):
os.makedirs(talk_name)
if 'files' in talks[talk_name]:
for f in talks[talk_name]['files']:
if ((f.split('/')[0] == notebookfolder) and not
os.path.exists(notebookfolder)):
notebookfiles(ctx)
os.chdir('..')
copied_path = os.path.join(talk_name, os.path.basename(f))
shutil.copy(f, copied_path)
assert os.path.isfile(copied_path), \
'{} failed to copy into {}'.format(f, talk_name)
if 'folders' in talks[talk_name]:
for src, dst in talks[talk_name]['folders'].items():
dst = os.path.join(talk_name, dst)
if not os.path.exists(dst):
shutil.copytree(src, dst)
if 'rename' in talks[talk_name]:
for old_file, new_file in talks[talk_name]['rename'].items():
moved_file = os.path.join(talk_name, os.path.basename(old_file))
if os.path.isfile(moved_file):
os.rename(moved_file, os.path.join(talk_name, new_file))
elif os.path.isfile(old_file):
shutil.copy(old_file, os.path.join(talk_name, new_file))
# Configure cross-platform settings.
ns = Collection(environment, build, notebookfiles, r, clean, talk)
ns.configure({
'run': {
'shell': which('bash') if os.name != 'nt' else which('cmd'),
'pty': False if os.name == 'nt' else True
}
})
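# Example invocations (a sketch; invoke picks up the tasks registered in `ns`
# above and maps underscores in argument names to dashes on the command line):
#   invoke environment --clean
#   invoke notebookfiles --notebookfolder=observer-geospatial-notebooks
#   invoke talk <talk_name> --clean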
|
py | b40867b149b11cadf43b06562deb72ebad767efe | import re
import pytest # type: ignore
from snyk import SnykClient
from snyk.__version__ import __version__
from snyk.errors import SnykError, SnykNotFoundError
from snyk.models import Organization, Project
class TestSnykClient(object):
@pytest.fixture
def client(self):
return SnykClient("token")
def test_default_api_url(self, client):
assert client.api_url == "https://snyk.io/api/v1"
def test_overriding_api_url(self):
url = "https://notsnyk.io/api/v1"
client = SnykClient("token", url)
assert client.api_url == url
def test_token_added_to_headers(self, client):
assert client.api_headers["Authorization"] == "token token"
def test_user_agent_added_to_headers(self, client):
assert client.api_headers["User-Agent"] == "pysnyk/%s" % __version__
def test_overriding_user_agent(self):
ua = "test"
client = SnykClient("token", user_agent=ua)
assert client.api_headers["User-Agent"] == ua
def test_token_added_to_post_headers(self, client):
assert client.api_post_headers["Authorization"] == "token token"
def test_post_headers_use_correct_mimetype(self, client):
assert client.api_post_headers["Content-Type"] == "application/json"
def test_get_sends_request_to_snyk(self, requests_mock, client):
requests_mock.get("https://snyk.io/api/v1/sample", text="pong")
assert client.get("sample")
def test_put_sends_request_to_snyk(self, requests_mock, client):
requests_mock.put("https://snyk.io/api/v1/sample", text="pong")
assert client.put("sample", {})
def test_delete_sends_request_to_snyk(self, requests_mock, client):
requests_mock.delete("https://snyk.io/api/v1/sample")
assert client.delete("sample")
def test_post_sends_request_to_snyk(self, requests_mock, client):
requests_mock.post("https://snyk.io/api/v1/sample")
assert client.post("sample", {})
def test_post_raises_error(self, requests_mock, client):
requests_mock.post("https://snyk.io/api/v1/sample", status_code=500, json={})
with pytest.raises(SnykError):
client.post("sample", {})
def test_put_raises_error(self, requests_mock, client):
requests_mock.put("https://snyk.io/api/v1/sample", status_code=500, json={})
with pytest.raises(SnykError):
client.put("sample", {})
def test_delete_raises_error(self, requests_mock, client):
requests_mock.delete("https://snyk.io/api/v1/sample", status_code=500, json={})
with pytest.raises(SnykError):
client.delete("sample")
def test_get_raises_error(self, requests_mock, client):
requests_mock.get("https://snyk.io/api/v1/sample", status_code=500, json={})
with pytest.raises(SnykError):
client.get("sample")
def test_empty_organizations(self, requests_mock, client):
requests_mock.get("https://snyk.io/api/v1/orgs", json={})
assert [] == client.organizations.all()
@pytest.fixture
def organizations(self):
return {
"orgs": [
{
"name": "defaultOrg",
"id": "689ce7f9-7943-4a71-b704-2ba575f01089",
"group": None,
},
{
"name": "My Other Org",
"id": "a04d9cbd-ae6e-44af-b573-0556b0ad4bd2",
"group": {
"name": "ACME Inc.",
"id": "a060a49f-636e-480f-9e14-38e773b2a97f",
},
},
]
}
@pytest.fixture
def projects(self):
return {
"projects": [
{
"name": "atokeneduser/goof",
"id": "6d5813be-7e6d-4ab8-80c2-1e3e2a454545",
"created": "2018-10-29T09:50:54.014Z",
"origin": "cli",
"type": "npm",
"readOnly": "false",
"testFrequency": "daily",
"totalDependencies": 438,
"issueCountsBySeverity": {"low": 8, "high": 13, "medium": 15},
"lastTestedDate": "2019-02-05T06:21:00.000Z",
"browseUrl": "https://app.snyk.io/org/pysnyk-test-org/project/6d5813be-7e6d-4ab8-80c2-1e3e2a454545",
}
]
}
def test_loads_organizations(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert len(client.organizations.all()) == 2
def test_first_organizations(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
org = client.organizations.first()
assert "defaultOrg" == org.name
def test_first_organizations_on_empty(self, requests_mock, client):
requests_mock.get("https://snyk.io/api/v1/orgs", json={})
with pytest.raises(SnykNotFoundError):
client.organizations.first()
def test_filter_organizations(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert 1 == len(client.organizations.filter(name="defaultOrg"))
def test_filter_organizations_empty(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert [] == client.organizations.filter(name="not present")
def test_loads_organization(self, requests_mock, client, organizations):
key = organizations["orgs"][0]["id"]
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
org = client.organizations.get(key)
assert "defaultOrg" == org.name
def test_non_existent_organization(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
with pytest.raises(SnykNotFoundError):
client.organizations.get("not-present")
def test_organization_type(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert all(type(x) is Organization for x in client.organizations.all())
def test_organization_attributes(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert client.organizations.first().name == "defaultOrg"
def test_organization_load_group(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
assert client.organizations.all()[1].group.name == "ACME Inc."
def test_empty_projects(self, requests_mock, client, organizations):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
matcher = re.compile("projects$")
requests_mock.get(matcher, json={})
assert [] == client.projects.all()
def test_projects(self, requests_mock, client, organizations, projects):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
matcher = re.compile("projects$")
requests_mock.get(matcher, json=projects)
assert len(client.projects.all()) == 2
assert all(type(x) is Project for x in client.projects.all())
def test_project(self, requests_mock, client, organizations, projects):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
matcher = re.compile("projects$")
requests_mock.get(matcher, json=projects)
assert (
"atokeneduser/goof"
== client.projects.get("6d5813be-7e6d-4ab8-80c2-1e3e2a454545").name
)
def test_non_existent_project(self, requests_mock, client, organizations, projects):
requests_mock.get("https://snyk.io/api/v1/orgs", json=organizations)
matcher = re.compile("projects$")
requests_mock.get(matcher, json=projects)
with pytest.raises(SnykNotFoundError):
client.projects.get("not-present")
|
py | b40868789030ecb4bc388e7c50c49aef250134f8 | # errors.py
# coding: utf-8
"""
Support for handling compiler errors.
One of the most important (and most annoying) parts of writing a compiler
is reliably reporting error messages to the user. This file defines some
generic functionality for dealing with errors throughout the compiler
project. You may want to extend it with additional capabilities for
unit-testing purposes.
To report errors in your compiler, use the error() function. For example:
error(lineno, 'Some kind of compiler error message')
where lineno is the line number on which the error occurred. If your
compiler supports multiple source files, add the filename keyword
argument.
error(lineno, 'Some kind of error message', filename='foo.src')
The utility function errors_reported() returns the total number of
errors reported so far. Different stages of the compiler can use this
to decide whether or not to continue processing.
Use clear_errors() to reset the total error count.
"""
import sys
_num_errors = 0
def error(lineno, message, filename=None):
"""
    Report a compilation error to all subscribers
"""
global _num_errors
if not filename:
errmsg = "{}: {}".format(lineno, message)
else:
errmsg = "{}:{}: {}".format(filename, lineno, message)
print(errmsg, file=sys.stderr)
_num_errors += 1
def errors_reported():
"""
    Return the number of errors reported
"""
return _num_errors
def clear_errors():
"""
    Reset the total number of errors reported.
"""
global _num_errors
_num_errors = 0
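# A minimal self-check (hypothetical line numbers and filename) showing how the
# error counter behaves; real compiler stages would call error() while parsing.
if __name__ == '__main__':
    error(4, "undefined name 'spam'", filename='demo.src')
    error(7, 'type mismatch in assignment')
    assert errors_reported() == 2
    clear_errors()
    assert errors_reported() == 0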
|
py | b408687ff135580581f14d55c92da61c2fe226c8 | import requests
# this import was missing
from bs4 import BeautifulSoup
url = "https://bama.ir/car/peykan/all-models/all-trims?page=1"
# data = requests.get(url+str(i))
data = requests.get(url)
soup = BeautifulSoup(data.text, "html.parser")
p_tags = soup.find_all('p', class_ = "shortdesc removeEmoji")
# for p_tag in p_tag:
for p_tag in p_tags:
print (p_tag.text) |
py | b40868802ca916459473bc8f2e8c15f403b72649 | from django.contrib import admin
from accounts.admin import custom_site
from dms.models import DirectMessage
class DirectMessagesAdmin(admin.ModelAdmin):
list_display = ['message_sender', 'message_receiver', 'created_on']
    date_hierarchy = 'created_on'
custom_site.register(DirectMessage, DirectMessagesAdmin)
|
py | b4086ad061b4558078a8cf2c9e6f5443baefaf70 | from aiohttp import web
import aiohttp_jinja2 as aj
import jinja2
import asyncpgsa
from .routes import setup_routes
async def create_app(config:dict):
app = web.Application()
app['config'] = config
aj.setup(app,
loader=jinja2.PackageLoader('demo', 'templates'))
setup_routes(app)
app.on_startup.append(on_start)
app.on_shutdown.append(on_shutdown)
return app
async def on_start(app):
config = app['config']
app['db'] = await asyncpgsa.create_pool(dsn=config['datebase_url'])
async def on_shutdown(app):
await app['db'].close()
|
py | b4086af738a46ca7bff723220177673a25d5fc0f | from vispy.scene import SceneCanvas
canvas = SceneCanvas(keys='interactive', bgcolor='w', show=True)
grid = canvas.central_widget.add_grid(spacing=0, bgcolor='gray',
border_color='k')
view1 = grid.add_view(row=0, col=0, margin=10, bgcolor=(1, 0, 0, 0.5),
border_color=(1, 0, 0))
view2 = grid.add_view(row=0, col=1, margin=10, bgcolor=(0, 1, 0, 0.5),
border_color=(0, 1, 0))
if __name__ == '__main__':
canvas.app.run() |
py | b4086b177391efa9692b0c3537c9ee0df3190c0b | from typing import Any
from django.conf.urls import include, url
import zilencer.views
from zerver.lib.rest import rest_dispatch
i18n_urlpatterns = [] # type: Any
# Zilencer views following the REST API style
v1_api_and_json_patterns = [
url('^remotes/push/register$', rest_dispatch,
{'POST': 'zilencer.views.register_remote_push_device'}),
url('^remotes/push/unregister$', rest_dispatch,
{'POST': 'zilencer.views.unregister_remote_push_device'}),
url('^remotes/push/notify$', rest_dispatch,
{'POST': 'zilencer.views.remote_server_notify_push'}),
# Push signup doesn't use the REST API, since there's no auth.
url('^remotes/server/register$', zilencer.views.register_remote_server),
]
urlpatterns = [
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
]
|
py | b4086c4873fc6a0e1a67b546842a48930d739f4d | # changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import util
import struct, os, bz2, zlib, tempfile
def getchunk(source):
"""return the next chunk from changegroup 'source' as a string"""
d = source.read(4)
if not d:
return ""
l = struct.unpack(">l", d)[0]
if l <= 4:
return ""
d = source.read(l - 4)
if len(d) < l - 4:
raise util.Abort(_("premature EOF reading chunk"
" (got %d bytes, expected %d)")
% (len(d), l - 4))
return d
def chunkiter(source, progress=None):
"""iterate through the chunks in source, yielding a sequence of chunks
(strings)"""
while 1:
c = getchunk(source)
if not c:
break
elif progress is not None:
progress()
yield c
def chunkheader(length):
"""return a changegroup chunk header (string)"""
return struct.pack(">l", length + 4)
def closechunk():
"""return a changegroup chunk header (string) for a zero-length chunk"""
return struct.pack(">l", 0)
class nocompress(object):
def compress(self, x):
return x
def flush(self):
return ""
bundletypes = {
"": ("", nocompress),
"HG10UN": ("HG10UN", nocompress),
"HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()),
"HG10GZ": ("HG10GZ", lambda: zlib.compressobj()),
}
def collector(cl, mmfs, files):
# Gather information about changeset nodes going out in a bundle.
# We want to gather manifests needed and filelogs affected.
def collect(node):
c = cl.read(node)
for fn in c[3]:
files.setdefault(fn, fn)
mmfs.setdefault(c[0], node)
return collect
# hgweb uses this list to communicate its preferred type
bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']
def writebundle(cg, filename, bundletype):
"""Write a bundle file and return its filename.
Existing files will not be overwritten.
If no filename is specified, a temporary file is created.
bz2 compression can be turned off.
The bundle file will be deleted in case of errors.
"""
fh = None
cleanup = None
try:
if filename:
fh = open(filename, "wb")
else:
fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
fh = os.fdopen(fd, "wb")
cleanup = filename
header, compressor = bundletypes[bundletype]
fh.write(header)
z = compressor()
# parse the changegroup data, otherwise we will block
# in case of sshrepo because we don't know the end of the stream
# an empty chunkiter is the end of the changegroup
# a changegroup has at least 2 chunkiters (changelog and manifest).
# after that, an empty chunkiter is the end of the changegroup
empty = False
count = 0
while not empty or count <= 2:
empty = True
count += 1
for chunk in chunkiter(cg):
empty = False
fh.write(z.compress(chunkheader(len(chunk))))
pos = 0
while pos < len(chunk):
next = pos + 2**20
fh.write(z.compress(chunk[pos:next]))
pos = next
fh.write(z.compress(closechunk()))
fh.write(z.flush())
cleanup = None
return filename
finally:
if fh is not None:
fh.close()
if cleanup is not None:
os.unlink(cleanup)
def unbundle(header, fh):
if header == 'HG10UN':
return fh
elif not header.startswith('HG'):
# old client with uncompressed bundle
def generator(f):
yield header
for chunk in f:
yield chunk
elif header == 'HG10GZ':
def generator(f):
zd = zlib.decompressobj()
for chunk in f:
yield zd.decompress(chunk)
elif header == 'HG10BZ':
def generator(f):
zd = bz2.BZ2Decompressor()
zd.decompress("BZ")
for chunk in util.filechunkiter(f, 4096):
yield zd.decompress(chunk)
return util.chunkbuffer(generator(fh))
def readbundle(fh, fname):
header = fh.read(6)
if not header.startswith('HG'):
raise util.Abort(_('%s: not a Mercurial bundle file') % fname)
if not header.startswith('HG10'):
raise util.Abort(_('%s: unknown bundle version') % fname)
elif header not in bundletypes:
raise util.Abort(_('%s: unknown bundle compression type') % fname)
return unbundle(header, fh)
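# A minimal sketch of the chunk framing used above (assumes Python 2 and that the
# mercurial `i18n`/`util` imports at the top of this module resolve): each chunk is
# a 4-byte big-endian length (payload plus header) followed by the payload, and a
# zero-length chunk terminates the stream.
if __name__ == '__main__':
    import StringIO
    payload = "hello changegroup"
    framed = chunkheader(len(payload)) + payload + closechunk()
    assert [c for c in chunkiter(StringIO.StringIO(framed))] == [payload]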
|
py | b4086c5b671a83c6c7e21f2d914a9af952182cfc | from application import app
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5003)
|
py | b4086c653406a1dbaca70bf0625a01fe666e6af2 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 17:17:44 2020
@author: fali
"""
print(__name__)
def fib(n):
"""prints fibonacci sequence
pass a number to the function fib(x)
"""
a, b = 0, 1 #Note multiple assignment!
counter = 1
while counter < n:
print (a, end=' ')
a, b = b, a+b
counter += 1
print(a)
print(__name__)
return(0)
if __name__ == "__main__":
fib(8) |
py | b4086c9024b50d2077837de756e41114c814b8fb | # This module is only being maintained for conda-build compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings as _warnings
from tempfile import mkdtemp
# shim for conda-build
from .common.compat import *
PY3 = PY3
if PY3:
import configparser
else:
import ConfigParser as configparser
configparser = configparser
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
# Handle mkdtemp raising an exception
name = None
_closed = False
def __init__(self, suffix="", prefix='tmp', dir=None):
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False, _warnings=_warnings):
from .gateways.disk.delete import rm_rf as _rm_rf
if self.name and not self._closed:
try:
_rm_rf(self.name)
except (TypeError, AttributeError) as ex:
if "None" not in '%s' % (ex,):
raise
_rm_rf(self.name)
self._closed = True
if _warn and _warnings.warn:
_warnings.warn("Implicitly cleaning up {!r}".format(self),
_warnings.ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
|
py | b4086e549a0b077c04a771135167ec2b77d07076 | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msgraph.cli.core import AzCommandsLoader
from azext_people_beta.generated._help import helps # pylint: disable=unused-import
try:
from azext_people_beta.manual._help import helps # pylint: disable=reimported
except ImportError as e:
if e.name.endswith('manual._help'):
pass
else:
raise e
class PeopleCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from msgraph.cli.core.commands import CliCommandType
from azext_people_beta.generated._client_factory import cf_people_beta_cl
people_beta_custom = CliCommandType(
operations_tmpl='azext_people_beta.custom#{}',
client_factory=cf_people_beta_cl)
parent = super(PeopleCommandsLoader, self)
parent.__init__(cli_ctx=cli_ctx, custom_command_type=people_beta_custom)
def load_command_table(self, args):
from azext_people_beta.generated.commands import load_command_table
load_command_table(self, args)
try:
from azext_people_beta.manual.commands import load_command_table as load_command_table_manual
load_command_table_manual(self, args)
except ImportError as e:
if e.name.endswith('manual.commands'):
pass
else:
raise e
return self.command_table
def load_arguments(self, command):
from azext_people_beta.generated._params import load_arguments
load_arguments(self, command)
try:
from azext_people_beta.manual._params import load_arguments as load_arguments_manual
load_arguments_manual(self, command)
except ImportError as e:
if e.name.endswith('manual._params'):
pass
else:
raise e
COMMAND_LOADER_CLS = PeopleCommandsLoader
|
py | b4086e6dd69e6ecb73eb98df3d1d5746c92cdebb | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PublisherConfig(AppConfig):
name = 'publisher'
verbose_name = _('Publisher')
|
py | b408703b9e572300839eb6a25a7d2ad56630775a | import numpy as np
import torch.nn as nn
import random
from torch.optim import Adam
import torch
from torch_geometric.nn import GCNConv
import torch.nn.functional as F
import argparse
import pdb
from sklearn.metrics import roc_auc_score
import multiprocessing
from torch.distributions import Categorical
import itertools
motif_size =3
# max_value = 1000
gen_interval= n_epochs_gen = 3
dis_interval= n_epochs_dis = 3
n_sample = 3
batch_size_gen = 64
batch_size_dis =64
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--random_splits', type=bool, default=True)
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--weight_decay', type=float, default=0.0005)
parser.add_argument('--early_stopping', type=int, default=10)
parser.add_argument('--hidden', type=int, default=128)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--normalize_features', type=bool, default=True)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Generator(torch.nn.Module):
def __init__(self, dataset):
super(Generator, self).__init__()
self.conv1 = GCNConv(dataset.num_features, args.hidden)
self.conv2 = GCNConv(args.hidden, args.hidden)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def forward(self, x, edge_index):
x = F.relu(self.conv1(x, edge_index)) # LAYER 1
z = self.conv2(x, edge_index) # LAYER 2
return z
class Discriminator(torch.nn.Module):
def __init__(self, dataset):
super(Discriminator, self).__init__()
self.conv1 = GCNConv(dataset.num_features, args.hidden)
self.conv2 = GCNConv(args.hidden, args.hidden)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
def forward(self, x, edge_index):
x = F.relu(self.conv1(x, edge_index)) # LAYER 1
z = self.conv2(x, edge_index) # LAYER 2
return z
lr = 0.001
def train_gan(dataset, data, writer):
discriminator = Discriminator(dataset)
discriminator.to(device).reset_parameters()
generator = Generator(dataset)
generator.to(device).reset_parameters()
optimizer_d = Adam(discriminator.parameters(), lr=lr)
optimizer_g = Adam(generator.parameters(), lr=lr)
id2motifs = build_motifs(data)
print("start training...")
# for epoch in range(500):
# print("epoch %d" % epoch)
# loss_d = train_d(discriminator, optimizer_d, data, id2motifs, generator, writer)
# writer.add_scalar('pre/Discriminator loss', loss_d, epoch)
for epoch in range(3000):
print("epoch %d" % epoch)
# for i in range(1000):
loss_d = train_d(discriminator, optimizer_d, data, id2motifs, generator, writer)
# writer.add_scalar('pre/Discriminator loss', loss_d, i)
loss_g = train_g(generator, optimizer_g, data, id2motifs, discriminator, writer)
writer.add_scalar('pre/Generator loss', loss_g, epoch)
writer.add_scalar('pre/Discriminator loss', loss_d, epoch)
if(epoch%1==0):
# auc = evaluate(generator, data, data.test_pos_edge_index, data.test_neg_edge_index)
auc = evaluate(generator, data, data.train_pos_edge_index, data.train_neg_edge_index)
writer.add_scalar('pre/auc score', auc, epoch)
print("training completes")
# def evaluate(model, )
def train_d(model, optimizer, data, id2motifs, generator, writer):
motifs = []
labels = []
epoch=0
losses=[]
for d_epoch in range(n_epochs_gen):
# generate new subsets for the discriminator for every dis_interval iterations
if d_epoch % dis_interval == 0:
motifs, labels = prepare_data_for_d(data, id2motifs, generator)
        # training
train_size = len(motifs)
motif = motifs
label = labels
label = torch.tensor(label).to(device)
z = model(data.x, data.total_edge_index)
motif = [list(i) for i in motif]
motif = torch.tensor(motif)
# pdb.set_trace()
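        # Score a 3-node motif as the sum over embedding dimensions of the
        # element-wise products for its three possible edges (0-1, 1-2, 0-2).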
score = torch.sum((torch.prod(z[motif[:,[0,1]]], axis=1)+torch.prod(z[motif[:,[1,2]]], axis=1)+torch.prod(z[motif[:,[0,2]]], axis=1)), axis=1)
# pd = torch.prod(z[motif], axis=1)
# score = torch.sum( pd, axis=1)
p = torch.sigmoid(score)
loss = -(torch.sum(label * p + (1 - label) * (1 - p)))
total_edge_index = torch.cat([data.train_pos_edge_index, data.train_neg_edge_index], dim=-1)
x_j = torch.index_select(z, 0, total_edge_index[0])
x_i = torch.index_select(z, 0, total_edge_index[1])
link_logits = torch.einsum("ef,ef->e", x_i, x_j)
link_labels = get_link_labels(data.train_pos_edge_index, data.train_neg_edge_index)
# loss += F.binary_cross_entropy_with_logits(link_logits, link_labels)
# print(loss.item())
losses.append(loss)
loss.backward()
optimizer.step()
losses = torch.tensor(losses)
return losses.mean().item()
def reward_d(model, data, motif):
z = model(data.x, data.train_edge_index)
score = torch.sum(torch.prod(z[motif], axis=1), axis=1)
p = torch.sigmoid(score)
reward = 1-p
return reward
def train_g(model, optimizer, data, id2motifs, discriminator,writer):
motifs = []
epoch=0
losses=[]
for g_epoch in range(n_epochs_gen):
# generate new subsets for the generator for every gen_interval iterations
if g_epoch % gen_interval == 0:
motifs, rewards = prepare_data_for_g(data, id2motifs, model, discriminator)
# training
train_size = len(motifs)
start_list = list(range(0, train_size, batch_size_gen))
np.random.shuffle(start_list)
motif = torch.tensor([list(i) for i in motifs])
reward = rewards
reward = torch.tensor(reward).to(device)
z = model(data.x, data.train_edge_index)
score = torch.sum((torch.prod(z[motif[:,[0,1]]], axis=1)+torch.prod(z[motif[:,[1,2]]], axis=1)+torch.prod(z[motif[:,[0,2]]], axis=1)), axis=1)
# p = 1 - torch.exp(-score)
# p = torch.clamp(p, 1e-5, 1)
p = torch.sigmoid(score)
loss = -torch.mean(p*reward)
total_edge_index = torch.cat([data.train_pos_edge_index, data.train_neg_edge_index], dim=-1)
x_j = torch.index_select(z, 0, total_edge_index[0])
x_i = torch.index_select(z, 0, total_edge_index[1])
link_logits = torch.einsum("ef,ef->e", x_i, x_j)
link_labels = get_link_labels(data.train_pos_edge_index, data.train_neg_edge_index)
# loss += F.binary_cross_entropy_with_logits(link_logits, link_labels)
losses.append(loss)
loss.backward()
optimizer.step()
losses = torch.tensor(losses)
return losses.mean().item()
def prepare_data_for_d(data, id2motifs, generator):
"""generate positive and negative samples for the discriminator"""
motifs = []
labels = []
g_s_args = []
poss = []
negs = []
for i in range(data.x.size(0)):
if np.random.rand() < 0.5:
pos = random.sample(id2motifs[i], min(len(id2motifs[i]), n_sample))
poss.append(pos)
g_s_args.append((i, len(pos), True))
z = generator(data.x, data.total_edge_index)
negs, _ = sampling(g_s_args, z, data)
for pos in poss:
if len(pos) != 0:
motifs.extend(pos)
labels.extend([1] * len(pos))
motifs+=negs
labels.extend([0] * len(negs))
motifs, labels = shuffle(motifs, labels)
return motifs, labels
def shuffle(*args):
idx = list(range(len(args[0])))
random.shuffle(idx)
results = []
for array in args:
results.append([array[i] for i in idx])
return tuple(results)
# data.train_edge_index, data.train_masked_nodes
def prepare_data_for_g(data, id2motifs, generator, discriminator):
"""sample subsets for the generator"""
paths = []
g_s_args = []
for i in data.train_nodes:
if np.random.rand() < 0.5:
g_s_args.append((i, n_sample, False))
z = generator(data.x, data.total_edge_index)
motifs, paths = sampling(g_s_args, z, data)
rewards = []
rewards.append(reward_d(discriminator, data, motifs).tolist())
rewards = np.concatenate(rewards)
motifs, reward = shuffle(motifs, rewards)
return motifs, reward
def build_motifs(data):
x = data.x
id2nid = build_nid(data)
motifs = set((node, ) for node in data.train_nodes)
id2motifs = [[] for i in range(x.size(0))]
num =0
for i in range(x.size(0)):
comb = list(itertools.combinations(id2nid[i], r=2))
if(len(comb)>0):
motifs = set(tuple(sorted(list(motif) + [i])) for motif in comb)
num +=len(motifs)
for k in motifs:
id2motifs[i].append(k)
# pdb.set_trace()
print('totally %d motifs' % num)
data.id2motifs = id2motifs
return id2motifs
def build_nid(data):
row, col = data.total_edge_index
col = col.tolist()
id2nid= []
for i in range(data.x.size(0)):
id2nid.append([])
key =0
temp=[]
for i, item in enumerate(col):
if(row[i]==key):
temp.append(item)
else:
id2nid[row[i]]=temp
temp=[]
key=row[i]
id2nid = [set(nodes) for nodes in id2nid]
data.id2nid= id2nid
return id2nid
def evaluate(model, data, pos_edge_index, neg_edge_index ):
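    # Link-prediction evaluation: score each candidate edge by the dot product of
    # its endpoint embeddings and report ROC-AUC against the pos/neg edge labels.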
model.eval()
z = model(data.x, data.train_edge_index)
total_edge_index = torch.cat([pos_edge_index, neg_edge_index], dim=-1)
x_j = torch.index_select(z, 0, total_edge_index[0])
x_i = torch.index_select(z, 0, total_edge_index[1])
score = torch.einsum("ef,ef->e", x_j, x_i)
link_probs = torch.sigmoid(score)
link_labels = get_link_labels(pos_edge_index, neg_edge_index)
link_probs = link_probs.detach().cpu().numpy()
link_labels = link_labels.detach().cpu().numpy()
auc = (roc_auc_score(link_labels, link_probs))
return auc
def get_link_labels(pos_edge_index, neg_edge_index):
link_labels = torch.zeros(pos_edge_index.size(1) +
neg_edge_index.size(1)).float().to(device)
link_labels[:pos_edge_index.size(1)] = 1.
return link_labels
def sampling(pl, z, data): # for multiprocessing, pass multiple args in one tuple
motifs = []
paths = []
for k in pl:
root, n_sample, only_neg = k
if(root not in data.train_nodes): continue
motif = [root]
v1, v2, v3 = g_v(motif, z, data)
# print(v1,v2,v3)
# pdb.set_trace()
for i in range(n_sample):
if(np.random.rand() < 0.5): continue
motif = [root]
if (i==1):
motif.append(v1)
motif.append(v2)
motif = tuple(sorted(motif))
elif(i==2):
motif.append(v1)
motif.append(v3)
motif = tuple(sorted(motif))
            elif(i==0):
motif.append(v2)
motif.append(v3)
motif = tuple(sorted(motif))
if(len(motif)<motif_size):
continue
motifs.append(motif)
return motifs, paths
def g_v(roots, z, data):
g_v_v = z[roots[0]]
all_node =list(range(z.size(0)))
all_node.pop(roots[0])
row = torch.tensor(all_node).to(device)
x_j = torch.index_select(z, 0, row)
x_i = g_v_v.repeat(z.size(0)-1,1)
one_hop = torch.einsum("ef,ef->e", x_i, x_j)
rel_prob = torch.softmax((1-torch.exp(-one_hop)), -1)
# rel_prob = torch.softmax(one_hop,-1)
# ss = Categorical(rel_prob)
# v1 = ss.sample()
# rel_prob[v1] =0
# ss = Categorical(rel_prob)
# v2 = ss.sample()
# rel_prob[v2] =0
# ss = Categorical(rel_prob)
# v3 = ss.sample()
#
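    # Draw three distinct candidates: each chosen index is zeroed out so the
    # next torch.multinomial call effectively samples without replacement.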
v1 = torch.multinomial(rel_prob,1).item()
rel_prob[v1] = 0
v2 = torch.multinomial(rel_prob,1).item()
rel_prob[v2] = 0
v3 = torch.multinomial(rel_prob,1).item()
# prob_dist = torch.distributions.Categorical(rel_prob)
return v1, v2, v3 |
py | b40870c2be0bb1f2bd48205ac70845726022f2a3 | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import string
import paddle
# from paddle.nn import functional as F
import torch
class BaseRecLabelDecode(object):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False):
support_character_type = [
'ch', 'en', 'EN_symbol', 'french', 'german', 'japan', 'korean',
'it', 'es', 'pt', 'ru', 'ar', 'ta', 'ug', 'fa', 'ur', 'rs_latin',
'oc', 'rs_cyrillic', 'bg', 'uk', 'be', 'te', 'kn', 'ch_tra', 'hi',
'mr', 'ne', 'EN'
]
assert character_type in support_character_type, "Only {} are supported now but get {}".format(
support_character_type, character_type)
self.beg_str = "sos"
self.end_str = "eos"
if character_type == "en":
self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
dict_character = list(self.character_str)
elif character_type == "EN_symbol":
# same with ASTER setting (use 94 char).
self.character_str = string.printable[:-6]
dict_character = list(self.character_str)
elif character_type in support_character_type:
self.character_str = ""
assert character_dict_path is not None, "character_dict_path should not be None when character_type is {}".format(
character_type)
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
for line in lines:
line = line.decode('utf-8').strip("\n").strip("\r\n")
self.character_str += line
if use_space_char:
self.character_str += " "
dict_character = list(self.character_str)
else:
raise NotImplementedError
self.character_type = character_type
dict_character = self.add_special_char(dict_character)
self.dict = {}
for i, char in enumerate(dict_character):
self.dict[char] = i
self.character = dict_character
def add_special_char(self, dict_character):
return dict_character
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def get_ignored_tokens(self):
return [0] # for ctc blank
class CTCLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False,
**kwargs):
super(CTCLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
def __call__(self, preds, label=None, *args, **kwargs):
if isinstance(preds, torch.Tensor):
preds = preds.numpy()
preds_idx = preds.argmax(axis=2)
preds_prob = preds.max(axis=2)
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
if label is None:
return text
label = self.decode(label)
return text, label
def add_special_char(self, dict_character):
dict_character = ['blank'] + dict_character
return dict_character
class AttnLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='ch',
use_space_char=False,
**kwargs):
super(AttnLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
def add_special_char(self, dict_character):
self.beg_str = "sos"
self.end_str = "eos"
dict_character = dict_character
dict_character = [self.beg_str] + dict_character + [self.end_str]
return dict_character
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
[beg_idx, end_idx] = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if int(text_index[batch_idx][idx]) == int(end_idx):
break
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def __call__(self, preds, label=None, *args, **kwargs):
"""
text = self.decode(text)
if label is None:
return text
else:
label = self.decode(label, is_remove_duplicate=False)
return text, label
"""
if isinstance(preds, torch.Tensor):
preds = preds.numpy()
preds_idx = preds.argmax(axis=2)
preds_prob = preds.max(axis=2)
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
if label is None:
return text
label = self.decode(label, is_remove_duplicate=False)
return text, label
def get_ignored_tokens(self):
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
return [beg_idx, end_idx]
def get_beg_end_flag_idx(self, beg_or_end):
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
assert False, "unsupport type %s in get_beg_end_flag_idx" \
% beg_or_end
return idx
class SRNLabelDecode(BaseRecLabelDecode):
""" Convert between text-label and text-index """
def __init__(self,
character_dict_path=None,
character_type='en',
use_space_char=False,
**kwargs):
self.max_text_length = kwargs['max_text_length']
super(SRNLabelDecode, self).__init__(character_dict_path,
character_type, use_space_char)
def __call__(self, preds, label=None, *args, **kwargs):
pred = preds['predict']
char_num = len(self.character_str) + 2
if isinstance(pred, torch.Tensor):
pred = pred.numpy()
pred = np.reshape(pred, [-1, char_num])
preds_idx = np.argmax(pred, axis=1)
preds_prob = np.max(pred, axis=1)
preds_idx = np.reshape(preds_idx, [-1, self.max_text_length])
preds_prob = np.reshape(preds_prob, [-1, self.max_text_length])
text = self.decode(preds_idx, preds_prob)
if label is None:
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=False)
return text
label = self.decode(label)
return text, label
def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
""" convert text-index into text-label. """
result_list = []
ignored_tokens = self.get_ignored_tokens()
batch_size = len(text_index)
for batch_idx in range(batch_size):
char_list = []
conf_list = []
for idx in range(len(text_index[batch_idx])):
if text_index[batch_idx][idx] in ignored_tokens:
continue
if is_remove_duplicate:
# only for predict
if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
batch_idx][idx]:
continue
char_list.append(self.character[int(text_index[batch_idx][
idx])])
if text_prob is not None:
conf_list.append(text_prob[batch_idx][idx])
else:
conf_list.append(1)
text = ''.join(char_list)
result_list.append((text, np.mean(conf_list)))
return result_list
def add_special_char(self, dict_character):
dict_character = dict_character + [self.beg_str, self.end_str]
return dict_character
def get_ignored_tokens(self):
beg_idx = self.get_beg_end_flag_idx("beg")
end_idx = self.get_beg_end_flag_idx("end")
return [beg_idx, end_idx]
def get_beg_end_flag_idx(self, beg_or_end):
if beg_or_end == "beg":
idx = np.array(self.dict[self.beg_str])
elif beg_or_end == "end":
idx = np.array(self.dict[self.end_str])
else:
assert False, "unsupport type %s in get_beg_end_flag_idx" \
% beg_or_end
return idx
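# A minimal sketch of CTC decoding with the classes above (hypothetical logits;
# assumes the paddle/torch imports at the top of this file resolve): repeated
# indices are collapsed and the blank index 0 added by add_special_char is dropped.
if __name__ == "__main__":
    decoder = CTCLabelDecode(character_type="en")
    logits = np.zeros((1, 5, len(decoder.character)), dtype="float32")
    for t, idx in enumerate([2, 2, 0, 3, 3]):  # '1', '1', blank, '2', '2'
        logits[0, t, idx] = 1.0
    print(decoder(torch.from_numpy(logits)))  # expected: [('12', 1.0)]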
|
py | b40870f77c0f3423a4a90ac43167740bdf49341b | import time
from datetime import date, timedelta
import helpers.list_validation_methods as lvm
from config import collegehumor_article_archive_link as archive_link
from helpers.enums import *
from helpers.reddit import post_to_reddit
def article_published_today(link):
""" Compares the date the article was written with today's date."""
soup = lvm.soup_session(link)
todays_date = (date.today() - timedelta(0)).strftime("%B %#d, %Y") # The # is platform specific
    date_to_check = soup.find('time', attrs={'class': 'date'})
    return date_to_check is not None and date_to_check.text.strip() == todays_date
def find_article_to_parse(create_post=True):
"""Finds a list article in CollegeHumor's latest article archive and posts the list article to Reddit."""
website = ArticleType.CollegeHumor
website_name = convert_enum_to_string(website)
print(f"Searching {website_name}'s archive.")
soup = lvm.soup_session(archive_link)
for article in soup.find_all('h3', attrs={'class': 'title'}):
article_link = 'http://www.collegehumor.com' + article.find('a')['href']
if not lvm.article_title_meets_posting_requirements(website, article.text):
continue
if article_published_today(article_link):
article_list_text = get_article_list_text(article_link, lvm.get_article_list_count(article.text))
if article_list_text and not lvm.post_previously_made(article_link):
print(f"{website_name} list article found: " + article.text)
if create_post:
post_to_reddit(article.text, article_list_text, article_link, website)
return True
print(f"No {website_name} list articles were found to parse at this time.")
return False
def get_article_list_text(link_to_check, total_list_elements):
"""Concatenates the list elements of the article into a single string. Ensures proper list formatting before making a post."""
list_counter = 1
full_list = ''
soup = lvm.soup_session(link_to_check)
for article in soup.find_all('h2'):
        if not article.text or article.text[0].isdigit():
            continue  # skip empty headings and headings that already start with a number
full_list += str(list_counter) + '. ' + article.text.strip() + '\n'
list_counter += 1
if lvm.article_text_meets_posting_requirements(ArticleType.CollegeHumor, full_list, list_counter, total_list_elements):
return full_list
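# Hedged sketch (not part of the original script): how the <h2> extraction inside
# get_article_list_text behaves on a toy HTML fragment. BeautifulSoup is assumed to be
# available (it appears to back lvm.soup_session); the headings below are invented.
def _demo_list_extraction():
    from bs4 import BeautifulSoup
    html = "<h2>First item</h2><h2>2 Already numbered</h2><h2>Second item</h2>"
    soup = BeautifulSoup(html, "html.parser")
    lines, counter = [], 1
    for heading in soup.find_all('h2'):
        if not heading.text or heading.text[0].isdigit():
            continue  # same skip rule as get_article_list_text
        lines.append(f"{counter}. {heading.text.strip()}")
        counter += 1
    return "\n".join(lines)  # -> "1. First item\n2. Second item"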
if __name__ == "__main__":
start_time = round(time.time(), 2)
find_article_to_parse(create_post=False)
print("CollegeHumor script ran for " + str(round((time.time() - start_time), 2)) + " seconds.")
|
py | b408718e528f563adfea8b2802f6e768e70f95ae | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from typing import Any, List, TYPE_CHECKING
import logging
import uamqp
from ._base_handler import (
_parse_conn_str,
ServiceBusSharedKeyCredential,
ServiceBusSASTokenCredential,
BaseHandler,
)
from ._servicebus_sender import ServiceBusSender
from ._servicebus_receiver import ServiceBusReceiver
from ._common._configuration import Configuration
from ._common.utils import (
create_authentication,
generate_dead_letter_entity_name,
strip_protocol_from_uri,
)
from ._common.constants import ServiceBusSubQueue
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
_LOGGER = logging.getLogger(__name__)
class ServiceBusClient(object):
"""The ServiceBusClient class defines a high level interface for
getting ServiceBusSender and ServiceBusReceiver.
:ivar fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
The namespace format is: `<yournamespace>.servicebus.windows.net`.
:vartype fully_qualified_namespace: str
:param str fully_qualified_namespace: The fully qualified host name for the Service Bus namespace.
The namespace format is: `<yournamespace>.servicebus.windows.net`.
:param ~azure.core.credentials.TokenCredential credential: The credential object used for authentication which
implements a particular interface for getting tokens. It accepts
credential objects generated by the azure-identity library and objects that implement the
`get_token(self, *scopes)` method.
:keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
:keyword transport_type: The type of transport protocol that will be used for communicating with
the Service Bus service. Default is `TransportType.Amqp` in which case port 5671 is used.
If the port 5671 is unavailable/blocked in the network environment, `TransportType.AmqpOverWebsocket` could
be used instead which uses port 443 for communication.
:paramtype transport_type: ~azure.servicebus.TransportType
:keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
Additionally the following keys may also be present: `'username', 'password'`.
:keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
:keyword int retry_total: The total number of attempts to redo a failed operation when an error occurs.
Default value is 3.
:keyword float retry_backoff_factor: Delta back-off internal in the unit of second between retries.
Default value is 0.8.
:keyword float retry_backoff_max: Maximum back-off interval in the unit of second. Default value is 120.
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_sb_client_sync]
:end-before: [END create_sb_client_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusClient.
"""
def __init__(self, fully_qualified_namespace, credential, **kwargs):
# type: (str, TokenCredential, Any) -> None
# If the user provided http:// or sb://, let's be polite and strip that.
self.fully_qualified_namespace = strip_protocol_from_uri(
fully_qualified_namespace.strip()
)
self._credential = credential
self._config = Configuration(**kwargs)
self._connection = None
        # Optional entity name, can be the name of Queue or Topic. Intentionally not advertised; typically not needed.
self._entity_name = kwargs.get("entity_name")
self._auth_uri = "sb://{}".format(self.fully_qualified_namespace)
if self._entity_name:
self._auth_uri = "{}/{}".format(self._auth_uri, self._entity_name)
# Internal flag for switching whether to apply connection sharing, pending fix in uamqp library
self._connection_sharing = False
self._handlers = [] # type: List[BaseHandler]
def __enter__(self):
if self._connection_sharing:
self._create_uamqp_connection()
return self
def __exit__(self, *args):
self.close()
def _create_uamqp_connection(self):
auth = create_authentication(self)
self._connection = uamqp.Connection(
hostname=self.fully_qualified_namespace,
sasl=auth,
debug=self._config.logging_enable,
)
def close(self):
# type: () -> None
"""
Close down the ServiceBus client.
All spawned senders, receivers and underlying connection will be shutdown.
:return: None
"""
for handler in self._handlers:
try:
handler.close()
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error(
"Client has met an exception when closing the handler: %r. Exception: %r.",
handler._container_id, # pylint: disable=protected-access
exception,
)
del self._handlers[:]
if self._connection_sharing and self._connection:
self._connection.destroy()
@classmethod
def from_connection_string(cls, conn_str, **kwargs):
# type: (str, Any) -> ServiceBusClient
"""
Create a ServiceBusClient from a connection string.
:param str conn_str: The connection string of a Service Bus.
:keyword bool logging_enable: Whether to output network trace logs to the logger. Default is `False`.
:keyword transport_type: The type of transport protocol that will be used for communicating with
the Service Bus service. Default is `TransportType.Amqp` in which case port 5671 is used.
If the port 5671 is unavailable/blocked in the network environment, `TransportType.AmqpOverWebsocket` could
be used instead which uses port 443 for communication.
:paramtype transport_type: ~azure.servicebus.TransportType
:keyword dict http_proxy: HTTP proxy settings. This must be a dictionary with the following
keys: `'proxy_hostname'` (str value) and `'proxy_port'` (int value).
Additionally the following keys may also be present: `'username', 'password'`.
:keyword str user_agent: If specified, this will be added in front of the built-in user agent string.
:keyword int retry_total: The total number of attempts to redo a failed operation when an error occurs.
Default value is 3.
:keyword float retry_backoff_factor: Delta back-off internal in the unit of second between retries.
Default value is 0.8.
:keyword float retry_backoff_max: Maximum back-off interval in the unit of second. Default value is 120.
:rtype: ~azure.servicebus.ServiceBusClient
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_sb_client_from_conn_str_sync]
:end-before: [END create_sb_client_from_conn_str_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusClient from connection string.
"""
host, policy, key, entity_in_conn_str, token, token_expiry = _parse_conn_str(
conn_str
)
if token and token_expiry:
credential = ServiceBusSASTokenCredential(token, token_expiry)
elif policy and key:
credential = ServiceBusSharedKeyCredential(policy, key) # type: ignore
return cls(
fully_qualified_namespace=host,
entity_name=entity_in_conn_str or kwargs.pop("entity_name", None),
credential=credential, # type: ignore
**kwargs
)
def get_queue_sender(self, queue_name, **kwargs):
# type: (str, Any) -> ServiceBusSender
"""Get ServiceBusSender for the specific queue.
:param str queue_name: The path of specific Service Bus Queue the client connects to.
:rtype: ~azure.servicebus.ServiceBusSender
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_servicebus_sender_from_sb_client_sync]
:end-before: [END create_servicebus_sender_from_sb_client_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusSender from ServiceBusClient.
"""
# pylint: disable=protected-access
if self._entity_name and queue_name != self._entity_name:
raise ValueError(
"The queue name provided does not match the EntityPath in "
"the connection string used to construct the ServiceBusClient."
)
handler = ServiceBusSender(
fully_qualified_namespace=self.fully_qualified_namespace,
queue_name=queue_name,
credential=self._credential,
logging_enable=self._config.logging_enable,
transport_type=self._config.transport_type,
http_proxy=self._config.http_proxy,
connection=self._connection,
user_agent=self._config.user_agent,
retry_total=self._config.retry_total,
retry_backoff_factor=self._config.retry_backoff_factor,
retry_backoff_max=self._config.retry_backoff_max,
**kwargs
)
self._handlers.append(handler)
return handler
def get_queue_receiver(self, queue_name, **kwargs):
# type: (str, Any) -> ServiceBusReceiver
"""Get ServiceBusReceiver for the specific queue.
:param str queue_name: The path of specific Service Bus Queue the client connects to.
:keyword session_id: A specific session from which to receive. This must be specified for a
sessionful queue, otherwise it must be None. In order to receive messages from the next available
session, set this to ~azure.servicebus.NEXT_AVAILABLE_SESSION.
:paramtype session_id: Union[str, ~azure.servicebus.NEXT_AVAILABLE_SESSION]
:keyword Optional[Union[ServiceBusSubQueue, str]] sub_queue: If specified, the subqueue this receiver will
connect to.
        This includes the DEAD_LETTER and TRANSFER_DEAD_LETTER queues, which hold messages that can't be delivered to
        any receiver or that can't be processed.
The default is None, meaning connect to the primary queue. Can be assigned values from `ServiceBusSubQueue`
enum or equivalent string values "deadletter" and "transferdeadletter".
:keyword receive_mode: The receive_mode with which messages will be retrieved from the entity. The two options
are PEEK_LOCK and RECEIVE_AND_DELETE. Messages received with PEEK_LOCK must be settled within a given
lock period before they will be removed from the queue. Messages received with RECEIVE_AND_DELETE
will be immediately removed from the queue, and cannot be subsequently rejected or re-received if
the client fails to process the message. The default receive_mode is PEEK_LOCK.
:paramtype receive_mode: Union[~azure.servicebus.ServiceBusReceiveMode, str]
:keyword Optional[float] max_wait_time: The timeout in seconds between received messages after which the
receiver will automatically stop receiving. The default value is None, meaning no timeout.
:keyword Optional[~azure.servicebus.AutoLockRenewer] auto_lock_renewer: An ~azure.servicebus.AutoLockRenewer
can be provided such that messages are automatically registered on receipt. If the receiver is a session
receiver, it will apply to the session instead.
:keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
This setting is only for advanced performance tuning. Increasing this value will improve message throughput
performance but increase the chance that messages will expire while they are cached if they're not
processed fast enough.
The default value is 0, meaning messages will be received from the service and processed one at a time.
In the case of prefetch_count being 0, `ServiceBusReceiver.receive` would try to cache `max_message_count`
(if provided) within its request to the service.
:rtype: ~azure.servicebus.ServiceBusReceiver
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_servicebus_receiver_from_sb_client_sync]
:end-before: [END create_servicebus_receiver_from_sb_client_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusReceiver from ServiceBusClient.
"""
if self._entity_name and queue_name != self._entity_name:
raise ValueError(
"The queue name provided does not match the EntityPath in "
"the connection string used to construct the ServiceBusClient."
)
sub_queue = kwargs.get("sub_queue", None)
if sub_queue and kwargs.get("session_id"):
raise ValueError(
"session_id and sub_queue can not be specified simultaneously. "
"To connect to the sub queue of a sessionful queue, "
"please set sub_queue only as sub_queue does not support session."
)
try:
queue_name = generate_dead_letter_entity_name(
queue_name=queue_name,
transfer_deadletter=(
ServiceBusSubQueue(sub_queue)
== ServiceBusSubQueue.TRANSFER_DEAD_LETTER
),
)
except ValueError:
if (
sub_queue
): # If we got here and sub_queue is defined, it's an incorrect value or something unrelated.
raise
# pylint: disable=protected-access
handler = ServiceBusReceiver(
fully_qualified_namespace=self.fully_qualified_namespace,
entity_name=queue_name,
credential=self._credential,
logging_enable=self._config.logging_enable,
transport_type=self._config.transport_type,
http_proxy=self._config.http_proxy,
connection=self._connection,
user_agent=self._config.user_agent,
retry_total=self._config.retry_total,
retry_backoff_factor=self._config.retry_backoff_factor,
retry_backoff_max=self._config.retry_backoff_max,
**kwargs
)
self._handlers.append(handler)
return handler
def get_topic_sender(self, topic_name, **kwargs):
# type: (str, Any) -> ServiceBusSender
"""Get ServiceBusSender for the specific topic.
:param str topic_name: The path of specific Service Bus Topic the client connects to.
:rtype: ~azure.servicebus.ServiceBusSender
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_topic_sender_from_sb_client_sync]
:end-before: [END create_topic_sender_from_sb_client_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusSender from ServiceBusClient.
"""
if self._entity_name and topic_name != self._entity_name:
raise ValueError(
"The topic name provided does not match the EntityPath in "
"the connection string used to construct the ServiceBusClient."
)
handler = ServiceBusSender(
fully_qualified_namespace=self.fully_qualified_namespace,
topic_name=topic_name,
credential=self._credential,
logging_enable=self._config.logging_enable,
transport_type=self._config.transport_type,
http_proxy=self._config.http_proxy,
connection=self._connection,
user_agent=self._config.user_agent,
retry_total=self._config.retry_total,
retry_backoff_factor=self._config.retry_backoff_factor,
retry_backoff_max=self._config.retry_backoff_max,
**kwargs
)
self._handlers.append(handler)
return handler
def get_subscription_receiver(self, topic_name, subscription_name, **kwargs):
# type: (str, str, Any) -> ServiceBusReceiver
"""Get ServiceBusReceiver for the specific subscription under the topic.
:param str topic_name: The name of specific Service Bus Topic the client connects to.
:param str subscription_name: The name of specific Service Bus Subscription
under the given Service Bus Topic.
:keyword session_id: A specific session from which to receive. This must be specified for a
sessionful subscription, otherwise it must be None. In order to receive messages from the next available
session, set this to ~azure.servicebus.NEXT_AVAILABLE_SESSION.
:paramtype session_id: Union[str, ~azure.servicebus.NEXT_AVAILABLE_SESSION]
:keyword Optional[Union[ServiceBusSubQueue, str]] sub_queue: If specified, the subqueue this receiver will
connect to.
        This includes the DEAD_LETTER and TRANSFER_DEAD_LETTER queues, which hold messages that can't be delivered to
        any receiver or that can't be processed.
The default is None, meaning connect to the primary queue. Can be assigned values from `ServiceBusSubQueue`
enum or equivalent string values "deadletter" and "transferdeadletter".
:keyword receive_mode: The receive_mode with which messages will be retrieved from the entity. The two options
are PEEK_LOCK and RECEIVE_AND_DELETE. Messages received with PEEK_LOCK must be settled within a given
lock period before they will be removed from the subscription. Messages received with RECEIVE_AND_DELETE
will be immediately removed from the subscription, and cannot be subsequently rejected or re-received if
the client fails to process the message. The default receive_mode is PEEK_LOCK.
:paramtype receive_mode: Union[~azure.servicebus.ServiceBusReceiveMode, str]
:keyword Optional[float] max_wait_time: The timeout in seconds between received messages after which the
receiver will automatically stop receiving. The default value is None, meaning no timeout.
:keyword Optional[~azure.servicebus.AutoLockRenewer] auto_lock_renewer: An ~azure.servicebus.AutoLockRenewer
can be provided such that messages are automatically registered on receipt. If the receiver is a session
receiver, it will apply to the session instead.
:keyword int prefetch_count: The maximum number of messages to cache with each request to the service.
This setting is only for advanced performance tuning. Increasing this value will improve message throughput
performance but increase the chance that messages will expire while they are cached if they're not
processed fast enough.
The default value is 0, meaning messages will be received from the service and processed one at a time.
In the case of prefetch_count being 0, `ServiceBusReceiver.receive` would try to cache `max_message_count`
(if provided) within its request to the service.
:rtype: ~azure.servicebus.ServiceBusReceiver
.. admonition:: Example:
.. literalinclude:: ../samples/sync_samples/sample_code_servicebus.py
:start-after: [START create_subscription_receiver_from_sb_client_sync]
:end-before: [END create_subscription_receiver_from_sb_client_sync]
:language: python
:dedent: 4
:caption: Create a new instance of the ServiceBusReceiver from ServiceBusClient.
"""
# pylint: disable=protected-access
if self._entity_name and topic_name != self._entity_name:
raise ValueError(
"The topic name provided does not match the EntityPath in "
"the connection string used to construct the ServiceBusClient."
)
sub_queue = kwargs.get("sub_queue", None)
if sub_queue and kwargs.get("session_id"):
raise ValueError(
"session_id and sub_queue can not be specified simultaneously. "
"To connect to the sub queue of a sessionful subscription, "
"please set sub_queue only as sub_queue is not sessionful."
)
try:
entity_name = generate_dead_letter_entity_name(
topic_name=topic_name,
subscription_name=subscription_name,
transfer_deadletter=(
ServiceBusSubQueue(sub_queue)
== ServiceBusSubQueue.TRANSFER_DEAD_LETTER
),
)
handler = ServiceBusReceiver(
fully_qualified_namespace=self.fully_qualified_namespace,
entity_name=entity_name,
credential=self._credential,
logging_enable=self._config.logging_enable,
transport_type=self._config.transport_type,
http_proxy=self._config.http_proxy,
connection=self._connection,
user_agent=self._config.user_agent,
retry_total=self._config.retry_total,
retry_backoff_factor=self._config.retry_backoff_factor,
retry_backoff_max=self._config.retry_backoff_max,
**kwargs
)
except ValueError:
if (
sub_queue
): # If we got here and sub_queue is defined, it's an incorrect value or something unrelated.
raise
handler = ServiceBusReceiver(
fully_qualified_namespace=self.fully_qualified_namespace,
topic_name=topic_name,
subscription_name=subscription_name,
credential=self._credential,
logging_enable=self._config.logging_enable,
transport_type=self._config.transport_type,
http_proxy=self._config.http_proxy,
connection=self._connection,
user_agent=self._config.user_agent,
retry_total=self._config.retry_total,
retry_backoff_factor=self._config.retry_backoff_factor,
retry_backoff_max=self._config.retry_backoff_max,
**kwargs
)
self._handlers.append(handler)
return handler
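# Hedged usage sketch (not part of this module): a minimal send/receive round trip using the
# public package surface. The import path `azure.servicebus`, the environment variable name,
# and the queue name "sample-queue" are assumptions for illustration; nothing here runs at
# import time.
def _example_round_trip():
    import os
    from azure.servicebus import ServiceBusClient, ServiceBusMessage

    conn_str = os.environ["SERVICEBUS_CONNECTION_STR"]  # assumed env var holding a real connection string
    with ServiceBusClient.from_connection_string(conn_str) as client:
        with client.get_queue_sender("sample-queue") as sender:
            sender.send_messages(ServiceBusMessage("hello"))
        with client.get_queue_receiver("sample-queue", max_wait_time=5) as receiver:
            for msg in receiver:
                receiver.complete_message(msg)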
|
py | b4087280bfab10898b3c8d1a2809e8e15f3a7878 | from typing import Any, ClassVar, Dict, List, Optional, TYPE_CHECKING
# import json
from ambassador.ir.irfilter import IRFilter
from ..config import Config
from .irresource import IRResource
from .irhttpmapping import IRHTTPMapping
from .irtls import IRAmbassadorTLS
from .irtlscontext import IRTLSContext
from .ircors import IRCORS
from .irbuffer import IRBuffer
if TYPE_CHECKING:
from .ir import IR
class IRAmbassador (IRResource):
AModTransparentKeys: ClassVar = [
'admin_port',
'auth_enabled',
'default_label_domain',
'default_labels',
'diag_port',
'diagnostics',
'enable_ipv6',
'enable_ipv4',
'liveness_probe',
'readiness_probe',
'service_port',
'statsd',
'use_proxy_proto',
'use_remote_address',
'x_forwarded_proto_redirect'
]
service_port: int
diag_port: int
# Set up the default probes and such.
default_liveness_probe: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/check_alive",
"rewrite": "/ambassador/v0/check_alive",
}
default_readiness_probe: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/check_ready",
"rewrite": "/ambassador/v0/check_ready",
}
default_diagnostics: ClassVar[Dict[str, str]] = {
"prefix": "/ambassador/v0/",
"rewrite": "/ambassador/v0/",
}
def __init__(self, ir: 'IR', aconf: Config,
rkey: str="ir.ambassador",
kind: str="IRAmbassador",
name: str="ir.ambassador",
use_remote_address: bool=True,
**kwargs) -> None:
# print("IRAmbassador __init__ (%s %s %s)" % (kind, name, kwargs))
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, kind=kind, name=name,
service_port=80,
admin_port=8001,
diag_port=8877,
auth_enabled=None,
enable_ipv6=False,
enable_ipv4=True,
liveness_probe={"enabled": True},
readiness_probe={"enabled": True},
diagnostics={"enabled": True},
use_proxy_proto=False,
use_remote_address=use_remote_address,
x_forwarded_proto_redirect=False,
**kwargs
)
def setup(self, ir: 'IR', aconf: Config) -> bool:
# We're interested in the 'ambassador' module from the Config, if any...
amod = aconf.get_module("ambassador")
# Is there a TLS module in the Ambassador module?
if amod:
self.sourced_by(amod)
self.referenced_by(amod)
amod_tls = amod.get('tls', None)
if amod_tls:
# XXX What a hack. IRAmbassadorTLS.from_resource() should be able to make
# this painless.
new_args = dict(amod_tls)
new_rkey = new_args.pop('rkey', amod.rkey)
new_kind = new_args.pop('kind', 'Module')
new_name = new_args.pop('name', 'tls-from-ambassador-module')
new_location = new_args.pop('location', amod.location)
# Overwrite any existing TLS module.
ir.tls_module = IRAmbassadorTLS(ir, aconf,
rkey=new_rkey,
kind=new_kind,
name=new_name,
location=new_location,
**new_args)
# ir.logger.debug("IRAmbassador saving TLS module: %s" % ir.tls_module.as_json())
if ir.tls_module:
self.logger.debug("final TLS module: %s" % ir.tls_module.as_json())
# Stash a sane rkey and location for contexts we create.
ctx_rkey = ir.tls_module.get('rkey', self.rkey)
ctx_location = ir.tls_module.get('location', self.location)
# The TLS module 'server' and 'client' blocks are actually a _single_ TLSContext
# to Ambassador.
server = ir.tls_module.pop('server', None)
client = ir.tls_module.pop('client', None)
if server and server.get('enabled', True):
# We have a server half. Excellent.
ctx = IRTLSContext.from_legacy(ir, 'server', ctx_rkey, ctx_location,
cert=server, termination=True, validation_ca=client)
if ctx.is_active():
ir.save_tls_context(ctx)
# Other blocks in the TLS module weren't ever really documented, so I seriously doubt
# that they're a factor... but, weirdly, we have a test for them...
for legacy_name, legacy_ctx in ir.tls_module.as_dict().items():
if (legacy_name.startswith('_') or
(legacy_name == 'name') or
(legacy_name == 'location') or
(legacy_name == 'kind') or
(legacy_name == 'enabled')):
continue
ctx = IRTLSContext.from_legacy(ir, legacy_name, ctx_rkey, ctx_location,
cert=legacy_ctx, termination=False, validation_ca=None)
if ctx.is_active():
ir.save_tls_context(ctx)
# Finally, check TLSContext resources to see if we should enable TLS termination.
for ctx in ir.get_tls_contexts():
if ctx.get('hosts', None):
# This is a termination context
self.logger.debug("TLSContext %s is a termination context, enabling TLS termination" % ctx.name)
self.service_port = 443
if ctx.get('ca_cert', None):
# Client-side TLS is enabled.
self.logger.debug("TLSContext %s enables client certs!" % ctx.name)
# After that, check for port definitions, probes, etc., and copy them in
# as we find them.
for key in IRAmbassador.AModTransparentKeys:
if amod and (key in amod):
# Yes. It overrides the default.
self[key] = amod[key]
# If we don't have a default label domain, force it to 'ambassador'.
if not self.get('default_label_domain'):
self.default_label_domain = 'ambassador'
# Likewise, if we have no default labels, force an empty dict (it makes life easier
# on other modules).
if not self.get('default_labels'):
self.default_labels: Dict[str, Any] = {}
# Next up: diag port & services.
diag_port = aconf.module_lookup('ambassador', 'diag_port', 8877)
diag_service = "127.0.0.1:%d" % diag_port
for name, cur, dflt in [
("liveness", self.liveness_probe, IRAmbassador.default_liveness_probe),
("readiness", self.readiness_probe, IRAmbassador.default_readiness_probe),
("diagnostics", self.diagnostics, IRAmbassador.default_diagnostics)
]:
if cur and cur.get("enabled", False):
if not cur.get('prefix', None):
cur['prefix'] = dflt['prefix']
if not cur.get('rewrite', None):
cur['rewrite'] = dflt['rewrite']
if not cur.get('service', None):
cur['service'] = diag_service
if amod and ('enable_grpc_http11_bridge' in amod):
self.grpc_http11_bridge = IRFilter(ir=ir, aconf=aconf,
kind='ir.grpc_http1_bridge',
name='grpc_http1_bridge',
config=dict())
self.grpc_http11_bridge.sourced_by(amod)
ir.save_filter(self.grpc_http11_bridge)
# Buffer.
if amod and ('buffer' in amod):
self.buffer = IRBuffer(ir=ir, aconf=aconf, location=self.location, **amod.buffer)
if self.buffer:
ir.save_filter(self.buffer)
else:
return False
# Finally, default CORS stuff.
if amod and ('cors' in amod):
self.cors = IRCORS(ir=ir, aconf=aconf, location=self.location, **amod.cors)
if self.cors:
self.cors.referenced_by(self)
else:
return False
return True
def add_mappings(self, ir: 'IR', aconf: Config):
for name, cur in [
( "liveness", self.liveness_probe ),
( "readiness", self.readiness_probe ),
( "diagnostics", self.diagnostics )
]:
if cur and cur.get("enabled", False):
name = "internal_%s_probe_mapping" % name
mapping = IRHTTPMapping(ir, aconf, rkey=self.rkey, name=name, location=self.location,
timeout_ms=10000, **cur)
mapping.referenced_by(self)
ir.add_mapping(aconf, mapping)
def get_default_label_domain(self) -> str:
return self.default_label_domain
def get_default_labels(self, domain: Optional[str]=None) -> Optional[List]:
if not domain:
domain = self.get_default_label_domain()
domain_info = self.default_labels.get(domain, {})
self.logger.debug("default_labels info for %s: %s" % (domain, domain_info))
return domain_info.get('defaults')
def get_default_label_prefix(self, domain: Optional[str]=None) -> Optional[List]:
if not domain:
domain = self.get_default_label_domain()
domain_info = self.default_labels.get(domain, {})
return domain_info.get('label_prefix')
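# Hedged illustration (not part of the original module): the dictionary shape that
# get_default_labels() and get_default_label_prefix() above expect `default_labels` to have.
# The domain name, label entries, and prefix below are invented for illustration only.
_EXAMPLE_DEFAULT_LABELS = {
    "ambassador": {
        "defaults": [{"source_cluster": {}}, {"remote_address": {}}],
        "label_prefix": "x_",
    }
}
# With self.default_labels set to this mapping, get_default_labels("ambassador") would return
# the "defaults" list and get_default_label_prefix("ambassador") would return "x_".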
|