repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
emd/boutanalysis | profiles.py | 1 | 6970 | '''Create 1D and 2D profiles to be used in BOUT++ simulations'''
import numpy as np
from scipy import interpolate
from boututils import file_import, DataFile
from boutanalysis import grid
def csv_import(path, column, skip_header=1):
'''Import a 1D profile from a CSV file.
Parameters:
path -- string, path to the file containing the 1D profile
column -- int, if the CSV file contains several columns,
specify the index of the column containing the desired
profile, with 0 corresponding to the first column
skip_header -- int, the number of header rows to skip
before the profile information begins
Returns:
An array containing the 1D profile contained in the CSV file.
'''
return np.genfromtxt(open(path), usecols=column, skip_header=skip_header)
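# Illustrative use of csv_import (the file name and column layout below are
# assumptions, not part of this module): for a two-column CSV whose first
# column is normalized psi and whose second column is the profile value,
#   psi = csv_import('Te.profile.csv', 0)
#   Te = csv_import('Te.profile.csv', 1)
# would load the two arrays expected by interpolate2grid() below.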
def interpolate2grid(prof, psi, grid_path, dim=2, pf='decreasing'):
'''Interpolate given profile onto the specified BOUT++ grid.
Parameters:
prof -- 1D array, containing the profile (Q) to interpolate
onto the BOUT++ grid. The profile should be a flux function
such that Q = Q(psi)
psi -- 1D array, containing the normalized poloidal flux
(i.e. 0 on the magnetic axis and 1 on the separatrix)
grid_path -- str, path to the grid file to interpolate onto
dim -- int, valid options are: 1, 2
dim specifies dimension of resulting interpolated profile;
for example, many of the BOUT++ equilibrium profiles
(e.g. pressure, jpar, etc) are specified as 2D grids.
Dimensions of 3 or higher (which are *not* needed for
axisymmetric equilibria) are not supported.
pf -- str, valid options are: 'decreasing', 'flat', 'none'.
As the flux decreases moving radially outwards in
the private flux (PF) region, a simple flux function formalism is
no longer valid in this region. Instead, we must determine
an appropriate model for the PF region.
-- 'decreasing':
Create a dummy flux variable, psi', defined as
psi' = 1 + |psi - 1|
and then use the mapping Q_{PF}(psi') = Q_{non-PF}
such that the profile in the PF region behaves
similarly to the profile in the SOL
-- 'flat':
Gives a flat profile in the PF region, with the
constant value determined by the value of the
profile on the separatrix
-- 'none':
Simply uses the flux function formalism, i.e. Q = Q(psi).
For equilibrium values, such as the pressure, this
will give an unphysical grid and is *NOT* recommended
Returns:
An array of the specified dimension interpolated onto the
BOUT++ grid. This array can subsequently be written to
the grid file for use in simulations.
'''
g = file_import(grid_path)
if dim == 1:
psi_grid = grid.grid2psi(g, vector=True)
prof_interp = interpolate.spline(psi, prof, psi_grid)
# TODO: Generalize this to double null
elif dim == 2:
prof_interp = np.zeros(g['Rxy'].shape)
# PF region:
# Determine the poloidal indices of the PF region
pf_ind1 = np.arange(0, (g['jyseps1_1'] + 1))
pf_ind2 = np.arange((g['jyseps2_2'] + 1), prof_interp.shape[1])
pol_ind = np.concatenate((pf_ind1, pf_ind2))
# Restricting ourselves to the poloidal PF domain identified above,
# the PF region is fully specified by radial indices where psi < 1
psi_grid = grid.grid2psi(g, vector=True, yind=pol_ind[0])
rad_ind = np.where(psi_grid < 1.0)
sep_ind = np.max(rad_ind) + 1
if pf == 'decreasing':
psi_dummy = 1.0 + np.abs(psi_grid[rad_ind] - 1)
prof_interp[0:sep_ind, pol_ind] = interpolate.spline(
psi, prof, psi_dummy)[:, np.newaxis]
elif pf == 'flat':
prof_interp[0:sep_ind, pol_ind] = interpolate.spline(
psi, prof, 1.0)
elif pf == 'none':
prof_interp[0:sep_ind, pol_ind] = interpolate.spline(
psi, prof, psi_grid[rad_ind])[:, np.newaxis]
# Non-PF region 1:
# This region lies in the poloidal PF domain identified above,
# but it does *not* satisfy psi < 1 (that is, this region is
# in the SOL)
rad_ind = np.where(psi_grid >= 1.0)
prof_interp[sep_ind:, pol_ind] = interpolate.spline(
psi, prof, psi_grid[rad_ind])[:, np.newaxis]
# Non-PF region 2:
# The entire radial domain in this region (core and SOL)
# are *not* in the PF region
psi_grid = grid.grid2psi(g, vector=True)
pol_ind = np.arange(g['jyseps1_1'] + 1, g['jyseps2_2'])
prof_interp[:, pol_ind] = interpolate.spline(
psi, prof, psi_grid)[:, np.newaxis]
else:
raise ValueError('Interpolation not supported above 2D')
return prof_interp
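# Minimal sketch of interpolating a loaded profile onto a grid file (the
# path and variable names here are hypothetical):
#   Te2d = interpolate2grid(Te, psi, 'bout_grid.nc', dim=2, pf='decreasing')
# The returned 2D array matches g['Rxy'].shape and can then be passed to
# write2grid() below.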
def write2grid(prof, prof_name, grid_path, overwrite=False):
'''Write profile to a specified grid file.
Parameters:
prof -- 1D or 2D array, an equilibrium quantity to be
written to the grid file. An easy way to generate
an appropriate profile is to use the interpolate2grid(...)
routine above
prof_name -- str, name assigned to profile in grid file
grid_path -- str, path to the grid file to interpolate onto
overwrite -- bool, if False, this will prevent overwriting
a preexisting grid variable with the same name as prof_name
Returns:
0 if the write is successful; raises ValueError if prof_name is already
present in the grid file and overwrite is False.
'''
grid_file = DataFile(grid_path, write=True)
if not overwrite and (prof_name in grid_file.list()):
raise ValueError(prof_name + ' is already in use in '
+ grid_path
+ '. Specify overwrite=True to overwrite existing profile')
grid_file.write(prof_name, prof)
return 0
def main():
# Grid file to modify
grid_path = '/global/homes/e/emd/cmod/1110201023/kinetic/grids/x516y128_psiin085_psiout105_pf095_6field.nc'
# Profile information
dir = '/global/homes/e/emd/cmod/1110201023/kinetic/profiles/'
info = '.1110201023.00900.psin105'
vars = ['Ne', 'Ni', 'Te', 'Ti']
# BOUT++ 6-field simulations require units:
# [density] = 1e20 m^{-3}
# [temperature] = eV
# whereas the p-file has units
# [density] = 1e20 m^{-3}
# [temperature] = keV
# These scale factors are used to make the appropriate conversions
scale = [1e0, 1e0, 1e3, 1e3]
for i, var in enumerate(vars):
profile_path = dir + var + info
psi = csv_import(profile_path, 0)
profile = scale[i] * csv_import(profile_path, 1)
profile_grid = interpolate2grid(profile, psi, grid_path)
status = write2grid(profile_grid, var + 'exp', grid_path)
return 0
if __name__ == '__main__':
main()
| lgpl-3.0 | -1,307,815,740,842,791,700 | 34.74359 | 111 | 0.621377 | false |
OpenBazaar/openbazaar-go | qa/eth_refund_direct.py | 1 | 9272 | import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class EthRefundDirectTest(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[1]
bob = self.nodes[2]
# generate some coins and send them to bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address/" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Address endpoint not found")
else:
raise TestFailure("EthRefundDirectTest - FAIL: Unknown response")
time.sleep(2)
# generate some coins and send them to alice
time.sleep(4)
api_url = alice["gateway_url"] + "wallet/address/" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Address endpoint not found")
else:
raise TestFailure("EthRefundDirectTest - FAIL: Unknown response")
time.sleep(20)
# post profile for alice
with open('testdata/v5/profile.json') as profile_file:
profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/profile"
requests.post(api_url, data=json.dumps(profile_json, indent=4))
# post listing to alice
with open('testdata/v5/eth_listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
listing_json["item"]["priceCurrency"]["code"] = "T" + self.cointype
listing_json["metadata"]["acceptedCurrencies"] = ["T" + self.cointype]
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundDirectTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/v5/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["paymentCoin"] = "T" + self.cointype
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundDirectTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("EthRefundDirectTest - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("EthRefundDirectTest - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("EthRefundDirectTest - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("EthRefundDirectTest - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"currencyCode": "T" + self.cointype,
"address": payment_address,
"amount": payment_amount["amount"],
"feeLevel": "NORMAL",
"requireAssociateOrder": True,
"orderID": orderId
}
api_url = bob["gateway_url"] + "ob/orderspend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundDirectTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("EthRefundDirectTest - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("EthRefundDirectTest - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("EthRefundDirectTest - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("EthRefundDirectTest - FAIL: Alice incorrectly saved as unfunded")
# alice refund order
api_url = alice["gateway_url"] + "ob/refund"
refund = {"orderId": orderId}
r = requests.post(api_url, data=json.dumps(refund, indent=4))
if r.status_code == 404:
raise TestFailure("EthRefundDirectTest - FAIL: Refund endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("EthRefundDirectTest - FAIL: Refund POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# alice check order refunded correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("EthRefundDirectTest - FAIL: Alice failed to save as rejected")
if "refundAddressTransaction" not in resp:
raise TestFailure("EthRefundDirectTest - FAIL: Alice failed to record refund payment")
# bob check order refunded correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("EthRefundDirectTest - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "REFUNDED":
raise TestFailure("EthRefundDirectTest - FAIL: Bob failed to save as rejected")
if "refundAddressTransaction" not in resp:
raise TestFailure("EthRefundDirectTest - FAIL: Bob failed to record refund payment")
time.sleep(2)
# Check the funds moved into bob's wallet
api_url = bob["gateway_url"] + "wallet/balance/T" + self.cointype
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
#unconfirmed = int(resp["unconfirmed"])
#if confirmed <= 50 - int(payment_amount["amount"]):
# raise TestFailure("EthRefundDirectTest - FAIL: Bob failed to receive the multisig payout")
else:
raise TestFailure("EthRefundDirectTest - FAIL: Failed to query Bob's balance")
print("EthRefundDirectTest - PASS")
if __name__ == '__main__':
print("Running EthRefundDirectTest")
EthRefundDirectTest().main(["--regtest", "--disableexchangerates"])
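# A small helper along these lines could factor out the repeated
# GET / status-check / parse pattern used throughout run_test() above
# (sketch only, not part of the original test):
#   def get_json(url, failure_msg):
#       r = requests.get(url)
#       if r.status_code != 200:
#           raise TestFailure(failure_msg)
#       return json.loads(r.text)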
| mit | 7,355,317,307,970,098,000 | 44.45098 | 109 | 0.610009 | false |
gmjosack/pygerduty | pygerduty/v2.py | 1 | 20708 | import copy
import re
import six
from six.moves import urllib
from .common import (
Requester,
_lower,
_upper,
_singularize,
_pluralize,
_json_dumper,
)
__author__ = "Gary M. Josack <[email protected]>"
from .version import __version__, version_info # noqa
TRIGGER_LOG_ENTRY_RE = re.compile(
r'log_entries/(?P<log_entry_id>[A-Z0-9]+)'
)
# TODO:
# Support for Log Entries
# Support for Reports
class Error(Exception):
pass
class BadRequest(Error):
def __init__(self, payload, *args, **kwargs):
# Error responses don't always contain all fields.
# Sane defaults must be set.
self.code = payload.get("error", {}).get('code', 99999)
self.errors = payload.get("error", {}).get('errors', [])
self.message = payload.get("error", {}).get('message', str(payload))
Error.__init__(self, *args, **kwargs)
def __str__(self):
return "{0} ({1}): {2}".format(
self.message, self.code, self.errors)
class NotFound(Error):
pass
class Collection(object):
paginated = True
def __init__(self, pagerduty, base_container=None):
self.name = getattr(self, "name", False) or _lower(self.__class__.__name__)
self.sname = getattr(self, "sname", False) or _singularize(self.name)
self.container = (getattr(self, "container", False) or
globals()[_upper(self.sname)])
self.pagerduty = pagerduty
self.base_container = base_container
def create(self, **kwargs):
path = "{0}".format(self.name)
if self.base_container:
path = "{0}/{1}/{2}".format(
self.base_container.collection.name,
self.base_container.id, self.name)
data = {self.sname: {}}
extra_headers = {}
if "requester_id" in kwargs:
extra_headers["From"] = kwargs.pop("requester_id")
new_kwargs = Collection.process_kwargs(kwargs)
data[self.sname] = new_kwargs
response = self.pagerduty.request("POST", path, data=_json_dumper(data), extra_headers=extra_headers)
return self.container(self, **response.get(self.sname, {}))
@staticmethod
def process_kwargs(kwargs):
new_kwargs = {}
for kwarg_key, kwarg_value in kwargs.items():
if kwarg_key.endswith('_id'):
new_key = Collection.cut_suffix(kwarg_key)
new_kwargs[new_key] = Collection.id_to_obj(new_key, kwarg_value)
elif kwarg_key.endswith('_ids'):
new_key = Collection.cut_suffix(kwarg_key)
new_kwargs[_pluralize(new_key)] = Collection.ids_to_objs(new_key, kwarg_value)
else:
new_kwargs[kwarg_key] = kwarg_value
return new_kwargs
@staticmethod
def cut_suffix(key):
if key.endswith('_id'):
return key[:-3]
elif key.endswith('_ids'):
return key[:-4]
else:
return key
@staticmethod
def id_to_obj(key, value):
return {
"id": value,
"type": key
}
@staticmethod
def ids_to_objs(key, value):
new_kwargs = []
for v in value:
new_kwarg = Collection.id_to_obj(key, v)
new_kwargs.append(new_kwarg)
return new_kwargs
def update(self, entity_id, **kwargs):
path = "{0}/{1}".format(self.name, entity_id)
if self.base_container:
path = "{0}/{1}/{2}/{3}".format(
self.base_container.collection.name,
self.base_container.id, self.name, entity_id)
data = {self.sname: {}}
extra_headers = {}
if "requester_id" in kwargs:
extra_headers["From"] = kwargs.pop("requester_id")
data[self.sname] = kwargs
response = self.pagerduty.request("PUT", path, data=_json_dumper(data),
extra_headers=extra_headers)
return self.container(self, **response.get(self.sname, {}))
def _list_response(self, response):
entities = []
for entity in response.get(self.name, []):
entities.append(self.container(self, **entity))
return entities
def _list_no_pagination(self, **kwargs):
path = self.name
if self.base_container:
path = "{0}/{1}/{2}".format(
self.base_container.collection.name,
self.base_container.id, self.name)
suffix_path = kwargs.pop("_suffix_path", None)
if suffix_path is not None:
path += "/{0}".format(suffix_path)
response = self.pagerduty.request("GET", path, query_params=kwargs)
return self._list_response(response)
def list(self, **kwargs):
# Some APIs are paginated. If they are, and the user isn't doing
# pagination themselves, let's do it for them
if not self.paginated or any(key in kwargs for key in ('offset', 'limit')):
for i in self._list_no_pagination(**kwargs):
yield i
else:
offset = 0
limit = self.pagerduty.page_size
seen_items = set()
while True:
these_kwargs = copy.copy(kwargs)
these_kwargs.update({
'limit': limit,
'offset': offset,
})
this_paginated_result = self._list_no_pagination(**these_kwargs)
if not this_paginated_result:
break
for item in this_paginated_result:
if item.id in seen_items:
continue
seen_items.add(item.id)
yield item
offset += len(this_paginated_result)
if len(this_paginated_result) > limit:
# sometimes pagerduty decides to ignore your limit and
# just return everything. it seems to only do this when
# you're near the last page.
break
def count(self, **kwargs):
path = "{0}/count".format(self.name)
response = self.pagerduty.request("GET", path, query_params=kwargs)
return response.get("total", None)
def show(self, entity_id, **kwargs):
path = "{0}/{1}".format(self.name, entity_id)
if self.base_container:
path = "{0}/{1}/{2}/{3}".format(
self.base_container.collection.name,
self.base_container.id, self.name, entity_id)
response = self.pagerduty.request(
"GET", path, query_params=kwargs)
if response.get(self.sname):
return self.container(self, **response.get(self.sname, {}))
else:
return self.container(self, **response)
def delete(self, entity_id):
path = "{0}/{1}".format(self.name, entity_id)
if self.base_container:
path = "{0}/{1}/{2}/{3}".format(
self.base_container.collection.name,
self.base_container.id, self.name, entity_id)
response = self.pagerduty.request("DELETE", path)
return response
class MaintenanceWindows(Collection):
def list(self, **kwargs):
path = self.name
if "type" in kwargs:
path = "{0}/{1}".format(self.name, kwargs["type"])
del kwargs["type"]
response = self.pagerduty.request("GET", path, query_params=kwargs)
return self._list_response(response)
def update(self, entity_id, **kwargs):
path = "{0}/{1}".format(self.name, entity_id)
response = self.pagerduty.request("PUT", path, data=_json_dumper(kwargs))
return self.container(self, **response.get(self.sname, {}))
class Incidents(Collection):
def update(self, requester_id, *args):
path = "{0}".format(self.name)
extra_headers = {"From": requester_id}
data = {self.name: args}
response = self.pagerduty.request("PUT", path, data=_json_dumper(data), extra_headers=extra_headers)
return self.container(self, **response.get(self.sname, {}))
class Services(Collection):
def disable(self, entity_id, requester_id):
path = "{0}/{1}".format(self.name, entity_id)
extra_headers = {"From": requester_id}
data = {"status": "disable"}
response = self.pagerduty.request("PUT", path, data=_json_dumper(data), extra_headers=extra_headers)
return response
def enable(self, entity_id):
path = "{0}/{1}".format(self.name, entity_id)
data = {"status": "enable"}
response = self.pagerduty.request("PUT", path, data=_json_dumper(data))
return response
def regenerate_key(self, entity_id):
path = "{0}/{1}/regenerate_key".format(self.name, entity_id)
response = self.pagerduty.request("POST", path, data="")
return self.container(self, **response.get(self.sname, {}))
class Teams(Collection):
pass
class Alerts(Collection):
pass
class Overrides(Collection):
paginated = False
class EscalationPolicies(Collection):
pass
class EscalationRules(Collection):
paginated = False
def update(self, entity_id, **kwargs):
path = "{0}/{1}/{2}/{3}".format(
self.base_container.collection.name,
self.base_container.id, self.name, entity_id)
response = self.pagerduty.request("PUT", path, data=_json_dumper(kwargs))
return self.container(self, **response.get(self.sname, {}))
class Schedules(Collection):
def update(self, entity_id, **kwargs):
path = "{0}/{1}".format(self.name, entity_id)
data = {"overflow": kwargs["overflow"],
"schedule": kwargs["schedule"]}
response = self.pagerduty.request("PUT", path, data=_json_dumper(data))
return self.container(self, **response.get(self.sname, {}))
class ScheduleUsers(Collection):
"""This class exists because Users returned from a Schedule query are not
paginated, whereas responses for Users class are. This causes a pagination
bug if removed."""
name = 'users'
paginated = False
class Users(Collection):
pass
class Restrictions(Collection):
pass
class NotificationRules(Collection):
paginated = False
class ContactMethods(Collection):
paginated = False
class EmailFilters(Collection):
pass
class LogEntries(Collection):
pass
class Notes(Collection):
paginated = False
def update(self, *args, **kwargs):
raise NotImplementedError()
def count(self, *args, **kwargs):
raise NotImplementedError()
def show(self, *args, **kwargs):
raise NotImplementedError()
def delete(self, *args, **kwargs):
raise NotImplementedError()
class Container(object):
ATTR_NAME_OVERRIDE_KEY = '_attr_name_override'
def __init__(self, collection, **kwargs):
# This class depends on the existence of the _kwargs attr.
# Use object's __setattr__ to initialize.
object.__setattr__(self, "_kwargs", {})
self.collection = collection
self.pagerduty = collection.pagerduty
self._attr_overrides = kwargs.pop(Container.ATTR_NAME_OVERRIDE_KEY, None)
def _check_kwarg(key, value):
if isinstance(value, dict):
value[Container.ATTR_NAME_OVERRIDE_KEY] = self._attr_overrides
container = globals().get(_upper(_singularize(key)))
if container is not None and issubclass(container, Container):
_collection = globals().get(_upper(_pluralize(key)),
Collection)
return container(_collection(self.pagerduty), **value)
else:
return Container(Collection(self.pagerduty), **value)
return value
for key, value in kwargs.items():
if self._attr_overrides and key in self._attr_overrides:
key = self._attr_overrides[key]
if isinstance(value, list):
self._kwargs[key] = []
for item in value:
sname = _singularize(key)
self._kwargs[key].append(_check_kwarg(sname, item))
else:
self._kwargs[key] = _check_kwarg(key, value)
def __getattr__(self, name):
if name not in self._kwargs:
raise AttributeError(name)
return self._kwargs[name]
def __setattr__(self, name, value):
if name not in self._kwargs:
return object.__setattr__(self, name, value)
self._kwargs[name] = value
def __str__(self):
attrs = ["{0}={1}".format(k, repr(v)) for k, v in self._kwargs.items()]
return "<{0}: {1}>".format(self.__class__.__name__, ", ".join(attrs))
def __repr__(self):
return str(self)
def to_json(self):
json_dict = {}
overriden_attrs = dict()
if self._attr_overrides:
for key, value in self._attr_overrides.items():
overriden_attrs[value] = key
for key, value in self._kwargs.items():
if key in overriden_attrs:
key = overriden_attrs[key]
if isinstance(value, Container):
json_dict[key] = value.to_json()
elif isinstance(value, list):
json_dict[key] = []
for v in value:
if isinstance(v, Container):
json_dict[key].append(v.to_json())
else:
json_dict[key].append(v)
else:
json_dict[key] = value
return json_dict
class Incident(Container):
def __init__(self, *args, **kwargs):
Container.__init__(self, *args, **kwargs)
self.log_entries = LogEntries(self.pagerduty, self)
self.notes = Notes(self.pagerduty, self)
def _do_action(self, verb, requester, **kwargs):
path = '{0}/{1}'.format(self.collection.name, self.id)
data = {
"incident": {
"type": "incident_reference",
"status": verb
}
}
extra_headers = {'From': requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data), extra_headers=extra_headers)
def has_subject(self):
return hasattr(self.trigger_summary_data, 'subject')
def resolve(self, requester):
"""Resolve this incident.
:param requester: The email address of the individual acknowledging.
"""
self._do_action('resolved', requester=requester)
def acknowledge(self, requester):
"""Acknowledge this incident.
:param requester: The email address of the individual acknowledging.
"""
self._do_action('acknowledged', requester=requester)
def snooze(self, requester, duration):
"""Snooze incident.
:param requester: The email address of the individual requesting snooze.
"""
path = '{0}/{1}/{2}'.format(self.collection.name, self.id, 'snooze')
data = {"duration": duration}
extra_headers = {"From": requester}
return self.pagerduty.request('POST', path, data=_json_dumper(data), extra_headers=extra_headers)
def get_trigger_log_entry(self, **kwargs):
match = TRIGGER_LOG_ENTRY_RE.search(self.trigger_details_html_url)
return self.log_entries.show(match.group('log_entry_id'), **kwargs)
def reassign(self, user_ids, requester):
"""Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of individual requesting reassign
"""
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
raise Error('Must pass at least one user id')
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_reference"
}
}
assignments.append(ref)
data = {
"incidents": [
{
"id": self.id,
"type": "incident_reference",
"assignments": assignments
}
]
}
extra_headers = {"From": requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data), extra_headers=extra_headers)
class Note(Container):
pass
class Alert(Container):
pass
class EmailFilter(Container):
pass
class MaintenanceWindow(Container):
pass
class Override(Container):
pass
class NotificationRule(Container):
pass
class ContactMethod(Container):
pass
class EscalationPolicy(Container):
def __init__(self, *args, **kwargs):
Container.__init__(self, *args, **kwargs)
self.escalation_rules = EscalationRules(self.pagerduty, self)
class EscalationRule(Container):
pass
class RuleObject(Container):
pass
class ScheduleLayer(Container):
pass
class Service(Container):
def __init__(self, *args, **kwargs):
Container.__init__(self, *args, **kwargs)
self.email_filters = EmailFilters(self.pagerduty, self)
class Schedule(Container):
def __init__(self, *args, **kwargs):
# The json representation of Schedule has a field called
# "users". Rename it to schedule_users to avoid conflict with
# Users
kwargs[Container.ATTR_NAME_OVERRIDE_KEY] = {"users": "schedule_users"}
Container.__init__(self, *args, **kwargs)
self.overrides = Overrides(self.pagerduty, self)
self.users = ScheduleUsers(self.pagerduty, self)
class ScheduleUser(Container):
pass
class Team(Container):
pass
class Restriction(Container):
pass
class User(Container):
def __init__(self, *args, **kwargs):
Container.__init__(self, *args, **kwargs)
self.notification_rules = NotificationRules(self.pagerduty, self)
self.contact_methods = ContactMethods(self.pagerduty, self)
self.schedules = Schedules(self.pagerduty, self)
self.escalation_policies = EscalationPolicies(self.pagerduty, self)
self.log_entries = LogEntries(self.pagerduty, self)
class Entry(Container):
pass
class LogEntry(Container):
pass
class FinalSchedule(Container):
pass
class RenderSchedule(Container):
pass
class PagerDuty(object):
def __init__(self, api_token, timeout=10, page_size=25,
proxies=None, parse_datetime=False):
self.api_token = api_token
self._host = "api.pagerduty.com"
self._api_base = "https://{0}/".format(self._host)
self.timeout = timeout
self.page_size = page_size
self.requester = Requester(timeout=timeout, proxies=proxies, parse_datetime=parse_datetime)
# Collections
self.incidents = Incidents(self)
self.alerts = Alerts(self)
self.schedules = Schedules(self)
self.escalation_policies = EscalationPolicies(self)
self.users = Users(self)
self.services = Services(self)
self.maintenance_windows = MaintenanceWindows(self)
self.teams = Teams(self)
self.log_entries = LogEntries(self)
@staticmethod
def _process_query_params(query_params):
new_qp = []
for key, value in query_params.items():
if isinstance(value, (list, set, tuple)):
for elem in value:
new_qp.append(("{0}[]".format(key), elem))
else:
new_qp.append((key, value))
return urllib.parse.urlencode(new_qp)
def request(self, method, path, query_params=None, data=None,
extra_headers=None):
auth = "Token token={0}".format(self.api_token)
headers = {
"Accept": "application/vnd.pagerduty+json;version=2",
"Content-type": "application/json",
"Authorization": auth
}
if extra_headers:
headers.update(extra_headers)
if query_params is not None:
query_params = self._process_query_params(query_params)
url = urllib.parse.urljoin(self._api_base, path)
if query_params:
url += "?{0}".format(query_params)
if isinstance(data, six.text_type):
data = data.encode("utf-8")
request = urllib.request.Request(url, data=data, headers=headers)
request.get_method = lambda: method.upper()
return self.requester.execute_request(request)
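# Minimal usage sketch for this module (the token, status filter and e-mail
# address are placeholders, not part of the library):
#   pagerduty = PagerDuty("YOUR_API_TOKEN")
#   for incident in pagerduty.incidents.list(statuses=["triggered"]):
#       incident.acknowledge(requester="oncall@example.com")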
| mit | 1,439,807,039,352,841,200 | 30.186747 | 109 | 0.576975 | false |
ohanar/PolyBoRi | pyroot/polybori/blocks.py | 1 | 14700 | import sys
if __name__ == '__main__':
import pathadjuster
from polybori.PyPolyBoRi import Ring, VariableBlock, Polynomial
from polybori.PyPolyBoRi import VariableFactory, MonomialFactory
from itertools import chain, islice
#class BlockEndException(object):
#pass
#def __init__(self, arg):
# self.arg = arg
# pass
class Block(object):
"""The block class represents a block of variables
<var_name>(start_index,...,start_index+size-1), it is the preferred
block type for simple one-dimensional variable sets"""
def __init__(self, var_name, size, start_index=0, reverse=False):
indices = range(start_index, start_index + size)
if reverse:
indices.reverse()
#self.index2pos=dict([(v,k) for (k,v) in enumerate(indices)])
self.names = [var_name + "(" + str(i) + ")" for i in indices]
self.var_name = var_name
self.start_index = start_index
self.reverse = reverse
self.size = size
def __iter__(self):
return iter(self.names)
def __getitem__(self, i):
return self.names[i]
def __len__(self):
return self.size
def register(self, start, context):
#def var_func(i):
# return Variable(self.index2pos[i]+start)
ring_context = context
while isinstance(ring_context, PrefixedDictProxy):
ring_context = ring_context.wrapped
ring = ring_context['r']
var_func = VariableBlock(self.size, self.start_index, start, self.
reverse, ring)
var_func.__name__ = self.var_name
context[self.var_name] = var_func
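# For illustration (this mirrors the declare_ring() docstring further below):
# declare_ring([Block("x", 10), Block("y", 5)], globals()) creates a ring with
# variables x(0),...,x(9) and y(0),...,y(4) and registers the callables x and
# y in the given context.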
class AlternatingBlock(object):
"""The Alternating Block class is used for doing tricky variable
schemes, where base names vary, e.g.
a(0),b(0),a(1),b(1),a(2),b(2)"""
def __init__(self, var_names, size_per_variable, start_index=0,
reverse=False):
self.var_names = var_names
self.size_per_variable = size_per_variable
self.reverse = reverse
indices = range(start_index, start_index + size_per_variable)
if reverse:
indices.reverse()
names = []
for i in indices:
for n in var_names:
names.append(n + "(" + str(i) + ")")
self.indices = indices
self.index2pos = dict([(v, k) for (k, v) in enumerate(indices)])
self.names = names
def __len__(self):
return self.size_per_variable * len(self.var_names)
def __iter__(self):
return iter(self.names)
def __getitem__(self, i):
return self.names[i]
def register(self, start, context):
def gen_var_func(var_pos):
class var_factory(object):
def __init__(self, ring, index2pos, size):
self.ring = ring
self.index2pos = index2pos
self.size = size
def __call__(self, idx):
return self.ring.variable(self.index2pos[idx] * self.size +
var_pos + start)
ring_context = context
while isinstance(ring_context, PrefixedDictProxy):
ring_context = ring_context.wrapped
ring = ring_context['r']
return var_factory(ring, self.index2pos, len(self.var_names))
for (var_pos, n) in enumerate(self.var_names):
var_func = gen_var_func(var_pos)
var_func.__name__ = n
context[n] = var_func
def shift(f, i):
def g(j):
return f(i + j)
g.__name__ = f.__name__
return g
class AdderBlock(AlternatingBlock):
def __init__(self, adder_bits, sums="s", carries="c", input1="a",
input2="b", start_index=0):
AlternatingBlock.__init__(self, (sums, carries, input1, input2),
adder_bits, start_index=start_index, reverse=True)
self.input1 = input1
self.input2 = input2
self.sums = sums
self.carries = carries
self.start_index = start_index
self.adder_bits = adder_bits
def register(self, start, context):
super(AdderBlock, self).register(start, context)
a = context[self.input1]
b = context[self.input2]
self.s = shift(context[self.sums], self.start_index)
self.c = shift(context[self.carries], self.start_index)
a = shift(a, self.start_index)
b = shift(b, self.start_index)
carries = [Polynomial(a(0).ring().zero())]
for i in xrange(self.adder_bits):
#print i, ":"
c = 1 + (1 + a(i) * b(i)) * (1 + carries[-1] * a(i)) * (1 +
carries[-1] * b(i))
carries.append(c)
self.add_results = [a(i) + b(i) + carries[i] for i in xrange(self.
adder_bits)]
self.carries_polys = carries[1:]
#def s(i):
# return self.add_results[i-self.start_index]
#def c(i):
# return self.carries_polys[i-self.start_index]
#context[self.sums]=s
#context[self.carries]=c
def implement(self, equations):
for i in xrange(self.adder_bits):
equations.append(self.s(i) + self.add_results[i])
equations.append(self.c(i) + self.carries_polys[i])
pass
class HigherOrderBlock(object):
"""HigherOrderBlocks are multidimensional blocks of variables, for each dimension a seperate start_index and size can be specified
var_name : variables will be called <var_name>(multiindex), where multiindex is a tuple of the size <size_tuple>
size_tuple : specifies the sizes of the ranges of each component of the multi-indices
start_index_tuple : the multi-indices will be of the form start_index_tuple + a, where a is a multi-index with non-negative components
"""
def __init__(self, var_name, size_tuple, start_index_tuple=None,
reverse=False):
if start_index_tuple is None:
start_index_tuple = len(size_tuple) * (0, )
cart = [()]
assert len(size_tuple) == len(start_index_tuple)
outer_indices = range(len(size_tuple))
outer_indices.reverse()
for i in outer_indices:
s_i = start_index_tuple[i]
s = size_tuple[i]
#print "cart", cart
cart = [(j, ) + c for j in range(s_i, s_i + s) for c in cart]
if reverse:
cart.reverse()
self.cart = cart
self.cart2index = dict([(v, k) for (k, v) in enumerate(cart)])
self.var_name = var_name
self.names = [var_name + str(c) for c in cart]
pass
def __getitem__(self, i):
return self.names[i]
def __iter__(self):
return iter(self.names)
def __len__(self):
return len(self.names)
def register(self, start, context):
def var_func(*indices):
return Variable(self.cart2index[indices] + start)
var_func.__name__ = self.var_name
context[self.var_name] = var_func
class InOutBlock(object):
def __init__(self, out_size, in_size, output="out", input="in",
in_start_index=0, out_start_index=0,
out_reverse=False, in_reverse=False):
self.output = Block(var_name=output, start_index=out_start_index,
size=out_size, reverse=out_reverse)
self.input = Block(var_name=input, start_index=in_start_index,
size=in_size, reverse=in_reverse)
self.out_start_index = out_start_index
self.in_start_index = in_start_index
def __iter__(self):
return chain(self.output, self.input)
def __getitem__(self, i):
if (i < len(self.output)):
return self.output[i]
else:
return self.input[i - len(self.output)]
def __len__(self):
return len(self.output) + len(self.input)
def register(self, start, context):
self.output.register(start, context)
self.input.register(start + len(self.output), context)
self.out_vars = shift(context[self.output.var_name], self.
out_start_index)
self.in_vars = shift(context[self.input.var_name], self.in_start_index)
pass
class MultiBlock(object):
def __init__(self, sizes=[], var_names=["v"], start_indices=[], reverses=[
]):
self.start_indices = start_indices + [0] * (len(var_names) - len(
start_indices))
reverses += [False] * (len(var_names) - len(reverses))
sizes += [1] * (len(var_names) - len(sizes))
self.blocks = [Block(var_name=var_names[idx], size=sizes[idx],
start_index=self.start_indices[idx], reverse=reverses[idx]) for
idx in xrange(len(var_names))]
def __iter__(self):
return chain(*self.blocks)
def __getitem__(self, i):
return islice(chain(*self.blocks), i, i + 1).next()
# sum([bl.names for bl in self.blocks])[i]
def __len__(self):
return sum((len(bl) for bl in self.blocks))
def register(self, start, context):
offset = 0
for bl in self.blocks:
bl.register(start + offset, context)
offset += len(bl)
self.vars = [shift(context[self.blocks[idx].var_name], self.
start_indices[idx]) for idx in xrange(len(self.blocks))]
class PrefixedDictProxy(object):
"""docstring for PrefixedDictProxy"""
def __init__(self, wrapped, prefix):
super(PrefixedDictProxy, self).__init__()
self.wrapped = wrapped
self.prefix = prefix
def __getitem__(self, k):
try:
return self.wrapped[self.prefix + k]
except KeyError:
print self.prefix, k, list(self.wrapped)
raise KeyError
def __setitem__(self, k, v):
self.wrapped[self.prefix + k] = v
class MacroBlock(object):
def __init__(self, prefix):
self.prefix = prefix
self.blocks = []
self.combinations = []
self.connections = []
def declare(self, blocks):
self.blocks = blocks
def connect(self, combinations):
self.combinations = combinations
def __iter__(self):
return (self.prefix + "_" + n for n in chain(*self.blocks))
def __getitem__(self, i):
return self.prefix + "_" + islice(chain(*self.blocks), i, i + 1).next()
#for bl in self.blocks:
# if i >= len(bl):
# i -= len(bl)
# else:
# return bl[i]
def __len__(self):
return sum((len(bl) for bl in self.blocks))
def resolve(self, localname):
return self.prefix + "_" + localname
def register(self, start, context):
context = PrefixedDictProxy(context, self.prefix + "_")
offset = 0
for bl in self.blocks:
bl.register(start + offset, context)
offset += len(bl)
for ((con1, indices1), (con2, indices2)) in self.combinations:
for idx in xrange(min(len(indices1), len(indices2))):
self.connections += [context[con1](indices1[idx]) + context[
con2](indices2[idx])]
def implement(self, equations):
for bl in self.blocks:
if hasattr(bl, "implement"):
bl.implement(equations)
equations += self.connections
class IfThen(object):
def __init__(self, ifpart, thenpart, supposed_to_be_valid=True):
self.ifpart = [Polynomial(p) for p in ifpart]
self.thenpart = [Polynomial(p) for p in thenpart]
self.supposedToBeValid = supposed_to_be_valid
def __str__(self):
return ("If(AND(" + ", ".join([str(p) + " == 0" for p in self.ifpart])
+ ")), THEN " + ", ".join([str(p) + " == 0" for p in self.thenpart
]))
def if_then(i, t, supposed_to_be_valid=True):
return IfThen(i, t, supposed_to_be_valid)
def declare_ring(blocks, context=None):
"""Declare Ring is the preferred function to create a ring and declare a variable scheme,
the number of variables is automatically determined,
usually you pass globals() as context argument to store the ring and the variable mapping.
Example
declare_ring([Block("x",10),Block("y",5)],globals())
gives a ring with x(0..9),y(0..4) and registers the ring as r,
and the variable blocks x and y in the context dictionary globals(), which consists of the global variables of the python module
"""
if context is None:
context = sys.modules['__main__'].__dict__
def canonicalize(blocks):
for elt in blocks:
if isinstance(elt, str):
yield elt
else:
for subelt in elt:
yield subelt
blocks = list(blocks)
n = 0
for b in blocks:
if isinstance(b, str):
n = n + 1
else:
n = n + len(b)
r = Ring(n, names=canonicalize(blocks))
context["internalVariable"] = VariableFactory(r)
# context["Monomial"] = MonomialFactory(r)
context["r"] = r
declare_block_scheme(blocks, context)
return r
def declare_block_scheme(blocks, context):
start = 0
block_starts = []
ring = context["r"]
for b in blocks:
if start != 0:
block_starts.append(start)
if isinstance(b, str):
context[b] = context["internalVariable"](start)
#_cpp_set_variable_name(ring, start, b)
start = start + 1
else:
b.register(start, context)
#for (pos,name) in enumerate(b):
# _cpp_set_variable_name(ring, start+pos, name)
start = start + len(b)
context["block_start_hints"] = block_starts
context["number_of_declared_vars"] = start
def main():
r = Ring(1000)
ablock = AlternatingBlock(["a", "b", "c"], 100)
declare_block_scheme([ablock], globals())
for i in range(10):
print r.variable(i)
print list(ablock)
declare_block_scheme([
Block(var_name="x", size=100),
HigherOrderBlock("y", (3, 4, 11, 2)),
AlternatingBlock(["a", "b", "c"], 100)],
globals())
for i in range(10):
print x(i)
print y(0, 0, 0, 0)
print y(0, 0, 0, 1)
print y(0, 0, 1, 0)
print y(0, 0, 1, 1)
print a(0), a(1), a(2), b(0), b(1), c(0)
declare_block_scheme([
Block(var_name="x", size=100, reverse=True),
HigherOrderBlock("y", (3, 4, 11, 2), reverse=True),
AlternatingBlock(["a", "b", "c"], 100, reverse=True)],
globals())
for i in range(10):
print x(i)
print y(0, 0, 0, 0)
print y(0, 0, 0, 1)
print y(0, 0, 1, 0)
print y(0, 0, 1, 1)
print a(0), a(1), a(2), b(0), b(1), c(0)
declare_block_scheme(["a", "b", "c"], globals())
print a, b, c
if __name__ == '__main__':
main()
| gpl-2.0 | -2,597,420,333,108,813,300 | 31.522124 | 138 | 0.569592 | false |
wang-h/HieraParser | scripts/bootstrap_resampling_reorder_scores.py | 1 | 8809 | #!/usr/bin/python
# Author: Hao WANG
###############################################
# An implementation of paired bootstrap resampling for testing the statistical
# significance of the difference between two systems from (Koehn 2004 @ EMNLP)
# Specialized for reordering scores.
# Usage: ./bootstrap_resampling_reorder_scores.py reference_alignments hypothesis_1_orders hypothesis_2_orders
###############################################
import sys
import numpy as np
from tqdm import tqdm
#constants
TIMES_TO_REPEAT_SUBSAMPLING = 1000
SUBSAMPLE_SIZE = 0
# if 0 then subsample size is equal to the whole set
MAX_NGRAMS = 4
def less_or_equal(lhs, rhs):
"""Less-or-equal relation of source-side tokens.
The definition is from ``Neubig et al.: Inducing a Discriminative Parser to
Optimize Machine Translation Reordering''."""
return min(lhs) <= min(rhs) and max(lhs) <= max(rhs)
def read_align(line):
"""Read one example from the alignment file."""
if not line:
return None
line = line[:-1]
fields = line.split()
if len(fields) < 3:
sys.exit('Too few fields.')
if fields[1] != '|||':
sys.exit('Wrong format.')
values = fields[0].split('-')
src_num = int(values[0])
trg_num = int(values[1])
# aligns[i] contains the indices of the target tokens which are aligned to
# the (i+1)-th source token.
aligns = [set() for _ in range(src_num)]
for field in fields[2:]:
values = field.split('-')
src_id = int(values[0])
trg_id = int(values[1])
if src_id < 0 or src_id >= src_num or trg_id < 0 or trg_id >= trg_num:
sys.stderr.write(line)
sys.exit('Wrong alignment data: %s')
aligns[src_id].add(trg_id)
sorted_list = []
for i in range(src_num):
if not aligns[i]:
continue
pos = 0
eq = False
while pos < len(sorted_list):
le = less_or_equal(aligns[i], aligns[sorted_list[pos][0]])
ge = less_or_equal(aligns[sorted_list[pos][0]], aligns[i])
eq = le and ge
if not le and not ge:
return []
if le:
break
pos += 1
if not eq:
sorted_list.insert(pos, [])
sorted_list[pos].append(i)
alignment = [-1] * src_num
for i in range(len(sorted_list)):
for j in sorted_list[i]:
alignment[j] = i
alignment.append(len(sorted_list))
return alignment
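# Illustrative alignment line accepted by read_align (constructed example):
#   3-4 ||| 0-1 1-0 2-3
# i.e. "<src_len>-<trg_len> ||| <src>-<trg> ..." with zero-based token indices.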
def read_order(line):
"""Read one example from the order file."""
if not line:
return None
line = line[:-1]
order = line.split()
order = [int(item) for item in order]
return order
def calculate_Tau(alignment, order):
"""Calculate Kendall's Tau."""
src_num = len(order)
if src_num <= 1:
return 1.0
errors = 0
for i in range(src_num - 1):
for j in range(i + 1, src_num):
if alignment[order[i]] > alignment[order[j]]:
errors += 1
tau = 1.0 - float(errors) / (src_num * (src_num - 1) / 2)
return tau
def calculate_FRS(alignment, order):
"""Calculate the fuzzy reordering score."""
src_num = len(order)
if src_num <= 1:
return 1.0
discont = 0
for i in range(src_num + 1):
trg_prv = alignment[order[i - 1]] if i - 1 >= 0 else -1
trg_cur = alignment[order[i]] if i < src_num else alignment[-1]
if trg_prv != trg_cur and trg_prv + 1 != trg_cur:
discont += 1
frs = 1.0 - float(discont) / (src_num + 1)
return frs
def calculate_CMS(alignment, order):
"""Calculate the complete matching score."""
if calculate_Tau(alignment, order) < 1.0:
return 0.0
else:
return 1.0
def getFRS(aligns, orders, indices=None):
return _CalculateReorderingScores(aligns, orders, calculate_FRS, indices)
def getNKT(aligns, orders, indices=None):
return _CalculateReorderingScores(aligns, orders, calculate_Tau, indices)
def getCMS(aligns, orders, indices=None):
return _CalculateReorderingScores(aligns, orders, calculate_CMS, indices)
def _CalculateReorderingScores(aligns, orders, scoreFunc, indices=None):
num = 0
skipped = 0
sum_ = []
if indices is None:
candidates = range(len(aligns))
else:
candidates = indices
for idx in candidates:
alignment = aligns[idx]
order = orders[idx].copy()
if not alignment:
skipped += 1
continue
assert len(alignment) - 1 == len(order)
# Remove unaligned tokens.
for i, a in enumerate(alignment):
if a < 0:
order.remove(i)
num += 1
sum_.append(scoreFunc(alignment, order))
return sum(sum_)/num
def main(argv):
#checking cmdline argument consistency
if len(argv) != 4:
print("Usage: ./bootstrap-hypothesis-difference-significance.py hypothesis_1 hypothesis_2 reference\n", file=sys.stderr)
sys.exit(1)
print("reading data", file=sys.stderr)
#read all data
data = readAllData(argv)
# #calculate each sentence's contribution to BP and ngram precision
# print("rperforming preliminary calculations (hypothesis 1); ", file=sys.stderr)
# preEvalHypo(data, "hyp1")
# print("rperforming preliminary calculations (hypothesis 2); ", file=sys.stderr)
# preEvalHypo(data, "hyp2")
#start comparing
print("comparing hypotheses -- this may take some time; ", file=sys.stderr)
bootstrap_report(data, "Fuzzy Reordering Scores", getFRS)
bootstrap_report(data, "Normalized Kendall's Tau", getNKT)
bootstrap_report(data, "CMS", getCMS)
#####
def bootstrap_report(data, title, func):
# The actual (non-resampled) scores are computed over the full data set;
# only the confidence intervals and the p-value use bootstrap subsamples.
realScore1 = func(data["refs"], data["hyp1"])
realScore2 = func(data["refs"], data["hyp2"])
subSampleScoreDiffArr, subSampleScore1Arr, subSampleScore2Arr = bootstrap_pass(data, func)
scorePValue = bootstrap_pvalue(subSampleScoreDiffArr, realScore1, realScore2)
(scoreAvg1, scoreVar1) = bootstrap_interval(subSampleScore1Arr)
(scoreAvg2, scoreVar2) = bootstrap_interval(subSampleScore2Arr)
print ("\n---=== %s score ===---\n" % title)
print ("actual score of hypothesis 1: %f" % realScore1)
print ("95/100 confidence interval for hypothesis 1 score: %f +- %f"%(scoreAvg1, scoreVar1) + "\n-----\n")
print ("actual score of hypothesis 1: %f" % realScore2)
print ("95/100 confidence interval for hypothesis 2 score: %f +- %f"%(scoreAvg2, scoreVar2)+ "\n-----\n")
print ("Assuming that essentially the same system generated the two hypothesis translations (null-hypothesis),\n")
print ("the probability of actually getting them (p-value) is: %f\n"% scorePValue)
#####
def bootstrap_pass(data, scoreFunc):
subSampleDiffArr = []
subSample1Arr = []
subSample2Arr = []
#applying sampling
for idx in tqdm(range(TIMES_TO_REPEAT_SUBSAMPLING), ncols=80, postfix="Subsampling"):
subSampleIndices = np.random.choice(data["size"], SUBSAMPLE_SIZE if SUBSAMPLE_SIZE > 0 else data["size"], replace=True)
score1 = scoreFunc(data["refs"], data["hyp1"], subSampleIndices)
score2 = scoreFunc(data["refs"], data["hyp2"], subSampleIndices)
subSampleDiffArr.append(abs(score2 - score1))
subSample1Arr.append(score1)
subSample2Arr.append(score2)
return np.array(subSampleDiffArr), np.array(subSample1Arr), np.array(subSample2Arr)
#####
#
#####
def bootstrap_pvalue(subSampleDiffArr, realScore1, realScore2):
realDiff = abs(realScore2 - realScore1)
#get subsample difference mean
averageSubSampleDiff = subSampleDiffArr.mean()
#calculating p-value
count = 0.0
for subSampleDiff in subSampleDiffArr:
if subSampleDiff - averageSubSampleDiff >= realDiff:
count += 1
return count / TIMES_TO_REPEAT_SUBSAMPLING
#####
#
#####
def bootstrap_interval(subSampleArr):
sortedArr = sorted(subSampleArr, reverse=False)
lowerIdx = int(TIMES_TO_REPEAT_SUBSAMPLING / 40)
higherIdx = TIMES_TO_REPEAT_SUBSAMPLING - lowerIdx - 1
lower = sortedArr[lowerIdx]
higher = sortedArr[higherIdx]
diff = higher - lower
return (lower + 0.5 * diff, 0.5 * diff)
#####
# read 2 hyp and 1 to \infty ref data files
#####
def readAllData(argv):
assert len(argv[1:]) == 3
hypFile1, hypFile2 = argv[2:]
refFile = argv[1]
result = {}
#reading hypotheses and checking for matching sizes
result["hyp1"] = [read_order(line) for line in open(hypFile1)]
result["size"] = len(result["hyp1"])
result["hyp2"] = [read_order(line) for line in open(hypFile2)]
assert len(result["hyp2"]) == len(result["hyp1"])
refDataX = [read_align(line) for line in open(refFile)]
# updateCounts($result{ngramCounts}, $refDataX);
result["refs"] = refDataX
return result
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | 7,772,108,416,269,102,000 | 31.625926 | 128 | 0.644341 | false |
mnahm5/django-estore | Lib/site-packages/awscli/customizations/cloudfront.py | 1 | 10580 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import time
import random
import rsa
from botocore.utils import parse_to_aware_datetime
from botocore.signers import CloudFrontSigner
from awscli.arguments import CustomArgument
from awscli.customizations.utils import validate_mutually_exclusive_handler
from awscli.customizations.commands import BasicCommand
def register(event_handler):
event_handler.register('building-command-table.cloudfront', _add_sign)
# Provides a simpler --paths for ``aws cloudfront create-invalidation``
event_handler.register(
'building-argument-table.cloudfront.create-invalidation', _add_paths)
event_handler.register(
'operation-args-parsed.cloudfront.create-invalidation',
validate_mutually_exclusive_handler(['invalidation_batch'], ['paths']))
event_handler.register(
'operation-args-parsed.cloudfront.create-distribution',
validate_mutually_exclusive_handler(
['default_root_object', 'origin_domain_name'],
['distribution_config']))
event_handler.register(
'building-argument-table.cloudfront.create-distribution',
lambda argument_table, **kwargs: argument_table.__setitem__(
'origin-domain-name', OriginDomainName(argument_table)))
event_handler.register(
'building-argument-table.cloudfront.create-distribution',
lambda argument_table, **kwargs: argument_table.__setitem__(
'default-root-object', CreateDefaultRootObject(argument_table)))
context = {}
event_handler.register(
'top-level-args-parsed', context.update, unique_id='cloudfront')
event_handler.register(
'operation-args-parsed.cloudfront.update-distribution',
validate_mutually_exclusive_handler(
['default_root_object'], ['distribution_config']))
event_handler.register(
'building-argument-table.cloudfront.update-distribution',
lambda argument_table, **kwargs: argument_table.__setitem__(
'default-root-object', UpdateDefaultRootObject(
context=context, argument_table=argument_table)))
def unique_string(prefix='cli'):
return '%s-%s-%s' % (prefix, int(time.time()), random.randint(1, 1000000))
def _add_paths(argument_table, **kwargs):
argument_table['invalidation-batch'].required = False
argument_table['paths'] = PathsArgument()
class PathsArgument(CustomArgument):
def __init__(self):
doc = (
'The space-separated paths to be invalidated.'
' Note: --invalidation-batch and --paths are mutually exclusive.'
)
super(PathsArgument, self).__init__('paths', nargs='+', help_text=doc)
def add_to_params(self, parameters, value):
if value is not None:
parameters['InvalidationBatch'] = {
"CallerReference": unique_string(),
"Paths": {"Quantity": len(value), "Items": value},
}
class ExclusiveArgument(CustomArgument):
DOC = '%s This argument and --%s are mutually exclusive.'
def __init__(self, name, argument_table,
exclusive_to='distribution-config', help_text=''):
argument_table[exclusive_to].required = False
super(ExclusiveArgument, self).__init__(
name, help_text=self.DOC % (help_text, exclusive_to))
def distribution_config_template(self):
return {
"CallerReference": unique_string(),
"Origins": {"Quantity": 0, "Items": []},
"DefaultCacheBehavior": {
"TargetOriginId": "placeholder",
"ForwardedValues": {
"QueryString": False,
"Cookies": {"Forward": "none"},
},
"TrustedSigners": {
"Enabled": False,
"Quantity": 0
},
"ViewerProtocolPolicy": "allow-all",
"MinTTL": 0
},
"Enabled": True,
"Comment": "",
}
class OriginDomainName(ExclusiveArgument):
def __init__(self, argument_table):
super(OriginDomainName, self).__init__(
'origin-domain-name', argument_table,
help_text='The domain name for your origin.')
def add_to_params(self, parameters, value):
if value is None:
return
parameters.setdefault(
'DistributionConfig', self.distribution_config_template())
origin_id = unique_string(prefix=value)
item = {"Id": origin_id, "DomainName": value, "OriginPath": ''}
if item['DomainName'].endswith('.s3.amazonaws.com'):
# We do not need to detect '.s3[\w-].amazonaws.com' as S3 buckets,
# because CloudFront treats GovCloud S3 buckets as custom domain.
# http://docs.aws.amazon.com/govcloud-us/latest/UserGuide/setting-up-cloudfront.html
item["S3OriginConfig"] = {"OriginAccessIdentity": ""}
else:
item["CustomOriginConfig"] = {
'HTTPPort': 80, 'HTTPSPort': 443,
'OriginProtocolPolicy': 'http-only'}
parameters['DistributionConfig']['Origins'] = {
"Quantity": 1, "Items": [item]}
parameters['DistributionConfig']['DefaultCacheBehavior'][
'TargetOriginId'] = origin_id
class CreateDefaultRootObject(ExclusiveArgument):
def __init__(self, argument_table, help_text=''):
super(CreateDefaultRootObject, self).__init__(
'default-root-object', argument_table, help_text=help_text or (
'The object that you want CloudFront to return (for example, '
'index.html) when a viewer request points to your root URL.'))
def add_to_params(self, parameters, value):
if value is not None:
parameters.setdefault(
'DistributionConfig', self.distribution_config_template())
parameters['DistributionConfig']['DefaultRootObject'] = value
class UpdateDefaultRootObject(CreateDefaultRootObject):
def __init__(self, context, argument_table):
super(UpdateDefaultRootObject, self).__init__(
argument_table, help_text=(
'The object that you want CloudFront to return (for example, '
'index.html) when a viewer request points to your root URL. '
'CLI will automatically make a get-distribution-config call '
'to load and preserve your other settings.'))
self.context = context
def add_to_params(self, parameters, value):
if value is not None:
client = self.context['session'].create_client(
'cloudfront',
region_name=self.context['parsed_args'].region,
endpoint_url=self.context['parsed_args'].endpoint_url,
verify=self.context['parsed_args'].verify_ssl)
response = client.get_distribution_config(Id=parameters['Id'])
parameters['IfMatch'] = response['ETag']
parameters['DistributionConfig'] = response['DistributionConfig']
parameters['DistributionConfig']['DefaultRootObject'] = value
def _add_sign(command_table, session, **kwargs):
command_table['sign'] = SignCommand(session)
class SignCommand(BasicCommand):
NAME = 'sign'
DESCRIPTION = 'Sign a given url.'
DATE_FORMAT = """Supported formats include:
YYYY-MM-DD (which means 0AM UTC of that day),
YYYY-MM-DDThh:mm:ss (with default timezone as UTC),
YYYY-MM-DDThh:mm:ss+hh:mm or YYYY-MM-DDThh:mm:ss-hh:mm (with offset),
or EpochTime (which always means UTC).
Do NOT use YYYYMMDD, because it will be treated as EpochTime."""
ARG_TABLE = [
{
'name': 'url',
'no_paramfile': True, # To disable the default paramfile behavior
'required': True,
'help_text': 'The URL to be signed',
},
{
'name': 'key-pair-id',
'required': True,
'help_text': (
"The active CloudFront key pair Id for the key pair "
"that you're using to generate the signature."),
},
{
'name': 'private-key',
'required': True,
'help_text': 'file://path/to/your/private-key.pem',
},
{
'name': 'date-less-than', 'required': True,
'help_text':
'The expiration date and time for the URL. ' + DATE_FORMAT,
},
{
'name': 'date-greater-than',
'help_text':
'An optional start date and time for the URL. ' + DATE_FORMAT,
},
{
'name': 'ip-address',
'help_text': (
                'An optional IP address or IP address range from which the '
                'client is allowed to make the GET request. Format: x.x.x.x/x or x.x.x.x'),
},
]
def _run_main(self, args, parsed_globals):
signer = CloudFrontSigner(
args.key_pair_id, RSASigner(args.private_key).sign)
date_less_than = parse_to_aware_datetime(args.date_less_than)
date_greater_than = args.date_greater_than
if date_greater_than is not None:
date_greater_than = parse_to_aware_datetime(date_greater_than)
if date_greater_than is not None or args.ip_address is not None:
policy = signer.build_policy(
args.url, date_less_than, date_greater_than=date_greater_than,
ip_address=args.ip_address)
sys.stdout.write(signer.generate_presigned_url(
args.url, policy=policy))
else:
sys.stdout.write(signer.generate_presigned_url(
args.url, date_less_than=date_less_than))
return 0
class RSASigner(object):
def __init__(self, private_key):
self.priv_key = rsa.PrivateKey.load_pkcs1(private_key.encode('utf8'))
def sign(self, message):
return rsa.sign(message, self.priv_key, 'SHA-1')
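# Illustrative usage sketch (not part of the original module): the 'sign'
# subcommand defined above is typically invoked from the CLI roughly as below.
# The URL, key pair id, key path and date are hypothetical placeholders.
#
#   aws cloudfront sign \
#       --url https://dxxxxxxxxxxxxx.cloudfront.net/private.txt \
#       --key-pair-id APKAEXAMPLE \
#       --private-key file://cf-signer.pem \
#       --date-less-than 2017-12-31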
| mit | -4,161,104,277,783,351,000 | 39.692308 | 96 | 0.608507 | false |
dellsystem/wikinotes | mdx/mdx_wiki_def_list.py | 1 | 3254 | #!/usr/bin/env Python
"""
Definition List Extension for Python-Markdown
=============================================
Added parsing of Definition Lists to Python-Markdown.
A simple example:
Apple
: Pomaceous fruit of plants of the genus Malus in
the family Rosaceae.
: An american computer company.
Orange
: The fruit of an evergreen tree of the genus Citrus.
Copyright 2008 - [Waylan Limberg](http://achinghead.com)
"""
import re
import markdown
from markdown.util import etree
class DefListProcessor(markdown.blockprocessors.BlockProcessor):
""" Process Definition Lists. """
RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
def test(self, parent, block):
return bool(self.RE.search(block))
def run(self, parent, blocks):
block = blocks.pop(0)
m = self.RE.search(block)
terms = [l for l in block[:m.start()].split('\n') if l.strip()]
block = block[m.end():]
d, theRest = self.detab(block)
# Got rid of the noindent thing (basically set it to false always)
if d:
d = '%s\n%s' % (m.group(2), d)
else:
d = m.group(2)
sibling = self.lastChild(parent)
if not terms and sibling.tag == 'p':
# The previous paragraph contains the terms
state = 'looselist'
terms = sibling.text.split('\n')
parent.remove(sibling)
            # Acquire new sibling
sibling = self.lastChild(parent)
else:
state = 'list'
if sibling and sibling.tag == 'dl':
# This is another item on an existing list
dl = sibling
if len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
state = 'looselist'
else:
# This is a new list
dl = etree.SubElement(parent, 'dl')
# Add terms
for term in terms:
dt = etree.SubElement(dl, 'dt')
dt.text = term
# Add definition
self.parser.state.set(state)
dd = etree.SubElement(dl, 'dd')
self.parser.parseBlocks(dd, [d])
self.parser.state.reset()
if theRest:
blocks.insert(0, theRest)
class DefListIndentProcessor(markdown.blockprocessors.ListIndentProcessor):
""" Process indented children of definition list items. """
ITEM_TYPES = ['dd']
LIST_TYPES = ['dl']
def create_item(self, parent, block):
""" Create a new dd and parse the block with it as the parent. """
        dd = etree.SubElement(parent, 'dd')
self.parser.parseBlocks(dd, [block])
class DefListExtension(markdown.Extension):
""" Add definition lists to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of DefListProcessor to BlockParser. """
md.parser.blockprocessors.add('defindent',
DefListIndentProcessor(md.parser),
'>indent')
md.parser.blockprocessors.add('deflist',
DefListProcessor(md.parser),
'>ulist')
def makeExtension(configs={}):
return DefListExtension(configs=configs)
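# Illustrative usage sketch (not part of the original module); assumes this
# file is importable as `mdx_wiki_def_list` and a compatible Python-Markdown
# version is installed.
#
#   import markdown
#   from mdx_wiki_def_list import DefListExtension
#
#   text = "Apple\n:   Pomaceous fruit of plants of the genus Malus."
#   html = markdown.markdown(text, extensions=[DefListExtension()])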
| gpl-3.0 | 4,532,069,618,653,678,600 | 30.288462 | 75 | 0.560541 | false |
harterj/moose | python/MooseDocs/extensions/datetime.py | 5 | 2202 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import datetime
from ..base import components
from ..common import exceptions
from ..tree import tokens, html, latex
from . import command
def make_extension(**kwargs):
return DateTimeExtension(**kwargs)
DateTime = tokens.newToken('DateTime', datetime=None, format=None, inline=True)
class DateTimeExtension(command.CommandExtension):
"""
Adds ability to include date/time information.
"""
@staticmethod
def defaultConfig():
config = command.CommandExtension.defaultConfig()
return config
def extend(self, reader, renderer):
self.requires(command)
self.addCommand(reader, TodayCommand())
renderer.add('DateTime', RenderDateTime())
class TodayCommand(command.CommandComponent):
COMMAND = 'datetime'
SUBCOMMAND = 'today'
@staticmethod
def defaultSettings():
settings = command.CommandComponent.defaultSettings()
settings['format'] = ('%Y-%m-%d', "The date format (see python datetime).")
return settings
def createToken(self, parent, info, page):
content = info['inline'] if 'inline' in info else info['block']
if content:
raise exceptions.MooseDocsException("Content is not supported for the 'datetime today' command.")
DateTime(parent, datetime=datetime.date.today(),
inline='inline' in info,
format=self.settings['format'])
return parent
class RenderDateTime(components.RenderComponent):
def createHTML(self, parent, token, page):
html.Tag(parent, 'span' if token['inline'] else 'p',
class_='moose-datetime',
string=token['datetime'].strftime(token['format']))
return parent
def createLatex(self, parent, token, page):
latex.String(parent, content=token['datetime'].strftime(token['format']))
return parent
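# Illustrative usage sketch (not part of the original module): in a MooseDocs
# markdown page the command registered above would be written roughly as
# below; the exact markup and format value are assumptions based on the
# COMMAND/SUBCOMMAND and the 'format' setting defined in TodayCommand.
#
#   !datetime today format=%B %d, %Y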
| lgpl-2.1 | 3,319,022,733,818,852,000 | 32.363636 | 109 | 0.673025 | false |
fireeye/flare-wmi | python-cim/tests/test_object_resolver.py | 1 | 13198 | import cim
import cim.objects
from fixtures import *
def test_object_resolver(repo):
"""
Args:
repo (cim.CIM): the deleted-instance repo
Returns:
None
"""
resolver = cim.objects.ObjectResolver(repo)
assert len(resolver.get_keys(cim.Key('NS_'))) == 47490
for key in resolver.get_keys(cim.Key('NS_')):
if not key.is_data_reference:
continue
o = resolver.get_object(key)
assert o is not None
def test_root_namespace(repo):
"""
Args:
repo (cim.CIM): the deleted-instance repo
Returns:
None
"""
with cim.objects.Namespace(repo, cim.objects.ROOT_NAMESPACE_NAME) as ns:
''':type: ns: cim.objects.TreeNamespace '''
assert ns.parent == None
# children namespaces
assert sorted(map(lambda n: n.name, ns.namespaces)) == ['__SystemClass',
'root\\CIMV2',
'root\\Cli',
'root\\DEFAULT',
'root\\Interop',
'root\\Microsoft',
'root\\PEH',
'root\\Policy',
'root\\RSOP',
'root\\SECURITY',
'root\\SecurityCenter',
'root\\SecurityCenter2',
'root\\ServiceModel',
'root\\ThinPrint',
'root\\WMI',
'root\\aspnet',
'root\\directory',
'root\\nap',
'root\\subscription', ]
# children classes
assert sorted(map(lambda n: n.name, ns.classes)) == ['CIM_ClassCreation',
'CIM_ClassDeletion',
'CIM_ClassIndication',
'CIM_ClassModification',
'CIM_Error',
'CIM_Indication',
'CIM_InstCreation',
'CIM_InstDeletion',
'CIM_InstIndication',
'CIM_InstModification',
'MSFT_ExtendedStatus',
'MSFT_WmiError',
'__ACE',
'__AbsoluteTimerInstruction',
'__AggregateEvent',
'__ArbitratorConfiguration',
'__CIMOMIdentification',
'__CacheControl',
'__ClassCreationEvent',
'__ClassDeletionEvent',
'__ClassModificationEvent',
'__ClassOperationEvent',
'__ClassProviderRegistration',
'__ConsumerFailureEvent',
'__Event',
'__EventConsumer',
'__EventConsumerProviderCacheControl',
'__EventConsumerProviderRegistration',
'__EventDroppedEvent',
'__EventFilter',
'__EventGenerator',
'__EventProviderCacheControl',
'__EventProviderRegistration',
'__EventQueueOverflowEvent',
'__EventSinkCacheControl',
'__ExtendedStatus',
'__ExtrinsicEvent',
'__FilterToConsumerBinding',
'__IndicationRelated',
'__InstanceCreationEvent',
'__InstanceDeletionEvent',
'__InstanceModificationEvent',
'__InstanceOperationEvent',
'__InstanceProviderRegistration',
'__IntervalTimerInstruction',
'__ListOfEventActiveNamespaces',
'__MethodInvocationEvent',
'__MethodProviderRegistration',
'__NAMESPACE',
'__NTLMUser9X',
'__NamespaceCreationEvent',
'__NamespaceDeletionEvent',
'__NamespaceModificationEvent',
'__NamespaceOperationEvent',
'__NotifyStatus',
'__ObjectProviderCacheControl',
'__ObjectProviderRegistration',
'__PARAMETERS',
'__PropertyProviderCacheControl',
'__PropertyProviderRegistration',
'__Provider',
'__ProviderHostQuotaConfiguration',
'__ProviderRegistration',
'__QOSFailureEvent',
'__SecurityDescriptor',
'__SecurityRelatedClass',
'__SystemClass',
'__SystemEvent',
'__SystemSecurity',
'__TimerEvent',
'__TimerInstruction',
'__TimerNextFiring',
'__Trustee',
'__Win32Provider',
'__thisNAMESPACE']
def test_object_count(root):
"""
enumerate all the objects in the repository.
Args:
root (cim.objects.TreeNamespace): the root namespace
Returns:
None
"""
namespaces = []
classes = []
instances = []
def collect(ns):
for namespace in ns.namespaces:
namespaces.append(namespace)
for klass in ns.classes:
classes.append(klass)
for instance in klass.instances:
instances.append(instance)
for namespace in ns.namespaces:
collect(namespace)
collect(root)
# collected empirically
assert len(namespaces) == 55
assert len(classes) == 8162
assert len(instances) == 1887
def test_class_definitions(classes):
"""
parse all qualifiers and properties from all class definitions in the repository.
    demonstrates there are no critical errors encountered while enumerating classes.
Args:
classes (List[cim.objects.TreeClassDefinition]): the list of classes found in the win7/deleted-instance repo.
Returns:
None
"""
qualifiers = []
properties = []
propqualifiers = []
for klass in classes:
definition = klass.cd
# these are the qualifiers that apply to the class itself
for qualname, qualval in definition.qualifiers.items():
qualifiers.append((klass.ns, klass.name, qualname, qualval))
# these are the properties defined on the class
for propname, propref in definition.properties.items():
properties.append((klass.ns, klass.name, propname))
# these are the qualifiers that apply to the property on the class
for qualname, qualval in propref.qualifiers.items():
propqualifiers.append((klass.ns, klass.name, propname, qualname, qualval))
# collected empirically
assert len(qualifiers) == 17650
assert len(properties) == 27431
assert len(propqualifiers) == 66948
def test_class_layouts(classes):
"""
parse all class layouts from all class definitions in the repository.
    demonstrates there are no critical errors encountered while enumerating classes.
Args:
classes (List[cim.objects.TreeClassDefinition]): the list of classes found in the win7/deleted-instance repo.
Returns:
None
"""
derivations = []
properties = []
for klass in classes:
layout = klass.cl
derivations.append((klass.ns, klass.name, layout.derivation))
for propname, propval in layout.properties.items():
if propval.has_default_value:
properties.append((klass.ns, klass.name, propname, propval.default_value))
else:
properties.append((klass.ns, klass.name, propname, None))
# collected empirically
assert len(derivations) == 8162
assert len(properties) == 53867
def test_class_instances(classes):
"""
parse all class instances from all class definitions in the repository.
    demonstrates there are no critical errors encountered while enumerating classes.
Args:
classes (List[cim.objects.TreeClassDefinition]): the list of classes found in the win7/deleted-instance repo.
Returns:
None
"""
qualifiers = []
properties = []
propqualifiers = []
for klass in classes:
for instance in klass.instances:
# these are the qualifiers that apply to the instance itself
for qualname, qualval in instance.qualifiers.items():
qualifiers.append((klass.ns, klass.name, instance.key, qualname, qualval))
# these are the properties defined on the instance
for propname, propref in instance.properties.items():
if propref.is_initialized:
properties.append((klass.ns, klass.name, instance.key, propname, propref.value))
else:
properties.append((klass.ns, klass.name, instance.key, propname, None))
# these are the qualifiers that apply to the property on the instance
for qualname, qualval in propref.qualifiers.items():
propqualifiers.append((klass.ns, klass.name, propname, qualname, qualval))
# collected empirically
assert len(qualifiers) == 12
assert len(properties) == 8237
assert len(propqualifiers) == 20117
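# Illustrative sketch (not part of the original tests): walking a repository
# with the same APIs exercised above; `repo` is assumed to be the cim.CIM
# fixture described in the docstrings.
#
#   with cim.objects.Namespace(repo, cim.objects.ROOT_NAMESPACE_NAME) as root:
#       for klass in root.classes:
#           print(klass.name, len(list(klass.instances)))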
| apache-2.0 | 1,072,370,550,895,224,200 | 47.701107 | 117 | 0.37877 | false |
mshuffett/MetaPyMusic | retry.py | 1 | 1637 | import time
import math
import logging
# Retry decorator with exponential backoff
def retry(tries, delay=3, backoff=2, test_f=lambda x: bool(x)):
    '''Retries a function or method until test_f applied to its return value returns True.
    The default test_f returns True when the function's return value is truthy.
delay sets the initial delay in seconds, and backoff sets the factor by which
the delay should lengthen after each failure. backoff must be greater than 1,
or else it isn't really a backoff. tries must be at least 0, and delay
greater than 0.'''
if backoff <= 1:
raise ValueError("backoff must be greater than 1")
tries = math.floor(tries)
if tries < 0:
raise ValueError("tries must be 0 or greater")
if delay <= 0:
raise ValueError("delay must be greater than 0")
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay # make mutable
rv = f(*args, **kwargs) # first attempt
logging.info('Try 1 complete')
while mtries > 0:
if test_f(rv) is True: # Done on success
return rv
mtries -= 1 # consume an attempt
time.sleep(mdelay) # wait...
mdelay *= backoff # make future wait longer
rv = f(*args, **kwargs) # Try again
logging.info('Try %d complete' % (tries - mtries + 1))
return rv # Ran out of tries :-(
return f_retry # true decorator -> decorated function
return deco_retry # @retry(arg[, ...]) -> true decorator
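# Illustrative usage sketch (not part of the original module): a flaky
# operation that only returns a truthy value on its third call. The names
# below are hypothetical.
if __name__ == '__main__':
    calls = {'count': 0}
    @retry(tries=5, delay=0.1, backoff=2)
    def flaky_fetch():
        calls['count'] += 1
        return calls['count'] >= 3  # truthy only from the third call onwards
    print(flaky_fetch())  # expected to print True after two retries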
| gpl-2.0 | -8,846,816,822,790,871,000 | 33.851064 | 85 | 0.597434 | false |
ikresoft/django-sitetree | sitetree/fields.py | 1 | 1970 | from django import template
from django.forms import ChoiceField
from django.utils.safestring import mark_safe
from .templatetags.sitetree import sitetree_tree
from .utils import get_tree_model, get_tree_item_model
MODEL_TREE_CLASS = get_tree_model()
MODEL_TREE_ITEM_CLASS = get_tree_item_model()
class TreeItemChoiceField(ChoiceField):
"""Generic sitetree item field.
Customized ChoiceField with TreeItems of a certain tree.
Accepts the `tree` kwarg - tree model or alias.
Use `initial` kwarg to set initial sitetree item by its ID.
"""
template = 'admin/sitetree/tree/tree_combo.html'
root_title = '---------'
def __init__(self, tree, required=True, widget=None, label=None, initial=None, help_text=None, *args, **kwargs):
super(TreeItemChoiceField, self).__init__(required=required, widget=widget, label=label, initial=initial,
help_text=help_text, *args, **kwargs)
if isinstance(tree, MODEL_TREE_CLASS):
tree = tree.alias
self.tree = tree
self.choices = self._build_choices()
def _build_choices(self):
"""Build choices list runtime using 'sitetree_tree' tag"""
tree_token = u'sitetree_tree from "%s" template "%s"' % (self.tree, self.template)
choices_str = sitetree_tree(template.Parser(None),
template.Token(token_type=template.TOKEN_BLOCK,
contents=tree_token)).render(template.Context(current_app='admin'))
tree_choices = [('', self.root_title)]
for line in choices_str.splitlines():
if line.strip():
splitted = line.split(':::')
tree_choices.append((splitted[0], mark_safe(splitted[1])))
return tree_choices
def clean(self, value):
if not value:
return None
return MODEL_TREE_ITEM_CLASS.objects.get(pk=value)
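# Illustrative usage sketch (not part of the original module): declaring the
# field on a form; the form class and the 'main' tree alias are hypothetical.
#
#   from django import forms
#   from sitetree.fields import TreeItemChoiceField
#
#   class MoveItemForm(forms.Form):
#       new_parent = TreeItemChoiceField(tree='main', required=False)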
| bsd-3-clause | -623,359,590,279,110,500 | 39.204082 | 118 | 0.617259 | false |
chiralhat/mnm-python | cr7mnsim/dimerfuncs.py | 1 | 5390 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 13:54:24 2016
These functions set up the Hamiltonians for various ways of dealing with Cr7Mn
dimers, including the full Spin-1 treatment, the truncated Spin-1/2 treatment,
and the Spin-1/2 rotating frame treatment.
@author: ccollett
"""
import qutip as qt
import numpy as np
import scipy.constants as sc
ubG=sc.physical_constants['Bohr magneton in Hz/T'][0]/1e9/1e4
def spin_system(S):
sx, sy, sz = [x/S for x in qt.jmat(S)]
si = qt.qeye(2*S + 1)
return (sx, sy, sz, si)
def two_spin_system(S):
sx,sy,sz,si=spin_system(S)
sx1,sy1,sz1=[qt.tensor(s,si) for s in [sx,sy,sz]]
sx2,sy2,sz2=[qt.tensor(si,s) for s in [sx,sy,sz]]
return sx1,sy1,sz1,sx2,sy2,sz2
def rotating_states(t, E, S=1/2):
sx, sy, sz, si = spin_system(S)
c = np.cos(E * t / 2)
s = np.sin(E * t / 2)
cs = 2 * c * s
c2s2 = c**2 - s**2
syp = cs*sz + c2s2*sy
szp = c2s2*sz + cs*sy
return [sx, syp, szp, si]
def spin_rotators(operators, theta=np.pi/4, S=1/2):
sx, sy, sz, si = operators
if S == 1/2:
Rx = np.cos(theta)*si - 1j*np.sin(theta)*sx
Ry = np.cos(theta)*si - 1j*np.sin(theta)*sy
Rz = np.cos(theta)*si - 1j*np.sin(theta)*sz
else:
Rx = si + 2j*np.sin(theta)*np.cos(theta)*sx + 1/2*(2j*np.sin(theta)*sx)**2
Ry = si + 2j*np.sin(theta)*np.cos(theta)*sx + 1/2*(2j*np.sin(theta)*sy)**2
Rz = si + 2j*np.sin(theta)*np.cos(theta)*sx + 1/2*(2j*np.sin(theta)*sz)**2
return Rx, Ry, Rz
def two_spin_rotators(E1, E2, theta=np.pi / 4, t=0, S=1/2):
all_operators = [rotating_states(t, E, S) for E in [E1, E2]]
Rs = [spin_rotators(operators, theta) for operators in all_operators]
si = qt.qeye(2*S + 1)
R1s = [qt.tensor(R,si) for R in Rs[0]]
R2s = [qt.tensor(si,R) for R in Rs[1]]
return R1s + R2s
def cr_h_shalf(E1,E2,J,Jp):
sx1,sy1,sz1,sx2,sy2,sz2=two_spin_system(1/2)
return E1*sx1 + E2*sx2 + J*sz1*sz2+Jp*(sx1*sx2+sy1*sy2)
def cr_h_s1(D1,D2,E1,E2,J,Jp):
sx1,sy1,sz1,sx2,sy2,sz2=two_spin_system(1)
return D1*sz1**2+D2*sz2**2 + E1*(sx1**2-sy1**2) + E2*(sx2**2-sy2**2)+J*sz1*sz2+Jp*(sx1*sx2+sy1*sy2)
def cr_h_rot(E1,E2,J,Jp):
sx1,sy1,sz1,sx2,sy2,sz2=two_spin_system(1/2)
U1=(1j*E1*sx1).expm()
U2=(1j*E2*sx2).expm()
y1,z1=[U1.dag()*o*U1 for o in [sy1,sz1]]
y2,z2=[U2.dag()*o*U2 for o in [sy2,sz2]]
return J*z1*z2+Jp*(sx1*sx2+y1*y2)
#Define single qubit Hamiltonian
def cr_ham_single(h,D=24.2,E=1.95,g=1.96,theta=0):
hscale=ubG*g
sx,sy,sz,_=spin_system(1)
return -D*sz**2+E*(sx**2-sy**2)+h*hscale*(np.cos(theta*np.pi/180)*sz+np.sin(theta*np.pi/180)*sx)
def cr_ham_single_shalf(h,E=1.95,g=1.96,theta=0):
hscale=ubG*g
sx,sy,sz, _=spin_system(1/2)
return E*(sx**2-sy**2)+h*hscale*(np.cos(theta*np.pi/180)*sz+np.sin(theta*np.pi/180)*sx)
#Quantum control functions: these go into the setup of the time-dep hamiltonian
def j_evolve(theta,Ham,J,tst=0,npts=500):
tend=tst+theta/2/J
def H1c(t,args):
return 0
H1=[Ham,H1c]
return H1,tend
def e_evolve(fHam,E1,E2,tau,tst=0,npts=500):
tend=tst+tau
def H1c(t,args):
if t>=tst and t<tend:
return 1
else:
return 0
H1=[fHam,H1c]
return H1,tend
def r2_spin(axis,ops,theta,qubit,args,Ham,tst=0,nrot=13):
sp=qubit-1
sx1,sy1,sz1,sx2,sy2,sz2=ops
w0=2*np.array(args['Es'])
if axis=='z':
phi=0
else:
phi=np.pi/2
tend=tst+nrot*theta/args['w1']/2
def H1coeff(t,args,fun,spt):
if t>=tst and t<tend:
return args['w1']*fun((w0[sp]-w0[spt])*t+phi)
else:
return 0
def H1z_coeff(t,args):
return H1coeff(t,args,np.cos,0)
def H2z_coeff(t,args):
return H1coeff(t,args,np.cos,1)
def H1y_coeff(t,args):
return H1coeff(t,args,np.sin,0)
def H2y_coeff(t,args):
return H1coeff(t,args,np.sin,1)
# tlist=[tend]
H1s=[[sz1,H1z_coeff],[sy1,H1y_coeff],[sz2,H2z_coeff],[sy2,H2y_coeff]]
return H1s,tend
# This formalism comes from Vandersypen and Chuang, RMP 76, 1037
def r2_spin_rot(axis,ops,theta,qubit,args,Ham,tst=0,nrot=13):
sp=qubit-1
sx1,sy1,sz1,sx2,sy2,sz2=ops
w0=2*np.array(args['Es'])
if axis=='z':
phi=0
else:
phi=np.pi/2
tend=tst+nrot*theta/args['w1']/2
def H1coeff(t,args,fun,spt):
if t>=tst and t<tend:
return args['w1']*fun((w0[sp]-w0[spt])*t+phi)
else:
return 0
def H1z_coeff(t,args):
return H1coeff(t,args,np.cos,0)
def H2z_coeff(t,args):
return H1coeff(t,args,np.cos,1)
def H1y_coeff(t,args):
return H1coeff(t,args,np.sin,0)
def H2y_coeff(t,args):
return H1coeff(t,args,np.sin,1)
# tlist=[tend]
H1s=[[sz1,H1z_coeff],[sy1,H1y_coeff],[sz2,H2z_coeff],[sy2,H2y_coeff]]
return H1s,tend
def spin_echo(axis,spin,args,Ham,fullHam,E1,E2,tau,tst=0,nrot=13):
H1,t1=r2_spin_rot('z',np.pi/2,1,args,Ham,tst=tst)
H2,t2=e_evolve(fullHam,E1,E2,tau,tst=t1)
H3,t3=r2_spin_rot('z',np.pi,1,args,Ham,tst=t2)
H4,t4=e_evolve(fullHam,E1,E2,tau,tst=t3)
# H1s=[*H1,H2,*H3,H4]
H1s=H1+[H2]+H3+[H4]
return H1s,t4
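# Illustrative sketch (not part of the original module): build the truncated
# spin-1/2 dimer Hamiltonian defined above and inspect its spectrum. The
# parameter values (in GHz) are hypothetical placeholders, not fitted Cr7Mn
# numbers.
if __name__ == '__main__':
    H = cr_h_shalf(E1=1.95, E2=1.95, J=0.1, Jp=0.01)
    print(H.eigenenergies())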
| bsd-3-clause | -4,823,955,798,372,183,000 | 30.865854 | 103 | 0.576438 | false |
proversity-org/problem-builder | problem_builder/south_migrations/0002_copy_from_mentoring.py | 1 | 2213 | # -*- coding: utf-8 -*-
from django.db.utils import DatabaseError
from south.db import db
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
"""
Copy student data from old table to the new one.
Problem Builder stores student answers in 'problem_builder_answer'.
However earlier versions [may have] used 'mentoring_answer'.
If a 'mentoring' app is currently installed on this instance, copy the student data over
to the new table in case it is being used.
"""
try:
db.execute(
'INSERT INTO problem_builder_answer ('
'name, student_id, course_id, student_input, created_on, modified_on '
') SELECT '
'name, student_id, course_id, student_input, created_on, modified_on '
'FROM mentoring_answer'
)
except DatabaseError: # Would like to just catch 'Table does not exist' but can't do that in a db-agnostic way
print(" - Seems like mentoring_answer does not exist. No data migration needed.")
def backwards(self, orm):
raise RuntimeError("Cannot safely reverse this migration.")
models = {
'problem_builder.answer': {
'Meta': {'unique_together': "(('student_id', 'course_id', 'name'),)", 'object_name': 'Answer'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'student_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'student_input': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
}
}
complete_apps = ['problem_builder']
symmetrical = True
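# Illustrative note (not part of the original migration): with South installed,
# this data migration would normally be applied with the standard management
# command, e.g.
#
#   python manage.py migrate problem_builder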
| agpl-3.0 | 4,631,452,300,160,067,000 | 47.108696 | 119 | 0.586082 | false |
amruth27m/Speedway-R420-Alarm-Python | dataBaseHandler.py | 1 | 1438 | #!/usr/bin/env python3
#Program to log the gate events to database
import pymysql
from encryptor import Encryptor
class DatabaseHandler:
def __init__(self):
self._data = Encryptor.getData(Encryptor())
def connect(self):
try:
self._conn = pymysql.Connect(self._data['databaseHost'],self._data['databaseUserName'],self._data['databasePassword'],self._data['databaseDatabase'])
print('MySQL connection successfully established')
return True
except pymysql.Error as e:
print(str(e))
return False
def disconnect(self):
try:
self._conn.close()
return True
except pymysql.Error as e:
print(str(e))
return False
except AttributeError:
print('Connection not established')
return False
def insertGatelog(self,book_id,book_name,alaram_time):
self.connect()
query = '''INSERT INTO gatelog (book_id,book_name,alaram_time) values('''+'\''+str(book_id)+'\''+',\''+str(book_name)+'\',\''+str(alaram_time)+'\');'
print(query)
cursor = self._conn.cursor()
try:
cursor.execute(query)
self._conn.commit()
print('Data successfully written')
except:
self._conn.rollback()
print('Failed to write data')
finally:
self.disconnect()
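# Illustrative usage sketch (not part of the original script): logging a
# single alarm event; the values below are hypothetical and assume the
# encryptor data file and the MySQL 'gatelog' table already exist.
#
#   handler = DatabaseHandler()
#   handler.insertGatelog(42, 'Some Book Title', '2019-01-01 10:00:00')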
| gpl-3.0 | 7,264,559,808,139,672,000 | 31.681818 | 161 | 0.572323 | false |
CurrencyCloud/currencycloud-python | tests/integration/test_authentication.py | 1 | 2080 | from betamax import Betamax
from currencycloud import Client, Config
class TestAuthentication:
def setup_method(self, method):
        # TODO: To run against a real server please delete ../fixtures/vcr_cassettes/* and replace
# login_id and api_key with valid credentials before running the tests
login_id = '[email protected]'
api_key = 'deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef'
environment = Config.ENV_DEMO
self.client = Client(login_id, api_key, environment)
def test_authentication_happens_lazily(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('authentication/happens_lazily')
assert self.client.config._auth_token is None
assert self.client.config.auth_token is not None
def test_authentication_can_reuse_an_auth_token(self):
special_client = Client(None, None, Config.ENV_DEMO)
special_client.config.auth_token = "deadbeefdeadbeefdeadbeefdeadbeef"
with Betamax(special_client.config.session) as betamax:
betamax.use_cassette('authentication/can_use_just_a_token')
response = special_client.beneficiaries.find()
assert response is not None
def test_authentication_can_be_closed(self):
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('authentication/can_be_closed')
assert self.client.config.auth_token is not None
assert self.client.close_session() is True
assert self.client.config._auth_token is None
def test_authentication_handles_session_timeout(self):
# Set the token to an invalid one
self.client.config.auth_token = 'deadbeefdeadbeefdeadbeefdeadbeef'
with Betamax(self.client.config.session) as betamax:
betamax.use_cassette('authentication/handles_session_timeout', match_requests_on=['uri', 'method'])
response = self.client.beneficiaries.find()
assert response is not None
| mit | 5,715,076,033,479,736,000 | 40.6 | 111 | 0.692308 | false |
HaebinShin/tensorflow | tensorflow/contrib/learn/python/learn/tests/monitors_test.py | 1 | 8758 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Monitors tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib import testing
from tensorflow.contrib.learn.python import learn
from tensorflow.python.platform import tf_logging as logging
class _MyEveryN(learn.monitors.EveryN):
def __init__(self, every_n_steps=100, first_n_steps=1):
super(_MyEveryN, self).__init__(
every_n_steps=every_n_steps, first_n_steps=first_n_steps)
self._steps_begun = []
self._steps_ended = []
self._post_steps = []
@property
def steps_begun(self):
return self._steps_begun
@property
def steps_ended(self):
return self._steps_ended
@property
def post_steps(self):
return self._post_steps
def every_n_step_begin(self, step):
super(_MyEveryN, self).every_n_step_begin(step)
self._steps_begun.append(step)
return []
def every_n_step_end(self, step, outputs):
super(_MyEveryN, self).every_n_step_end(step, outputs)
self._steps_ended.append(step)
return False
def every_n_post_step(self, step, session):
super(_MyEveryN, self).every_n_post_step(step, session)
self._post_steps.append(step)
return False
class MonitorsTest(tf.test.TestCase):
"""Monitors tests."""
def setUp(self):
# Mock out logging calls so we can verify whether correct tensors are being
# monitored.
self._actual_log = logging.info
def mockLog(*args, **kwargs):
self.logged_message = args
self._actual_log(*args, **kwargs)
logging.info = mockLog
def tearDown(self):
logging.info = self._actual_log
def _run_monitor(self, monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=True):
if pass_max_steps:
max_steps = num_epochs * num_steps_per_epoch - 1
else:
max_steps = None
monitor.begin(max_steps=max_steps, init_step=0)
for epoch in xrange(num_epochs):
monitor.epoch_begin(epoch)
should_stop = False
step = epoch * num_steps_per_epoch
next_epoch_step = step + num_steps_per_epoch
while (not should_stop) and (step < next_epoch_step):
tensors = monitor.step_begin(step)
output = tf.get_default_session().run(tensors) if tensors else {}
output = dict(zip(
[t.name if isinstance(t, tf.Tensor) else t for t in tensors],
output))
should_stop = monitor.step_end(step=step, output=output)
monitor.post_step(step=step, session=None)
step += 1
monitor.epoch_end(epoch)
monitor.end()
def test_base_monitor(self):
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(learn.monitors.BaseMonitor())
def test_every_n(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
expected_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(expected_steps, monitor.steps_begun)
self.assertEqual(expected_steps, monitor.steps_ended)
self.assertEqual(expected_steps, monitor.post_steps)
def test_every_n_no_max_steps(self):
monitor = _MyEveryN(every_n_steps=8, first_n_steps=2)
with tf.Graph().as_default() as g, self.test_session(g):
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10,
pass_max_steps=False)
begin_end_steps = [0, 1, 2, 10, 18, 26]
post_steps = [0, 1, 2, 10, 18, 26, 29]
self.assertEqual(begin_end_steps, monitor.steps_begun)
self.assertEqual(begin_end_steps, monitor.steps_ended)
self.assertEqual(post_steps, monitor.post_steps)
def test_print(self):
with tf.Graph().as_default() as g, self.test_session(g):
t = tf.constant(42.0, name='foo')
self._run_monitor(learn.monitors.PrintTensor(tensor_names=[t.name]))
self.assertRegexpMatches(str(self.logged_message), t.name)
def test_logging_trainable(self):
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(tf.constant(42.0), name='foo')
var.initializer.run()
cof = tf.constant(1.0)
loss = tf.sub(tf.mul(var, cof), tf.constant(1.0))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
tf.get_default_session().run(train_step)
self._run_monitor(learn.monitors.LoggingTrainable('foo'))
self.assertRegexpMatches(str(self.logged_message), var.name)
def test_summary_saver(self):
with tf.Graph().as_default() as g, self.test_session(g):
log_dir = 'log/dir'
summary_writer = testing.FakeSummaryWriter(log_dir, g)
var = tf.Variable(0.0)
var.initializer.run()
tensor = tf.assign_add(var, 1.0)
summary_op = tf.scalar_summary('my_summary', tensor)
self._run_monitor(
learn.monitors.SummarySaver(
summary_op=summary_op, save_steps=8,
summary_writer=summary_writer),
num_epochs=3, num_steps_per_epoch=10)
summary_writer.assert_summaries(
test_case=self, expected_logdir=log_dir, expected_graph=g,
expected_summaries={
0: {'my_summary': 1.0},
1: {'my_summary': 2.0},
9: {'my_summary': 3.0},
17: {'my_summary': 4.0},
25: {'my_summary': 5.0},
29: {'my_summary': 6.0},
})
# TODO(b/29293803): Add better tests with a mocked estimator.
def test_validation_monitor(self):
monitor = learn.monitors.ValidationMonitor(x=tf.constant(2.0))
with tf.Graph().as_default() as g, self.test_session(g):
with self.assertRaisesRegexp(ValueError, 'set_estimator'):
self._run_monitor(monitor)
def test_graph_dump(self):
monitor0 = learn.monitors.GraphDump()
monitor1 = learn.monitors.GraphDump()
with tf.Graph().as_default() as g, self.test_session(g):
const_var = tf.Variable(42.0, name='my_const')
counter_var = tf.Variable(0.0, name='my_counter')
assign_add = tf.assign_add(counter_var, 1.0, name='my_assign_add')
tf.initialize_all_variables().run()
self._run_monitor(monitor0, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 1.0,
assign_add.name: step + 1.0,
} for step in xrange(30)
}, monitor0.data)
self._run_monitor(monitor1, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
step: {
const_var.name: 42.0,
counter_var.name: step + 31.0,
assign_add.name: step + 31.0,
} for step in xrange(30)
}, monitor1.data)
for step in xrange(30):
matched, non_matched = monitor1.compare(monitor0, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 31.0, step + 1.0),
counter_var.name: (step + 31.0, step + 1.0),
}, non_matched)
matched, non_matched = monitor0.compare(monitor1, step=step)
self.assertEqual([const_var.name], matched)
self.assertEqual({
assign_add.name: (step + 1.0, step + 31.0),
counter_var.name: (step + 1.0, step + 31.0),
}, non_matched)
def test_capture_variable(self):
monitor = learn.monitors.CaptureVariable(
var_name='my_assign_add:0', every_n=8, first_n=2)
with tf.Graph().as_default() as g, self.test_session(g):
var = tf.Variable(0.0, name='my_var')
var.initializer.run()
tf.assign_add(var, 1.0, name='my_assign_add')
self._run_monitor(monitor, num_epochs=3, num_steps_per_epoch=10)
self.assertEqual({
0: 1.0,
1: 2.0,
2: 3.0,
10: 4.0,
18: 5.0,
26: 6.0,
29: 7.0,
}, monitor.values)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 1,881,653,203,717,929,500 | 35.340249 | 80 | 0.622859 | false |
AlexMog/IRCPokemonBot | irclib.py | 1 | 48783 | # Copyright (C) 1999--2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# keltus <[email protected]>
#
# $Id: irclib.py,v 1.47 2008/09/25 22:00:59 keltus Exp $
"""irclib -- Internet Relay Chat (IRC) protocol client library.
This library is intended to encapsulate the IRC protocol at a quite
low level. It provides an event-driven IRC client framework. It has
fairly thorough support for the basic IRC protocol, CTCP and DCC chat,
but DCC file transfers are not yet supported.
In order to understand how to make an IRC client, I'm afraid you more
or less must understand the IRC specifications. They are available
here: [IRC specifications].
The main features of the IRC client framework are:
* Abstraction of the IRC protocol.
* Handles multiple simultaneous IRC server connections.
* Handles server PONGing transparently.
* Messages to the IRC server are done by calling methods on an IRC
connection object.
  * Messages from an IRC server trigger events, which can be caught
by event handlers.
* Reading from and writing to IRC server sockets are normally done
by an internal select() loop, but the select()ing may be done by
an external main loop.
* Functions can be registered to execute at specified times by the
event-loop.
* Decodes CTCP tagging correctly (hopefully); I haven't seen any
other IRC client implementation that handles the CTCP
    specification subtleties.
* A kind of simple, single-server, object-oriented IRC client class
that dispatches events to instance methods is included.
Current limitations:
* The IRC protocol shines through the abstraction a bit too much.
* Data is not written asynchronously to the server, i.e. the write()
may block if the TCP buffers are stuffed.
  * There is no support for DCC file transfers.
  * The author hasn't even read RFC 2810, 2811, 2812 and 2813.
* Like most projects, documentation is lacking...
.. [IRC specifications] http://www.irchelp.org/irchelp/rfc/
"""
import bisect
import re
import select
import socket
import string
import sys
import time
import types
VERSION = 0, 4, 8
DEBUG = 0
MOG_DEBUG = 1
# TODO
# ----
# (maybe) thread safety
# (maybe) color parser convenience functions
# documentation (including all event types)
# (maybe) add awareness of different types of ircds
# send data asynchronously to the server (and DCC connections)
# (maybe) automatically close unused, passive DCC connections after a while
# NOTES
# -----
# connection.quit() only sends QUIT to the server.
# ERROR from the server triggers the error event and the disconnect event.
# dropping of the connection triggers the disconnect event.
class IRCError(Exception):
"""Represents an IRC exception."""
pass
class IRC:
"""Class that handles one or several IRC server connections.
When an IRC object has been instantiated, it can be used to create
Connection objects that represent the IRC connections. The
responsibility of the IRC object is to provide an event-driven
framework for the connections and to keep the connections alive.
It runs a select loop to poll each connection's TCP socket and
hands over the sockets with incoming data for processing by the
corresponding connection.
The methods of most interest for an IRC client writer are server,
add_global_handler, remove_global_handler, execute_at,
execute_delayed, process_once and process_forever.
Here is an example:
irc = irclib.IRC()
server = irc.server()
server.connect(\"irc.some.where\", 6667, \"my_nickname\")
server.privmsg(\"a_nickname\", \"Hi there!\")
irc.process_forever()
This will connect to the IRC server irc.some.where on port 6667
using the nickname my_nickname and send the message \"Hi there!\"
to the nickname a_nickname.
"""
def __init__(self, fn_to_add_socket=None,
fn_to_remove_socket=None,
fn_to_add_timeout=None):
"""Constructor for IRC objects.
Optional arguments are fn_to_add_socket, fn_to_remove_socket
and fn_to_add_timeout. The first two specify functions that
will be called with a socket object as argument when the IRC
object wants to be notified (or stop being notified) of data
coming on a new socket. When new data arrives, the method
process_data should be called. Similarly, fn_to_add_timeout
is called with a number of seconds (a floating point number)
as first argument when the IRC object wants to receive a
notification (by calling the process_timeout method). So, if
e.g. the argument is 42.17, the object wants the
process_timeout method to be called after 42 seconds and 170
milliseconds.
The three arguments mainly exist to be able to use an external
main loop (for example Tkinter's or PyGTK's main app loop)
instead of calling the process_forever method.
An alternative is to just call ServerConnection.process_once()
once in a while.
"""
if fn_to_add_socket and fn_to_remove_socket:
self.fn_to_add_socket = fn_to_add_socket
self.fn_to_remove_socket = fn_to_remove_socket
else:
self.fn_to_add_socket = None
self.fn_to_remove_socket = None
self.fn_to_add_timeout = fn_to_add_timeout
self.connections = []
self.handlers = {}
self.delayed_commands = [] # list of tuples in the format (time, function, arguments)
self.add_global_handler("ping", _ping_ponger, -42)
def server(self):
"""Creates and returns a ServerConnection object."""
c = ServerConnection(self)
self.connections.append(c)
return c
def process_data(self, sockets):
"""Called when there is more data to read on connection sockets.
Arguments:
sockets -- A list of socket objects.
See documentation for IRC.__init__.
"""
for s in sockets:
for c in self.connections:
if s == c._get_socket():
c.process_data()
def process_timeout(self):
"""Called when a timeout notification is due.
See documentation for IRC.__init__.
"""
t = time.time()
while self.delayed_commands:
if t >= self.delayed_commands[0][0]:
self.delayed_commands[0][1](*self.delayed_commands[0][2])
del self.delayed_commands[0]
else:
break
def process_once(self, timeout=0):
"""Process data from connections once.
Arguments:
timeout -- How long the select() call should wait if no
data is available.
This method should be called periodically to check and process
incoming data, if there are any. If that seems boring, look
at the process_forever method.
"""
sockets = map(lambda x: x._get_socket(), self.connections)
sockets = filter(lambda x: x != None, sockets)
if sockets:
(i, o, e) = select.select(sockets, [], [], timeout)
self.process_data(i)
else:
time.sleep(timeout)
self.process_timeout()
def process_forever(self, timeout=0.2):
"""Run an infinite loop, processing data from connections.
This method repeatedly calls process_once.
Arguments:
timeout -- Parameter to pass to process_once.
"""
while 1:
self.process_once(timeout)
def disconnect_all(self, message=""):
"""Disconnects all connections."""
for c in self.connections:
c.disconnect(message)
def add_global_handler(self, event, handler, priority=0):
"""Adds a global handler function for a specific event type.
Arguments:
event -- Event type (a string). Check the values of the
numeric_events dictionary in irclib.py for possible event
types.
handler -- Callback function.
priority -- A number (the lower number, the higher priority).
The handler function is called whenever the specified event is
triggered in any of the connections. See documentation for
the Event class.
The handler functions are called in priority order (lowest
number is highest priority). If a handler function returns
\"NO MORE\", no more handlers will be called.
"""
if not event in self.handlers:
self.handlers[event] = []
bisect.insort(self.handlers[event], ((priority, handler)))
def remove_global_handler(self, event, handler):
"""Removes a global handler function.
Arguments:
event -- Event type (a string).
handler -- Callback function.
Returns 1 on success, otherwise 0.
"""
if not event in self.handlers:
return 0
for h in self.handlers[event]:
if handler == h[1]:
self.handlers[event].remove(h)
return 1
def execute_at(self, at, function, arguments=()):
"""Execute a function at a specified time.
Arguments:
at -- Execute at this time (standard \"time_t\" time).
function -- Function to call.
arguments -- Arguments to give the function.
"""
self.execute_delayed(at-time.time(), function, arguments)
def execute_delayed(self, delay, function, arguments=()):
"""Execute a function after a specified time.
Arguments:
delay -- How many seconds to wait.
function -- Function to call.
arguments -- Arguments to give the function.
"""
bisect.insort(self.delayed_commands, (delay+time.time(), function, arguments))
if self.fn_to_add_timeout:
self.fn_to_add_timeout(delay)
def dcc(self, dcctype="chat"):
"""Creates and returns a DCCConnection object.
Arguments:
dcctype -- "chat" for DCC CHAT connections or "raw" for
DCC SEND (or other DCC types). If "chat",
incoming data will be split in newline-separated
chunks. If "raw", incoming data is not touched.
"""
c = DCCConnection(self, dcctype)
self.connections.append(c)
return c
def _handle_event(self, connection, event):
"""[Internal]"""
h = self.handlers
for handler in h.get("all_events", []) + h.get(event.eventtype(), []):
if handler[1](connection, event) == "NO MORE":
return
def _remove_connection(self, connection):
"""[Internal]"""
self.connections.remove(connection)
if self.fn_to_remove_socket:
self.fn_to_remove_socket(connection._get_socket())
_rfc_1459_command_regexp = re.compile("^(:(?P<prefix>[^ ]+) +)?(?P<command>[^ ]+)( *(?P<argument> .+))?")
class Connection:
"""Base class for IRC connections.
Must be overridden.
"""
def __init__(self, irclibobj):
self.irclibobj = irclibobj
    def _get_socket(self):
raise IRCError, "Not overridden"
##############################
### Convenience wrappers.
def execute_at(self, at, function, arguments=()):
self.irclibobj.execute_at(at, function, arguments)
def execute_delayed(self, delay, function, arguments=()):
self.irclibobj.execute_delayed(delay, function, arguments)
class ServerConnectionError(IRCError):
pass
class ServerNotConnectedError(ServerConnectionError):
pass
# Huh!? Crrrrazy EFNet doesn't follow the RFC: their ircd seems to
# use \n as message separator! :P
_linesep_regexp = re.compile("\r?\n")
class ServerConnection(Connection):
"""This class represents an IRC server connection.
ServerConnection objects are instantiated by calling the server
method on an IRC object.
"""
def __init__(self, irclibobj):
Connection.__init__(self, irclibobj)
self.connected = 0 # Not connected yet.
self.socket = None
self.ssl = None
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name ("realname").
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
Returns the ServerConnection object.
"""
if self.connected:
self.disconnect("Changing servers")
self.previous_buffer = ""
self.handlers = {}
self.real_server_name = ""
self.real_nickname = nickname
self.server = server
self.port = port
self.nickname = nickname
self.username = username or nickname
self.ircname = ircname or nickname
self.password = password
self.localaddress = localaddress
self.localport = localport
self.localhost = socket.gethostname()
if ipv6:
self.socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.bind((self.localaddress, self.localport))
self.socket.connect((self.server, self.port))
if ssl:
self.ssl = socket.ssl(self.socket)
except socket.error, x:
self.socket.close()
self.socket = None
raise ServerConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
# Log on...
if self.password:
self.pass_(self.password)
self.nick(self.nickname)
self.user(self.username, self.ircname)
return self
def close(self):
"""Close the connection.
This method closes the connection permanently; after it has
been called, the object is unusable.
"""
self.disconnect("Closing object")
self.irclibobj._remove_connection(self)
def _get_socket(self):
"""[Internal]"""
return self.socket
def get_server_name(self):
"""Get the (real) server name.
This method returns the (real) server name, or, more
specifically, what the server calls itself.
"""
if self.real_server_name:
return self.real_server_name
else:
return ""
def get_nickname(self):
"""Get the (real) nick name.
This method returns the (real) nickname. The library keeps
track of nick changes, so it might not be the nick name that
was passed to the connect() method. """
return self.real_nickname
def process_data(self):
"""[Internal]"""
try:
if self.ssl:
new_data = self.ssl.read(2**14)
else:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
lines = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = lines.pop()
for line in lines:
if DEBUG:
print "FROM SERVER:", line
if not line:
continue
prefix = None
command = None
arguments = None
self._handle_event(Event("all_raw_messages",
self.get_server_name(),
None,
[line]))
m = _rfc_1459_command_regexp.match(line)
if m.group("prefix"):
prefix = m.group("prefix")
if not self.real_server_name:
self.real_server_name = prefix
if m.group("command"):
command = m.group("command").lower()
if m.group("argument"):
a = m.group("argument").split(" :", 1)
arguments = a[0].split()
if len(a) == 2:
arguments.append(a[1])
# Translate numerics into more readable strings.
if command in numeric_events:
command = numeric_events[command]
if command == "nick":
if nm_to_n(prefix) == self.real_nickname:
self.real_nickname = arguments[0]
elif command == "welcome":
# Record the nickname in case the client changed nick
# in a nicknameinuse callback.
self.real_nickname = arguments[0]
if command in ["privmsg", "notice"]:
target, message = arguments[0], arguments[1]
messages = _ctcp_dequote(message)
if command == "privmsg":
if is_channel(target):
command = "pubmsg"
else:
if is_channel(target):
command = "pubnotice"
else:
command = "privnotice"
for m in messages:
if type(m) is types.TupleType:
if command in ["privmsg", "pubmsg"]:
command = "ctcp"
else:
command = "ctcpreply"
m = list(m)
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, m)
self._handle_event(Event(command, prefix, target, m))
if command == "ctcp" and m[0] == "ACTION":
self._handle_event(Event("action", prefix, target, m[1:]))
else:
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, [m])
self._handle_event(Event(command, prefix, target, [m]))
else:
target = None
if command == "quit":
arguments = [arguments[0]]
elif command == "ping":
target = arguments[0]
else:
target = arguments[0]
arguments = arguments[1:]
if command == "mode":
if not is_channel(target):
command = "umode"
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self._handle_event(Event(command, prefix, target, arguments))
def _handle_event(self, event):
"""[Internal]"""
self.irclibobj._handle_event(self, event)
if event.eventtype() in self.handlers:
for fn in self.handlers[event.eventtype()]:
fn(self, event)
def is_connected(self):
"""Return connection status.
Returns true if connected, otherwise false.
"""
return self.connected
def add_global_handler(self, *args):
"""Add global handler.
See documentation for IRC.add_global_handler.
"""
self.irclibobj.add_global_handler(*args)
def remove_global_handler(self, *args):
"""Remove global handler.
See documentation for IRC.remove_global_handler.
"""
self.irclibobj.remove_global_handler(*args)
def action(self, target, action):
"""Send a CTCP ACTION command."""
self.ctcp("ACTION", target, action)
def admin(self, server=""):
"""Send an ADMIN command."""
self.send_raw(" ".join(["ADMIN", server]).strip())
def ctcp(self, ctcptype, target, parameter=""):
"""Send a CTCP command."""
ctcptype = ctcptype.upper()
self.privmsg(target, "\001%s%s\001" % (ctcptype, parameter and (" " + parameter) or ""))
def ctcp_reply(self, target, parameter):
"""Send a CTCP REPLY command."""
self.notice(target, "\001%s\001" % parameter)
def disconnect(self, message=""):
"""Hang up the connection.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
self.quit(message)
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self._handle_event(Event("disconnect", self.server, "", [message]))
def globops(self, text):
"""Send a GLOBOPS command."""
self.send_raw("GLOBOPS :" + text)
def info(self, server=""):
"""Send an INFO command."""
self.send_raw(" ".join(["INFO", server]).strip())
def invite(self, nick, channel):
"""Send an INVITE command."""
self.send_raw(" ".join(["INVITE", nick, channel]).strip())
def ison(self, nicks):
"""Send an ISON command.
Arguments:
nicks -- List of nicks.
"""
self.send_raw("ISON " + " ".join(nicks))
def join(self, channel, key=""):
"""Send a JOIN command."""
self.send_raw("JOIN %s%s" % (channel, (key and (" " + key))))
def kick(self, channel, nick, comment=""):
"""Send a KICK command."""
self.send_raw("KICK %s %s%s" % (channel, nick, (comment and (" :" + comment))))
def links(self, remote_server="", server_mask=""):
"""Send a LINKS command."""
command = "LINKS"
if remote_server:
command = command + " " + remote_server
if server_mask:
command = command + " " + server_mask
self.send_raw(command)
def list(self, channels=None, server=""):
"""Send a LIST command."""
command = "LIST"
if channels:
command = command + " " + ",".join(channels)
if server:
command = command + " " + server
self.send_raw(command)
def lusers(self, server=""):
"""Send a LUSERS command."""
self.send_raw("LUSERS" + (server and (" " + server)))
def mode(self, target, command):
"""Send a MODE command."""
self.send_raw("MODE %s %s" % (target, command))
def motd(self, server=""):
"""Send an MOTD command."""
self.send_raw("MOTD" + (server and (" " + server)))
def names(self, channels=None):
"""Send a NAMES command."""
self.send_raw("NAMES" + (channels and (" " + ",".join(channels)) or ""))
def nick(self, newnick):
"""Send a NICK command."""
self.send_raw("NICK " + newnick)
def notice(self, target, text):
"""Send a NOTICE command."""
# Should limit len(text) here!
self.send_raw("NOTICE %s :%s" % (target, text))
def oper(self, nick, password):
"""Send an OPER command."""
self.send_raw("OPER %s %s" % (nick, password))
def part(self, channels, message=""):
"""Send a PART command."""
if type(channels) == types.StringType:
self.send_raw("PART " + channels + (message and (" " + message)))
else:
self.send_raw("PART " + ",".join(channels) + (message and (" " + message)))
def pass_(self, password):
"""Send a PASS command."""
self.send_raw("PASS " + password)
def ping(self, target, target2=""):
"""Send a PING command."""
self.send_raw("PING %s%s" % (target, target2 and (" " + target2)))
def pong(self, target, target2=""):
"""Send a PONG command."""
self.send_raw("PONG %s%s" % (target, target2 and (" " + target2)))
def privmsg(self, target, text):
"""Send a PRIVMSG command."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (target, text))
def privmsg_many(self, targets, text):
"""Send a PRIVMSG command to multiple targets."""
# Should limit len(text) here!
self.send_raw("PRIVMSG %s :%s" % (",".join(targets), text))
def quit(self, message=""):
"""Send a QUIT command."""
# Note that many IRC servers don't use your QUIT message
# unless you've been connected for at least 5 minutes!
self.send_raw("QUIT" + (message and (" :" + message)))
def send_raw(self, string):
"""Send raw string to the server.
The string will be padded with appropriate CR LF.
"""
if self.socket is None:
raise ServerNotConnectedError, "Not connected."
try:
if self.ssl:
self.ssl.write(string + "\r\n")
else:
self.socket.send(string + "\r\n")
if DEBUG:
print "TO SERVER:", string
if MOG_DEBUG:
print ">> ", string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
def squit(self, server, comment=""):
"""Send an SQUIT command."""
self.send_raw("SQUIT %s%s" % (server, comment and (" :" + comment)))
def stats(self, statstype, server=""):
"""Send a STATS command."""
self.send_raw("STATS %s%s" % (statstype, server and (" " + server)))
def time(self, server=""):
"""Send a TIME command."""
self.send_raw("TIME" + (server and (" " + server)))
def topic(self, channel, new_topic=None):
"""Send a TOPIC command."""
if new_topic is None:
self.send_raw("TOPIC " + channel)
else:
self.send_raw("TOPIC %s :%s" % (channel, new_topic))
def trace(self, target=""):
"""Send a TRACE command."""
self.send_raw("TRACE" + (target and (" " + target)))
def user(self, username, realname):
"""Send a USER command."""
self.send_raw("USER %s 0 * :%s" % (username, realname))
def userhost(self, nicks):
"""Send a USERHOST command."""
self.send_raw("USERHOST " + ",".join(nicks))
def users(self, server=""):
"""Send a USERS command."""
self.send_raw("USERS" + (server and (" " + server)))
def version(self, server=""):
"""Send a VERSION command."""
self.send_raw("VERSION" + (server and (" " + server)))
def wallops(self, text):
"""Send a WALLOPS command."""
self.send_raw("WALLOPS :" + text)
def who(self, target="", op=""):
"""Send a WHO command."""
self.send_raw("WHO%s%s" % (target and (" " + target), op and (" o")))
def whois(self, targets):
"""Send a WHOIS command."""
self.send_raw("WHOIS " + ",".join(targets))
def whowas(self, nick, max="", server=""):
"""Send a WHOWAS command."""
self.send_raw("WHOWAS %s%s%s" % (nick,
max and (" " + max),
server and (" " + server)))
class DCCConnectionError(IRCError):
pass
class DCCConnection(Connection):
"""This class represents a DCC connection.
DCCConnection objects are instantiated by calling the dcc
method on an IRC object.
"""
def __init__(self, irclibobj, dcctype):
Connection.__init__(self, irclibobj)
self.connected = 0
self.passive = 0
self.dcctype = dcctype
self.peeraddress = None
self.peerport = None
def connect(self, address, port):
"""Connect/reconnect to a DCC peer.
Arguments:
address -- Host/IP address of the peer.
port -- The port number to connect to.
Returns the DCCConnection object.
"""
self.peeraddress = socket.gethostbyname(address)
self.peerport = port
self.socket = None
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 0
try:
self.socket.connect((self.peeraddress, self.peerport))
except socket.error, x:
raise DCCConnectionError, "Couldn't connect to socket: %s" % x
self.connected = 1
if self.irclibobj.fn_to_add_socket:
self.irclibobj.fn_to_add_socket(self.socket)
return self
def listen(self):
"""Wait for a connection/reconnection from a DCC peer.
Returns the DCCConnection object.
The local IP address and port are available as
self.localaddress and self.localport. After connection from a
peer, the peer address and port are available as
self.peeraddress and self.peerport.
"""
self.previous_buffer = ""
self.handlers = {}
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.passive = 1
try:
self.socket.bind((socket.gethostbyname(socket.gethostname()), 0))
self.localaddress, self.localport = self.socket.getsockname()
self.socket.listen(10)
except socket.error, x:
raise DCCConnectionError, "Couldn't bind socket: %s" % x
return self
def disconnect(self, message=""):
"""Hang up the connection and close the object.
Arguments:
message -- Quit message.
"""
if not self.connected:
return
self.connected = 0
try:
self.socket.close()
except socket.error, x:
pass
self.socket = None
self.irclibobj._handle_event(
self,
Event("dcc_disconnect", self.peeraddress, "", [message]))
self.irclibobj._remove_connection(self)
def process_data(self):
"""[Internal]"""
if self.passive and not self.connected:
conn, (self.peeraddress, self.peerport) = self.socket.accept()
self.socket.close()
self.socket = conn
self.connected = 1
if DEBUG:
print "DCC connection from %s:%d" % (
self.peeraddress, self.peerport)
self.irclibobj._handle_event(
self,
Event("dcc_connect", self.peeraddress, None, None))
return
try:
new_data = self.socket.recv(2**14)
except socket.error, x:
# The server hung up.
self.disconnect("Connection reset by peer")
return
if not new_data:
# Read nothing: connection must be down.
self.disconnect("Connection reset by peer")
return
if self.dcctype == "chat":
# The specification says lines are terminated with LF, but
# it seems safer to handle CR LF terminations too.
chunks = _linesep_regexp.split(self.previous_buffer + new_data)
# Save the last, unfinished line.
self.previous_buffer = chunks[-1]
if len(self.previous_buffer) > 2**14:
# Bad peer! Naughty peer!
self.disconnect()
return
chunks = chunks[:-1]
else:
chunks = [new_data]
command = "dccmsg"
prefix = self.peeraddress
target = None
for chunk in chunks:
if DEBUG:
print "FROM PEER:", chunk
arguments = [chunk]
if DEBUG:
print "command: %s, source: %s, target: %s, arguments: %s" % (
command, prefix, target, arguments)
self.irclibobj._handle_event(
self,
Event(command, prefix, target, arguments))
def _get_socket(self):
"""[Internal]"""
return self.socket
def privmsg(self, string):
"""Send data to DCC peer.
The string will be padded with appropriate LF if it's a DCC
CHAT session.
"""
try:
self.socket.send(string)
if self.dcctype == "chat":
self.socket.send("\n")
if DEBUG:
print "TO PEER: %s\n" % string
except socket.error, x:
# Ouch!
self.disconnect("Connection reset by peer.")
class SimpleIRCClient:
"""A simple single-server IRC client class.
This is an example of an object-oriented wrapper of the IRC
framework. A real IRC client can be made by subclassing this
class and adding appropriate methods.
The method on_join will be called when a "join" event is created
    (which is done when the server sends a JOIN message/command),
on_privmsg will be called for "privmsg" events, and so on. The
handler methods get two arguments: the connection object (same as
self.connection) and the event object.
    Instance attributes that can be used by subclasses:
ircobj -- The IRC instance.
connection -- The ServerConnection instance.
dcc_connections -- A list of DCCConnection instances.
"""
def __init__(self):
self.ircobj = IRC()
self.connection = self.ircobj.server()
self.dcc_connections = []
self.ircobj.add_global_handler("all_events", self._dispatcher, -10)
self.ircobj.add_global_handler("dcc_disconnect", self._dcc_disconnect, -10)
def _dispatcher(self, c, e):
"""[Internal]"""
m = "on_" + e.eventtype()
if hasattr(self, m):
getattr(self, m)(c, e)
def _dcc_disconnect(self, c, e):
self.dcc_connections.remove(c)
def connect(self, server, port, nickname, password=None, username=None,
ircname=None, localaddress="", localport=0, ssl=False, ipv6=False):
"""Connect/reconnect to a server.
Arguments:
server -- Server name.
port -- Port number.
nickname -- The nickname.
password -- Password (if any).
username -- The username.
ircname -- The IRC name.
localaddress -- Bind the connection to a specific local IP address.
localport -- Bind the connection to a specific local port.
ssl -- Enable support for ssl.
ipv6 -- Enable support for ipv6.
This function can be called to reconnect a closed connection.
"""
self.connection.connect(server, port, nickname,
password, username, ircname,
localaddress, localport, ssl, ipv6)
def dcc_connect(self, address, port, dcctype="chat"):
"""Connect to a DCC peer.
Arguments:
address -- IP address of the peer.
port -- Port to connect to.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.connect(address, port)
return dcc
def dcc_listen(self, dcctype="chat"):
"""Listen for connections from a DCC peer.
Returns a DCCConnection instance.
"""
dcc = self.ircobj.dcc(dcctype)
self.dcc_connections.append(dcc)
dcc.listen()
return dcc
def start(self):
"""Start the IRC client."""
self.ircobj.process_forever()
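# A minimal usage sketch (illustrative only; the server, port, nickname and
# channel below are placeholder values): subclass SimpleIRCClient and define
# "on_<eventtype>" methods, which _dispatcher invokes as events arrive.
#
#     class EchoClient(SimpleIRCClient):
#         def on_welcome(self, connection, event):
#             connection.join("#test")
#         def on_pubmsg(self, connection, event):
#             # Echo public messages back to where they came from.
#             connection.privmsg(event.target(), event.arguments()[0])
#
#     client = EchoClient()
#     client.connect("irc.example.org", 6667, "echoclient")
#     client.start()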
class Event:
"""Class representing an IRC event."""
def __init__(self, eventtype, source, target, arguments=None):
"""Constructor of Event objects.
Arguments:
eventtype -- A string describing the event.
source -- The originator of the event (a nick mask or a server).
target -- The target of the event (a nick or a channel).
arguments -- Any event specific arguments.
"""
self._eventtype = eventtype
self._source = source
self._target = target
if arguments:
self._arguments = arguments
else:
self._arguments = []
def eventtype(self):
"""Get the event type."""
return self._eventtype
def source(self):
"""Get the event source."""
return self._source
def target(self):
"""Get the event target."""
return self._target
def arguments(self):
"""Get the event arguments."""
return self._arguments
_LOW_LEVEL_QUOTE = "\020"
_CTCP_LEVEL_QUOTE = "\134"
_CTCP_DELIMITER = "\001"
_low_level_mapping = {
"0": "\000",
"n": "\n",
"r": "\r",
_LOW_LEVEL_QUOTE: _LOW_LEVEL_QUOTE
}
_low_level_regexp = re.compile(_LOW_LEVEL_QUOTE + "(.)")
def mask_matches(nick, mask):
"""Check if a nick matches a mask.
Returns true if the nick matches, otherwise false.
"""
nick = irc_lower(nick)
mask = irc_lower(mask)
mask = mask.replace("\\", "\\\\")
for ch in ".$|[](){}+":
mask = mask.replace(ch, "\\" + ch)
mask = mask.replace("?", ".")
mask = mask.replace("*", ".*")
r = re.compile(mask, re.IGNORECASE)
return r.match(nick)
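# For example (illustrative values), mask_matches("WiZ!jto@tolsun.oulu.fi",
# "WiZ!*@*.oulu.fi") is truthy: both strings are lowercased, regexp
# metacharacters in the mask are escaped, "?" and "*" become "." and ".*",
# and the resulting pattern is matched against the nick.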
_special = "-[]\\`^{}"
nick_characters = string.ascii_letters + string.digits + _special
_ircstring_translation = string.maketrans(string.ascii_uppercase + "[]\\^",
string.ascii_lowercase + "{}|~")
def irc_lower(s):
"""Returns a lowercased string.
The definition of lowercased comes from the IRC specification (RFC
1459).
"""
return s.translate(_ircstring_translation)
def _ctcp_dequote(message):
"""[Internal] Dequote a message according to CTCP specifications.
The function returns a list where each element can be either a
string (normal message) or a tuple of one or two strings (tagged
messages). If a tuple has only one element (ie is a singleton),
that element is the tag; otherwise the tuple has two elements: the
tag and the data.
Arguments:
message -- The message to be decoded.
"""
def _low_level_replace(match_obj):
ch = match_obj.group(1)
# If low_level_mapping doesn't have the character as key, we
# should just return the character.
return _low_level_mapping.get(ch, ch)
if _LOW_LEVEL_QUOTE in message:
# Yup, there was a quote. Release the dequoter, man!
message = _low_level_regexp.sub(_low_level_replace, message)
if _CTCP_DELIMITER not in message:
return [message]
else:
# Split it into parts. (Does any IRC client actually *use*
# CTCP stacking like this?)
chunks = message.split(_CTCP_DELIMITER)
messages = []
i = 0
while i < len(chunks)-1:
# Add message if it's non-empty.
if len(chunks[i]) > 0:
messages.append(chunks[i])
if i < len(chunks)-2:
# Aye! CTCP tagged data ahead!
messages.append(tuple(chunks[i+1].split(" ", 1)))
i = i + 2
if len(chunks) % 2 == 0:
# Hey, a lonely _CTCP_DELIMITER at the end! This means
# that the last chunk, including the delimiter, is a
# normal message! (This is according to the CTCP
# specification.)
messages.append(_CTCP_DELIMITER + chunks[-1])
return messages
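# A few illustrative dequotings (the payloads are made-up examples):
#   _ctcp_dequote("hello")                  => ["hello"]
#   _ctcp_dequote("\001ACTION waves\001")   => [("ACTION", "waves")]
#   _ctcp_dequote("hi \001PING 12345\001")  => ["hi ", ("PING", "12345")]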
def is_channel(string):
"""Check if a string is a channel name.
Returns true if the argument is a channel name, otherwise false.
"""
return string and string[0] in "#&+!"
def ip_numstr_to_quad(num):
"""Convert an IP number as an integer given in ASCII
representation (e.g. '3232235521') to an IP address string
(e.g. '192.168.0.1')."""
n = long(num)
p = map(str, map(int, [n >> 24 & 0xFF, n >> 16 & 0xFF,
n >> 8 & 0xFF, n & 0xFF]))
return ".".join(p)
def ip_quad_to_numstr(quad):
"""Convert an IP address string (e.g. '192.168.0.1') to an IP
number as an integer given in ASCII representation
(e.g. '3232235521')."""
p = map(long, quad.split("."))
s = str((p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3])
if s[-1] == "L":
s = s[:-1]
return s
def nm_to_n(s):
"""Get the nick part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[0]
def nm_to_uh(s):
"""Get the userhost part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("!")[1]
def nm_to_h(s):
"""Get the host part of a nickmask.
(The source of an Event is a nickmask.)
"""
return s.split("@")[1]
def nm_to_u(s):
"""Get the user part of a nickmask.
(The source of an Event is a nickmask.)
"""
s = s.split("!")[1]
return s.split("@")[0]
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
always None.
Example:
>>> irclib.parse_nick_modes(\"+ab-c\")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is \"+\" or \"-\". The argument is
None if mode isn't one of \"b\", \"k\", \"l\", \"v\" or \"o\".
Example:
>>> irclib.parse_channel_modes(\"+ab-c foo\")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""[Internal]"""
modes = []
arg_count = 0
# State variable.
sign = ""
a = mode_string.split()
if len(a) == 0:
return []
else:
mode_part, args = a[0], a[1:]
if mode_part[0] not in "+-":
return []
for ch in mode_part:
if ch in "+-":
sign = ch
elif ch == " ":
collecting_arguments = 1
elif ch in unary_modes:
if len(args) >= arg_count + 1:
modes.append([sign, ch, args[arg_count]])
arg_count = arg_count + 1
else:
modes.append([sign, ch, None])
else:
modes.append([sign, ch, None])
return modes
def _ping_ponger(connection, event):
"""[Internal]"""
connection.pong(event.target())
# Numeric table mostly stolen from the Perl IRC module (Net::IRC).
numeric_events = {
"001": "welcome",
"002": "yourhost",
"003": "created",
"004": "myinfo",
"005": "featurelist", # XXX
"200": "tracelink",
"201": "traceconnecting",
"202": "tracehandshake",
"203": "traceunknown",
"204": "traceoperator",
"205": "traceuser",
"206": "traceserver",
"207": "traceservice",
"208": "tracenewtype",
"209": "traceclass",
"210": "tracereconnect",
"211": "statslinkinfo",
"212": "statscommands",
"213": "statscline",
"214": "statsnline",
"215": "statsiline",
"216": "statskline",
"217": "statsqline",
"218": "statsyline",
"219": "endofstats",
"221": "umodeis",
"231": "serviceinfo",
"232": "endofservices",
"233": "service",
"234": "servlist",
"235": "servlistend",
"241": "statslline",
"242": "statsuptime",
"243": "statsoline",
"244": "statshline",
"250": "luserconns",
"251": "luserclient",
"252": "luserop",
"253": "luserunknown",
"254": "luserchannels",
"255": "luserme",
"256": "adminme",
"257": "adminloc1",
"258": "adminloc2",
"259": "adminemail",
"261": "tracelog",
"262": "endoftrace",
"263": "tryagain",
"265": "n_local",
"266": "n_global",
"300": "none",
"301": "away",
"302": "userhost",
"303": "ison",
"305": "unaway",
"306": "nowaway",
"311": "whoisuser",
"312": "whoisserver",
"313": "whoisoperator",
"314": "whowasuser",
"315": "endofwho",
"316": "whoischanop",
"317": "whoisidle",
"318": "endofwhois",
"319": "whoischannels",
"321": "liststart",
"322": "list",
"323": "listend",
"324": "channelmodeis",
"329": "channelcreate",
"331": "notopic",
"332": "currenttopic",
"333": "topicinfo",
"341": "inviting",
"342": "summoning",
"346": "invitelist",
"347": "endofinvitelist",
"348": "exceptlist",
"349": "endofexceptlist",
"351": "version",
"352": "whoreply",
"353": "namreply",
"361": "killdone",
"362": "closing",
"363": "closeend",
"364": "links",
"365": "endoflinks",
"366": "endofnames",
"367": "banlist",
"368": "endofbanlist",
"369": "endofwhowas",
"371": "info",
"372": "motd",
"373": "infostart",
"374": "endofinfo",
"375": "motdstart",
"376": "endofmotd",
"377": "motd2", # 1997-10-16 -- tkil
"381": "youreoper",
"382": "rehashing",
"384": "myportis",
"391": "time",
"392": "usersstart",
"393": "users",
"394": "endofusers",
"395": "nousers",
"401": "nosuchnick",
"402": "nosuchserver",
"403": "nosuchchannel",
"404": "cannotsendtochan",
"405": "toomanychannels",
"406": "wasnosuchnick",
"407": "toomanytargets",
"409": "noorigin",
"411": "norecipient",
"412": "notexttosend",
"413": "notoplevel",
"414": "wildtoplevel",
"421": "unknowncommand",
"422": "nomotd",
"423": "noadmininfo",
"424": "fileerror",
"431": "nonicknamegiven",
"432": "erroneusnickname", # Thiss iz how its speld in thee RFC.
"433": "nicknameinuse",
"436": "nickcollision",
"437": "unavailresource", # "Nick temporally unavailable"
"441": "usernotinchannel",
"442": "notonchannel",
"443": "useronchannel",
"444": "nologin",
"445": "summondisabled",
"446": "usersdisabled",
"451": "notregistered",
"461": "needmoreparams",
"462": "alreadyregistered",
"463": "nopermforhost",
"464": "passwdmismatch",
"465": "yourebannedcreep", # I love this one...
"466": "youwillbebanned",
"467": "keyset",
"471": "channelisfull",
"472": "unknownmode",
"473": "inviteonlychan",
"474": "bannedfromchan",
"475": "badchannelkey",
"476": "badchanmask",
"477": "nochanmodes", # "Channel doesn't support modes"
"478": "banlistfull",
"481": "noprivileges",
"482": "chanoprivsneeded",
"483": "cantkillserver",
"484": "restricted", # Connection is restricted
"485": "uniqopprivsneeded",
"491": "nooperhost",
"492": "noservicehost",
"501": "umodeunknownflag",
"502": "usersdontmatch",
}
generated_events = [
# Generated events
"dcc_connect",
"dcc_disconnect",
"dccmsg",
"disconnect",
"ctcp",
"ctcpreply",
]
protocol_events = [
# IRC protocol events
"error",
"join",
"kick",
"mode",
"part",
"ping",
"privmsg",
"privnotice",
"pubmsg",
"pubnotice",
"quit",
"invite",
"pong",
]
all_events = generated_events + protocol_events + numeric_events.values()
| mit | -1,705,436,094,571,637,500 | 30.211132 | 105 | 0.564131 | false |
jaredmanning/learning | lpthw/ex33.py | 1 | 1813 | #i = 0
#numbers = []
#
#while i < 6:
# print "At the top i is %d" % i
# numbers.append(i)
#
# i += 1
# print "Numbers now: ", numbers
# print "At the bottom i is %d" % i
#
#
#print "The numbers: "
#
#for num in numbers:
# print num
#Study Drill Part 1
#print "What's the limit of the list?"
#a = int(raw_input("> "))
#
#def list_numbers(a):
# """This function might add numbers to a list?"""
# i = 0
# numbers = []
#
# while i < a:
# print "At the top i is %d" % i
# numbers.append(i)
#
# i += 1
# print "Numbers now: ", numbers
# print "At the bottom i is %d" % i
#
# print "The numbers: "
#
# for num in numbers:
# print num
#
# return
#
#list_numbers(a)
#Study Drill Part 2
#print "What's the limit of the list?"
#a = int(raw_input("> "))
#
#print "What is the desired increment?"
#n = int(raw_input("> "))
#
#def list_numbers():
# """This function might add numbers to a list?"""
# i = 0
# numbers = []
#
# while i < a:
# print "At the top i is %d" % i
# numbers.append(i)
#
# i += n
# print "Numbers now: ", numbers
# print "At the bottom i is %d" % i
#
# print "The numbers: "
#
# for num in numbers:
# print num
#
# return
#
#list_numbers()
#Study Drill Part 3
print "What's the limit of the list?"
a = int(raw_input("> "))
print "What is the desired increment?"
n = int(raw_input("> "))
def list_numbers():
"""This function adds numbers to a list"""
i = 0
numbers = []
for i in range(0, a, n):
print "At the top i is %d" % i
numbers.append(i)
print "Numbers now: ", numbers
print "At the bottom i is %d" % i
print "The numbers: "
for num in numbers:
print num
return
list_numbers()
| mit | -10,674,561,632,627,338 | 17.313131 | 53 | 0.533922 | false |
bigswitch/nova | nova/notifications.py | 1 | 15437 | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functionality related to notifications common to multiple layers of
the system.
"""
import datetime
from oslo_context import context as common_context
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import timeutils
import six
import nova.conf
import nova.context
from nova import exception
from nova.i18n import _LE
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova import objects
from nova.objects import base as obj_base
from nova import rpc
from nova import utils
LOG = log.getLogger(__name__)
CONF = nova.conf.CONF
def notify_decorator(name, fn):
"""Decorator for notify which is used from utils.monkey_patch().
:param name: name of the function
:param fn: - object of the function
:returns: fn -- decorated function
"""
def wrapped_func(*args, **kwarg):
body = {}
body['args'] = []
body['kwarg'] = {}
for arg in args:
body['args'].append(arg)
for key in kwarg:
body['kwarg'][key] = kwarg[key]
ctxt = (common_context.get_context_from_function_and_args(
fn, args, kwarg) or
common_context.get_current() or
nova.context.RequestContext())
notifier = rpc.get_notifier('api',
publisher_id=(CONF.default_publisher_id
or CONF.host))
method = getattr(notifier, CONF.default_notification_level.lower(),
notifier.info)
method(ctxt, name, body)
return fn(*args, **kwarg)
return wrapped_func
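# A hedged usage sketch (the event name and function below are illustrative and
# not part of nova): besides being applied through utils.monkey_patch(), the
# decorator can wrap a callable directly so every invocation also emits a
# notification:
#
#     def resize_instance(context, instance, flavor):
#         ...
#
#     resize_instance = notify_decorator('compute.resize_instance',
#                                        resize_instance)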
def send_api_fault(url, status, exception):
"""Send an api.fault notification."""
if not CONF.notify_api_faults:
return
payload = {'url': url, 'exception': six.text_type(exception),
'status': status}
rpc.get_notifier('api').error(common_context.get_current() or
nova.context.get_admin_context(),
'api.fault',
payload)
def send_update(context, old_instance, new_instance, service="compute",
host=None):
"""Send compute.instance.update notification to report any changes occurred
in that instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
update_with_state_change = False
old_vm_state = old_instance["vm_state"]
new_vm_state = new_instance["vm_state"]
old_task_state = old_instance["task_state"]
new_task_state = new_instance["task_state"]
# we should check if we need to send a state change or a regular
# notification
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
update_with_state_change = True
elif (CONF.notify_on_state_change == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
update_with_state_change = True
if update_with_state_change:
# send a notification with state changes
# value of verify_states need not be True as the check for states is
# already done here
send_update_with_states(context, new_instance, old_vm_state,
new_vm_state, old_task_state, new_task_state, service, host)
else:
try:
old_display_name = None
if new_instance["display_name"] != old_instance["display_name"]:
old_display_name = old_instance["display_name"]
_send_instance_update_notification(context, new_instance,
service=service, host=host,
old_display_name=old_display_name)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=new_instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=new_instance)
def send_update_with_states(context, instance, old_vm_state, new_vm_state,
old_task_state, new_task_state, service="compute", host=None,
verify_states=False):
"""Send compute.instance.update notification to report changes if there
are any, in the instance
"""
if not CONF.notify_on_state_change:
# skip all this if updates are disabled
return
fire_update = True
# send update notification by default
if verify_states:
# check whether we need to send notification related to state changes
fire_update = False
# do not send notification if the conditions for vm and(or) task state
# are not satisfied
if old_vm_state != new_vm_state:
# yes, the vm state is changing:
fire_update = True
elif (CONF.notify_on_state_change == "vm_and_task_state" and
old_task_state != new_task_state):
# yes, the task state is changing:
fire_update = True
if fire_update:
# send either a state change or a regular notification
try:
_send_instance_update_notification(context, instance,
old_vm_state=old_vm_state, old_task_state=old_task_state,
new_vm_state=new_vm_state, new_task_state=new_task_state,
service=service, host=host)
except exception.InstanceNotFound:
LOG.debug('Failed to send instance update notification. The '
'instance could not be found and was most likely '
'deleted.', instance=instance)
except Exception:
LOG.exception(_LE("Failed to send state update notification"),
instance=instance)
def _compute_states_payload(instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None):
# If the states were not specified we assume the current instance
# states are the correct information. This is important to do for
# both old and new states because otherwise we create some really
    # confusing notifications like:
#
# None(None) => Building(none)
#
# When we really were just continuing to build
if new_vm_state is None:
new_vm_state = instance["vm_state"]
if new_task_state is None:
new_task_state = instance["task_state"]
if old_vm_state is None:
old_vm_state = instance["vm_state"]
if old_task_state is None:
old_task_state = instance["task_state"]
states_payload = {
"old_state": old_vm_state,
"state": new_vm_state,
"old_task_state": old_task_state,
"new_task_state": new_task_state,
}
return states_payload
def _send_instance_update_notification(context, instance, old_vm_state=None,
old_task_state=None, new_vm_state=None, new_task_state=None,
service="compute", host=None, old_display_name=None):
"""Send 'compute.instance.update' notification to inform observers
about instance state changes.
"""
payload = info_from_instance(context, instance, None, None)
# determine how we'll report states
payload.update(
_compute_states_payload(
instance, old_vm_state, old_task_state,
new_vm_state, new_task_state))
# add audit fields:
(audit_start, audit_end) = audit_period_bounds(current_period=True)
payload["audit_period_beginning"] = audit_start
payload["audit_period_ending"] = audit_end
# add bw usage info:
bw = bandwidth_usage(instance, audit_start)
payload["bandwidth"] = bw
# add old display name if it is changed
if old_display_name:
payload["old_display_name"] = old_display_name
rpc.get_notifier(service, host).info(context,
'compute.instance.update', payload)
def audit_period_bounds(current_period=False):
"""Get the start and end of the relevant audit usage period
:param current_period: if True, this will generate a usage for the
current usage period; if False, this will generate a usage for the
previous audit period.
"""
begin, end = utils.last_completed_audit_period()
if current_period:
audit_start = end
audit_end = timeutils.utcnow()
else:
audit_start = begin
audit_end = end
return (audit_start, audit_end)
def bandwidth_usage(instance_ref, audit_start,
ignore_missing_network_data=True):
"""Get bandwidth usage information for the instance for the
specified audit period.
"""
admin_context = nova.context.get_admin_context(read_deleted='yes')
def _get_nwinfo_old_skool():
"""Support for getting network info without objects."""
if (instance_ref.get('info_cache') and
instance_ref['info_cache'].get('network_info') is not None):
cached_info = instance_ref['info_cache']['network_info']
if isinstance(cached_info, network_model.NetworkInfo):
return cached_info
return network_model.NetworkInfo.hydrate(cached_info)
try:
return network.API().get_instance_nw_info(admin_context,
instance_ref)
except Exception:
try:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Failed to get nw_info'),
instance=instance_ref)
except Exception:
if ignore_missing_network_data:
return
raise
# FIXME(comstud): Temporary as we transition to objects.
if isinstance(instance_ref, obj_base.NovaObject):
nw_info = instance_ref.info_cache.network_info
if nw_info is None:
nw_info = network_model.NetworkInfo()
else:
nw_info = _get_nwinfo_old_skool()
macs = [vif['address'] for vif in nw_info]
uuids = [instance_ref["uuid"]]
bw_usages = objects.BandwidthUsageList.get_by_uuids(admin_context, uuids,
audit_start)
bw = {}
for b in bw_usages:
if b.mac in macs:
label = 'net-name-not-found-%s' % b.mac
for vif in nw_info:
if vif['address'] == b.mac:
label = vif['network']['label']
break
bw[label] = dict(bw_in=b.bw_in, bw_out=b.bw_out)
return bw
def image_meta(system_metadata):
"""Format image metadata for use in notifications from the instance
system metadata.
"""
image_meta = {}
for md_key, md_value in six.iteritems(system_metadata):
if md_key.startswith('image_'):
image_meta[md_key[6:]] = md_value
return image_meta
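# For example (illustrative values), system metadata of
# {'image_min_ram': '0', 'image_os_distro': 'ubuntu', 'instance_type_id': '5'}
# yields {'min_ram': '0', 'os_distro': 'ubuntu'}: only keys prefixed with
# 'image_' are kept, with the prefix stripped.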
def info_from_instance(context, instance, network_info,
system_metadata, **kw):
"""Get detailed instance information for an instance which is common to all
notifications.
:param:instance: nova.objects.Instance
:param:network_info: network_info provided if not None
:param:system_metadata: system_metadata DB entries for the instance,
if not None
.. note::
Currently unused here in trunk, but needed for potential custom
modifications.
"""
def null_safe_str(s):
return str(s) if s else ''
def null_safe_int(s):
return int(s) if s else ''
def null_safe_isotime(s):
if isinstance(s, datetime.datetime):
return utils.strtime(s)
else:
return str(s) if s else ''
image_ref_url = glance.generate_image_url(instance.image_ref)
instance_type = instance.get_flavor()
instance_type_name = instance_type.get('name', '')
instance_flavorid = instance_type.get('flavorid', '')
instance_info = dict(
# Owner properties
tenant_id=instance.project_id,
user_id=instance.user_id,
# Identity properties
instance_id=instance.uuid,
display_name=instance.display_name,
reservation_id=instance.reservation_id,
hostname=instance.hostname,
# Type properties
instance_type=instance_type_name,
instance_type_id=instance.instance_type_id,
instance_flavor_id=instance_flavorid,
architecture=instance.architecture,
# Capacity properties
memory_mb=instance.memory_mb,
disk_gb=instance.root_gb + instance.ephemeral_gb,
vcpus=instance.vcpus,
# Note(dhellmann): This makes the disk_gb value redundant, but
# we are keeping it for backwards-compatibility with existing
# users of notifications.
root_gb=instance.root_gb,
ephemeral_gb=instance.ephemeral_gb,
# Location properties
host=instance.host,
node=instance.node,
availability_zone=instance.availability_zone,
cell_name=null_safe_str(instance.cell_name),
# Date properties
created_at=str(instance.created_at),
# Terminated and Deleted are slightly different (although being
# terminated and not deleted is a transient state), so include
# both and let the recipient decide which they want to use.
terminated_at=null_safe_isotime(instance.get('terminated_at', None)),
deleted_at=null_safe_isotime(instance.get('deleted_at', None)),
launched_at=null_safe_isotime(instance.get('launched_at', None)),
# Image properties
image_ref_url=image_ref_url,
os_type=instance.os_type,
kernel_id=instance.kernel_id,
ramdisk_id=instance.ramdisk_id,
# Status properties
state=instance.vm_state,
state_description=null_safe_str(instance.task_state),
progress=null_safe_int(instance.progress),
# accessIPs
access_ip_v4=instance.access_ip_v4,
access_ip_v6=instance.access_ip_v6,
)
if network_info is not None:
fixed_ips = []
for vif in network_info:
for ip in vif.fixed_ips():
ip["label"] = vif["network"]["label"]
ip["vif_mac"] = vif["address"]
fixed_ips.append(ip)
instance_info['fixed_ips'] = fixed_ips
# add image metadata
image_meta_props = image_meta(instance.system_metadata)
instance_info["image_meta"] = image_meta_props
# add instance metadata
instance_info['metadata'] = instance.metadata
instance_info.update(kw)
return instance_info
| apache-2.0 | -7,944,843,228,296,015,000 | 33.689888 | 79 | 0.615081 | false |
Azure/azure-sdk-for-python | sdk/graphrbac/azure-graphrbac/tests/test_graphrbac.py | 1 | 8439 | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.graphrbac.models
from devtools_testutils import AzureMgmtTestCase
import pytest
# GraphRBAC tests
AD_DOMAIN = "myaddomain.onmicrosoft.com"
class GraphRbacTest(AzureMgmtTestCase):
def setUp(self):
super(GraphRbacTest, self).setUp()
# Set the env variable AZURE_AD_DOMAIN or put AD_DOMAIN in your "mgmt_settings_real" file
self.ad_domain = self.set_value_to_scrub('AD_DOMAIN', AD_DOMAIN)
self.graphrbac_client = self.create_basic_client(
azure.graphrbac.GraphRbacManagementClient,
tenant_id=self.ad_domain
)
def _build_object_url(self, object_id):
return "https://graph.windows.net/{}/directoryObjects/{}".format(
self.ad_domain,
object_id
)
def test_signed_in_user(self):
user = self.graphrbac_client.signed_in_user.get()
assert user.mail_nickname.startswith("admin") # Assuming we do the test with adminXXX account
# Create a group, and check I own it
group_create_parameters = azure.graphrbac.models.GroupCreateParameters(
display_name="pytestgroup_display",
mail_nickname="pytestgroup_nickname"
)
group = None
try:
group = self.graphrbac_client.groups.create(group_create_parameters)
self.graphrbac_client.groups.add_owner(
group.object_id,
self._build_object_url(user.object_id)
)
owned_objects = list(self.graphrbac_client.signed_in_user.list_owned_objects())
for obj in owned_objects:
if obj.display_name == "pytestgroup_display":
break
else:
pytest.fail("Didn't found the group I just created in my owned objects")
try:
self.graphrbac_client.groups.remove_owner(
group.object_id,
user.object_id
)
pytest.fail("Remove the only owner MUST fail")
except azure.graphrbac.models.GraphErrorException as err:
assert "The group must have at least one owner, hence this owner cannot be removed." in err.message
finally:
if group:
self.graphrbac_client.groups.delete(group.object_id)
def test_deleted_applications(self):
existing_deleted_applications = list(self.graphrbac_client.deleted_applications.list())
# Delete the app if already exists
for app in self.graphrbac_client.applications.list(filter="displayName eq 'pytest_deleted_app'"):
self.graphrbac_client.applications.delete(app.object_id)
# Create an app
app = self.graphrbac_client.applications.create({
'available_to_other_tenants': False,
'display_name': 'pytest_deleted_app',
'identifier_uris': ['http://pytest_deleted_app.org']
})
# Delete the app
self.graphrbac_client.applications.delete(app.object_id)
# I should see it now in deletedApplications
existing_deleted_applications = list(self.graphrbac_client.deleted_applications.list(
filter="displayName eq 'pytest_deleted_app'"
))
        # At least one, but if you executed this test a lot, you might see several apps deleted with this name
assert len(existing_deleted_applications) >= 1
assert all(app.display_name == 'pytest_deleted_app' for app in existing_deleted_applications)
# Ho my god, most important app ever
restored_app = self.graphrbac_client.deleted_applications.restore(app.object_id)
assert restored_app.object_id == app.object_id
# You know what, no I don't care
self.graphrbac_client.applications.delete(app.object_id)
self.graphrbac_client.deleted_applications.hard_delete(app.object_id)
def test_graphrbac_users(self):
user = self.graphrbac_client.users.create(
azure.graphrbac.models.UserCreateParameters(
user_principal_name="testbuddy#TEST@{}".format(self.ad_domain),
account_enabled=False,
display_name='Test Buddy',
mail_nickname='testbuddy',
password_profile=azure.graphrbac.models.PasswordProfile(
password='MyStr0ngP4ssword',
force_change_password_next_login=True
)
)
)
self.assertEqual(user.display_name, 'Test Buddy')
user = self.graphrbac_client.users.get(user.object_id)
self.assertEqual(user.display_name, 'Test Buddy')
user = self.graphrbac_client.users.get(user.user_principal_name)
self.assertEqual(user.display_name, 'Test Buddy')
users = self.graphrbac_client.users.list(
filter="displayName eq 'Test Buddy'"
)
users = list(users)
self.assertEqual(len(users), 1)
self.assertEqual(users[0].display_name, 'Test Buddy')
self.graphrbac_client.users.delete(user.object_id)
def test_groups(self):
group_create_parameters = azure.graphrbac.models.GroupCreateParameters(
display_name="pytestgroup_display",
mail_nickname="pytestgroup_nickname"
)
group = self.graphrbac_client.groups.create(group_create_parameters)
self.assertEqual(group.display_name, "pytestgroup_display")
group = self.graphrbac_client.groups.get(group.object_id)
self.assertEqual(group.display_name, "pytestgroup_display")
groups = self.graphrbac_client.groups.list(
filter="displayName eq 'pytestgroup_display'"
)
groups = list(groups)
self.assertEqual(len(groups), 1)
self.assertEqual(groups[0].display_name, "pytestgroup_display")
self.graphrbac_client.groups.delete(group.object_id)
def test_apps_and_sp(self):
# Delete the app if already exists
for app in self.graphrbac_client.applications.list(filter="displayName eq 'pytest_app'"):
self.graphrbac_client.applications.delete(app.object_id)
app = self.graphrbac_client.applications.create({
'available_to_other_tenants': False,
'display_name': 'pytest_app',
'identifier_uris': ['http://pytest_app.org'],
'app_roles': [{
"allowed_member_types": ["User"],
"description": "Creators can create Surveys",
"display_name": "SurveyCreator",
"id": "1b4f816e-5eaf-48b9-8613-7923830595ad", # Random, but fixed for tests
"is_enabled": True,
"value": "SurveyCreator"
}]
})
# Take this opportunity to test get_objects_by_object_ids
objects = self.graphrbac_client.objects.get_objects_by_object_ids({
'object_ids': [app.object_id],
'types': ['Application']
})
objects = list(objects)
assert len(objects) == 1
assert objects[0].display_name == 'pytest_app'
apps = list(self.graphrbac_client.applications.list(
filter="displayName eq 'pytest_app'"
))
assert len(apps) == 1
assert apps[0].app_roles[0].display_name == "SurveyCreator"
sp = self.graphrbac_client.service_principals.create({
'app_id': app.app_id, # Do NOT use app.object_id
'account_enabled': False
})
# Testing getting SP id by app ID
result = self.graphrbac_client.applications.get_service_principals_id_by_app_id(app.app_id)
assert result.value == sp.object_id
self.graphrbac_client.service_principals.update(
sp.object_id,
{
'account_enabled': False
}
)
self.graphrbac_client.service_principals.delete(sp.object_id)
self.graphrbac_client.applications.delete(app.object_id)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| mit | -7,201,706,950,653,175,000 | 37.701835 | 115 | 0.601162 | false |
jjlee/git-meld-index | setup.py | 1 | 1882 | #!/usr/bin/env python
import ast
import codecs
from setuptools import setup
def read_text(path):
with codecs.open(path, "r", "utf-8") as fh:
return fh.read()
def read_version(path):
with open(path) as fh:
for line in fh:
stripped = line.strip()
if stripped == "" or stripped.startswith("#"):
continue
elif line.startswith("from __future__ import"):
continue
else:
if not line.startswith("__version__ = "):
raise Exception("Can't find __version__ line in " + path)
break
else:
raise Exception("Can't find __version__ line in " + path)
_, _, quoted = line.rstrip().partition("= ")
return ast.literal_eval(quoted)
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Operating System :: POSIX",
"Programming Language :: Python",
# "Programming Language :: Python :: 3", # TODO
"Topic :: Software Development :: Version Control",
]
scripts = [
"bin/git-meld-index-run-merge-tool",
]
setup(
name="git-meld-index",
url='https://github.com/jjlee/git-meld-index',
author='John Lee',
author_email='[email protected]',
classifiers=classifiers,
data_files=[("share/man/man1", ["doc/git-meld-index.1"])],
description="Like git add -p but with meld (or any difftool)",
license="GPL",
long_description=read_text("README.md"),
package_dir={"": "src"},
platforms=["any"],
py_modules=["git_meld_index"],
scripts=scripts,
version=read_version("src/git_meld_index.py"),
zip_safe=False,
entry_points={
"console_scripts": [
"git-meld-index = git_meld_index:main",
],
}
)
| gpl-2.0 | 6,906,737,977,692,128,000 | 25.507042 | 77 | 0.571201 | false |
GeoCat/QGIS | python/plugins/processing/algs/grass7/ext/v_net_allpairs.py | 1 | 1236 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_net_allpairs.py
---------------------
Date : December 2015
Copyright : (C) 2015 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'December 2015'
__copyright__ = '(C) 2015, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from .v_net import incorporatePoints
def processCommand(alg, parameters):
incorporatePoints(alg, parameters)
| gpl-2.0 | -9,031,646,828,645,999,000 | 36.272727 | 75 | 0.412195 | false |
google/hypebot | hypebot/plugins/league/summoner_lib.py | 1 | 12403 | # Copyright 2018 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summoner-related libraries.
Fetches summoner data from Riot API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from absl import logging
import arrow
from hypebot.core import inflect_lib
from hypebot.protos.riot.v4 import constants_pb2
from hypebot.protos.riot.v4 import league_pb2
DEFAULT_REGION = 'na'
GAME_MODES = {
'ARAM': 'ARAM',
'ASCENSION': 'Ascension',
'CLASSIC': {
constants_pb2.QueueType.BOT_5x5: 'Bots',
constants_pb2.QueueType.BOT_TT_3x3: 'TT Bots',
constants_pb2.QueueType.GROUP_FINDER_5x5: 'Team Builder',
constants_pb2.QueueType.NORMAL_5x5_BLIND: 'Normals',
constants_pb2.QueueType.NORMAL_5x5_DRAFT: 'Normals',
constants_pb2.QueueType.NORMAL_3x3: 'TT Normals',
constants_pb2.QueueType.ONEFORALL_5x5: 'One For All (SR)',
constants_pb2.QueueType.RANKED_FLEX_SR: 'Flecks',
constants_pb2.QueueType.RANKED_FLEX_TT: 'TT Flecks',
constants_pb2.QueueType.RANKED_SOLO_5x5: 'YoloQ',
constants_pb2.QueueType.TEAM_BUILDER_RANKED_SOLO:
'YoloQ', # this is weird
constants_pb2.QueueType.RANKED_TEAM_3x3: 'Ranked 3s',
constants_pb2.QueueType.RANKED_TEAM_5x5: 'Ranked 5s',
constants_pb2.QueueType.TEAM_BUILDER_DRAFT_UNRANKED_5x5: 'Normals',
constants_pb2.QueueType.URF_5x5: 'URF',
constants_pb2.QueueType.CLASH: 'CLASH',
},
'KINGPORO': 'Poro King',
'ODIN': 'Dominion',
'ONEFORALL': 'One For All',
'SIEGE': 'Nexus Siege',
'GAMEMODEX': {
constants_pb2.QueueType.NEXUS_BLITZ: 'Blitz',
},
}
def NormalizeSummoner(input_text):
return ''.join(input_text.split()).lower()
class SummonerLib(object):
"""Class for fetching various data from Riot API."""
def __init__(self, rito, game):
self._rito = rito
self._game = game
def _GetMatchParticipant(self, encrypted_account_id, match_ref, match):
participant_ids = [
p.participant_id
for p in match.participant_identities
if p.player.current_account_id == encrypted_account_id
]
participant = None
if participant_ids:
[participant] = [
p for p in match.participants
if p.participant_id == participant_ids[0]
]
return participant
participants = [
p for p in match.participants if p.champion_id == match_ref.champion
]
if participants:
# Best guess, which is wrong for blind pick and one-for-all game types.
# Rito is full of filthy casuals.
return participants[0]
def Who(self, summoner):
"""Gets and formats data for a summoner."""
summoner_data = {}
game_data = {}
# Populate basic data (username, summoner name, region)
summoner_data['username'] = summoner['username']
summoner_data['summoner'] = summoner['summoner']
region = summoner.get('region', DEFAULT_REGION)
summoner_data['region'] = region
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
encrypted_account_id = summoner.get('encrypted_account_id', '')
r = self._rito.GetSummoner(region, summoner['summoner'])
if r:
summoner_data['profile_icon_id'] = r.profile_icon_id
r = self._rito.ListRecentMatches(region, encrypted_account_id)
last_game_ref = None
last_game = None
participant = None
if r:
last_game_ref = r.matches[0]
last_game = self._rito.GetMatch(region, last_game_ref.game_id)
if last_game:
participant = self._GetMatchParticipant(encrypted_account_id,
last_game_ref, last_game)
if last_game_ref and last_game and participant:
# Champion played
champion_id = participant.champion_id
game_data['champion'] = self._game.champion_id_to_name[str(champion_id)]
# Game type
logging.info('Evaluating (%s, %s)', last_game.game_mode,
last_game.game_type)
game_type = GAME_MODES.get(last_game.game_mode)
if last_game.game_mode == 'CLASSIC':
game_type = game_type.get(last_game.queue_id)
game_data['type'] = game_type or 'Unknown'
# Game time
# It seems rito api returns games in US/Pacific time, but this could
# change at any point in the future.
logging.info('SummonerLib: gametime: %s', last_game_ref.timestamp)
game_data['time'] = arrow.get(last_game_ref.timestamp /
1000.0).to('US/Pacific')
# Other data (win/loss, fantasy points, penta)
game_data['win'] = participant.stats.win
game_data['fantasy_points'] = self._ComputeFantasyPoints(
participant.stats)
summoner_data['penta'] = participant.stats.penta_kills > 0
summoner_data['last_game'] = game_data
# Find dynamic queue rank
rank = None
r = self._rito.ListLeaguePositions(region, encrypted_summoner_id)
if r:
leagues = r.positions
for league in leagues:
if league.queue_type == constants_pb2.QueueType.RANKED_SOLO_5x5:
tier = constants_pb2.Tier.Enum.Name(league.tier)[0].upper()
division = self._RomanToLatin(
league_pb2.TierRank.Enum.Name(league.rank))
rank = tier + division
if not rank:
rank = 'Unranked'
summoner_data['rank'] = rank
return summoner_data
def Champs(self, summoner):
"""Gets and formats champion mastery data for summoner."""
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
r = self._rito.ListChampionMasteries(region, encrypted_summoner_id)
if r:
logging.info('Got champ mastery data for %s/%s [%s]', region,
encrypted_summoner_id, summoner['summoner'])
# Calculate total number of chests received
total_chests = sum(1 for x in r.champion_masteries if x.chest_granted)
top_champs = []
for champ in r.champion_masteries[:3]:
top_champs.append(self._game.champion_id_to_name[str(
champ.champion_id)])
top_champ_lvl = r.champion_masteries[0].champion_level
chest_verb = ''
chest_verb_dict = {
(0, 2): 'receiving',
(2, 4): 'collecting',
(4, 8): 'earning',
(8, 16): 'amassing',
(16, 32): 'hoarding'
}
for range_spec, verb in chest_verb_dict.items():
if total_chests in range(*range_spec):
chest_verb = verb
break
if chest_verb:
chest_str = '%s %s' % (chest_verb,
inflect_lib.Plural(total_chests, 'chest'))
else:
chest_str = 'with a boatload of chests (%d)' % total_chests
return (u'{0} is a L{1} {2[0]} main, but sometimes likes to play {2[1]} '
'and {2[2]}, {3} this season.').format(summoner['summoner'],
top_champ_lvl, top_champs,
chest_str)
def ChampMasterySingle(self, summoner, champ_name):
"""Gets and formats champion mastery for summoner and specific champ."""
# Get the champ ID.
champ_id = self._game.GetChampId(champ_name)
if champ_id is None:
return 'Champion "%s" not found.' % champ_name
champ_display_name = self._game.GetChampDisplayName(champ_name)
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
r = self._rito.GetChampionMastery(region, encrypted_summoner_id, champ_id)
if r:
logging.info('Got single champ mastery data for %s/%s [%s] on Champ %s',
region, encrypted_summoner_id, summoner['summoner'],
champ_display_name)
champ_level = r.champion_level
points = r.champion_points
return ('%s is a L%d %s player with %d mastery points.' %
(summoner['summoner'], champ_level, champ_display_name, points))
else:
logging.info(
          'Got champ mastery data for %s/%s [%s] on Champ %s (no data)', region,
encrypted_summoner_id, summoner['summoner'], champ_display_name)
return '%s does not play %s.' % (summoner['summoner'], champ_display_name)
def Chimps(self, summoner):
"""Gets and formats Chimp mastery data for summoner."""
encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')
region = summoner.get('region', DEFAULT_REGION)
# Wukong is Champ ID 62
r = self._rito.GetChampionMastery(region, encrypted_summoner_id, 62)
if r:
logging.info('Got chimp mastery data for %s/%s [%s]', region,
encrypted_summoner_id, summoner['summoner'])
champ_level = r.champion_level
points = r.champion_points
return ('%s is a L%d Wukong player with %d mastery points.' %
(summoner['summoner'], champ_level, points))
else:
logging.info('Got chimp mastery data for %s/%s [%s] (no data)', region,
encrypted_summoner_id, summoner['summoner'])
return '%s is not a fan of monkeys.' % summoner['summoner']
def _ComputeFantasyPoints(self, stats):
"""Calculates the number of fantasy points recieved in a game."""
point_mapping = {
'kills': 2,
'deaths': -0.5,
'assists': 1.5,
'triple_kills': 2,
'quadra_kills': 5,
'penta_kills': 10,
'neutral_minions_killed': 0.01,
'total_minions_killed': 0.01
}
points = 0
for stat in point_mapping:
points += point_mapping[stat] * getattr(stats, stat)
if max(stats.assists, stats.kills) > 10:
points += 2
return points
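  # Worked example with a hypothetical stat line: 10 kills, 2 deaths,
  # 11 assists, 200 total minions and no multikills gives
  # 10*2 - 2*0.5 + 11*1.5 + 200*0.01 + 2 (kills/assists > 10 bonus) = 39.5.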
def _RomanToLatin(self, roman_numerals):
"""Translates a str roman numeral (I to V) into the latin equivalent."""
roman = roman_numerals.strip().upper()
return {'I': '1', 'II': '2', 'III': '3', 'IV': '4', 'V': '5'}[roman]
class SummonerTracker(object):
"""Tracks summoners."""
def __init__(self, rito, user_prefs):
self._rito = rito
self._user_prefs = user_prefs
def ParseSummoner(self, user, smurfs, region, name):
"""Parses a summoner(s) out of mangled garbage the user supplied as input.
Args:
user: The user which triggered this parsing. Converts 'me'.
smurfs: Whether to include smurfs.
region: If any/not default.
name: summoner or special string (e.g., 'me').
Returns:
A list of summoner_info dicts with the following fields:
- username: Unused for now
- summoner: The parsed summoner name
- encrypted_summoner_id: The encrypted rito summoner id, which is useful
for other API calls
- encrypted_account_id: The encrypted rito account id, which is useful
for other API calls
- encrypted_puuid: The encrypted rito PUUID, which is useful for other
API calls
- region: The given or inferred region for which this summoner is valid
"""
region = (region or self._user_prefs.Get(user, 'lol_region')).lower()
if name == 'me':
names = self._user_prefs.Get(user, 'lol_summoner')
if not names:
return []
else:
names = self._user_prefs.Get(name, 'lol_summoner') or name
names = [NormalizeSummoner(name) for name in names.split(',')]
if smurfs is None:
names = names[:1]
summoners = []
for name in names:
r = self._rito.GetSummoner(region, name)
if r:
summoners.append({
'username': None,
'summoner': r.name,
'encrypted_summoner_id': r.id,
'encrypted_account_id': r.account_id,
'encrypted_puuid': r.puuid,
'region': region
})
return summoners
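  # A hedged usage sketch (the handle and preference values are illustrative):
  # ParseSummoner('someuser', None, None, 'me') resolves the caller's stored
  # 'lol_summoner' preference, keeps only the first comma-separated name when
  # smurfs is None, and returns one dict per summoner that Rito recognizes.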
| apache-2.0 | -3,853,912,355,005,328,000 | 36.584848 | 80 | 0.622188 | false |
gsmke/django-leaf | leaf/tests/test_page.py | 1 | 2974 | import os
import pytest
from model_mommy import mommy
@pytest.mark.parametrize('value', (
'',
'/',
'/test/test2',
'/test/test2/',
))
def test_strip_trailing_slash(value):
from leaf.page import strip_trailing_slash
assert not strip_trailing_slash(value).endswith('/')
@pytest.mark.parametrize('url', (
'',
'example',
'example/test',
'example/test/',
'example/test2/test3',
))
def test_get_names(url):
from leaf.page import get_names
valid_paths = [
url,
os.path.join(url, 'index'),
os.path.join('pages', url),
os.path.join('pages', url, 'index'),
]
    assert get_names(url) == valid_paths
@pytest.mark.parametrize('url', (
'admin',
'admin/',
'admin/example',
'admin/example/test',
'admin/example/test/',
'admin/example/test2/test3',
))
def test_get_names_admin(url):
from leaf.page import get_names
assert get_names(url) == []
@pytest.mark.parametrize('url,expected', (
('', '/index'),
('/', '/index'),
('/test', '/test'),
('/test/test2', '/test/test2'),
))
def test_get_url(url, expected):
from leaf.page import get_url
class View:
kwargs = {
'url': url
}
assert get_url(View()) == expected
def test_get_url_kwarg():
from leaf.page import get_url
class View:
url = '/testing'
assert get_url(View()) == '/testing'
def test_get_url_none():
from django.http import Http404
from leaf.page import get_url
class View:
kwargs = {
'url': None
}
with pytest.raises(Http404):
get_url(View())
@pytest.mark.django_db
def test_get_from_database():
from leaf.page import get_from_database
node = mommy.make('leaf.PageNode', slug='test', template='example-page')
page_class = mommy.make("leaf_test.PageClass", node=node)
assert get_from_database('test/') == page_class
assert get_from_database('test') == page_class
@pytest.mark.django_db
def test_get_from_database_no_template():
from leaf.page import get_from_database
mommy.make('leaf.PageNode', slug='test')
assert get_from_database('test/') is None
assert get_from_database('test') is None
@pytest.mark.django_db
def test_get_from_database_no_page_class():
from leaf.page import get_from_database
mommy.make('leaf.PageNode', slug='test', template='example-page')
assert get_from_database('test/') is None
assert get_from_database('test') is None
@pytest.mark.django_db
def test_get_from_database_home_page():
from leaf.page import get_from_database
home_page = mommy.make('leaf.PageNode', slug='home', template='example-page')
page_class = mommy.make("leaf_test.PageClass", node=home_page)
assert get_from_database('') == page_class
assert get_from_database('/') == page_class
assert get_from_database('home') == page_class
assert get_from_database('home/') == page_class
| bsd-3-clause | 1,072,196,295,359,407,600 | 22.417323 | 81 | 0.624748 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/chargeitemdefinition_tests.py | 1 | 7197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import chargeitemdefinition
from .fhirdate import FHIRDate
class ChargeItemDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("ChargeItemDefinition", js["resourceType"])
return chargeitemdefinition.ChargeItemDefinition(js)
def testChargeItemDefinition1(self):
inst = self.instantiate_from("chargeitemdefinition-device-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ChargeItemDefinition instance")
self.implChargeItemDefinition1(inst)
js = inst.as_json()
self.assertEqual("ChargeItemDefinition", js["resourceType"])
inst2 = chargeitemdefinition.ChargeItemDefinition(js)
self.implChargeItemDefinition1(inst2)
def implChargeItemDefinition1(self, inst):
self.assertEqual(inst.applicability[0].description, "Verify ChargeItem pertains to Device 12345")
self.assertEqual(inst.applicability[0].expression, "%context.service.suppliedItem='Device/12345'")
self.assertEqual(inst.applicability[0].language, "text/fhirpath")
self.assertEqual(inst.description, "Financial details for custom made device")
self.assertEqual(inst.id, "device")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.currency, "EUR")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.value, 67.44)
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].code, "VK")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].display, "Verkaufspreis (netto)")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].type, "base")
self.assertEqual(inst.propertyGroup[1].applicability[0].description, "Gültigkeit Steuersatz")
self.assertEqual(inst.propertyGroup[1].applicability[0].expression, "%context.occurenceDateTime > '2018-04-01'")
self.assertEqual(inst.propertyGroup[1].applicability[0].language, "text/fhirpath")
self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].code, "MWST")
self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].display, "Mehrwersteuersatz")
self.assertEqual(inst.propertyGroup[1].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
self.assertEqual(inst.propertyGroup[1].priceComponent[0].factor, 1.19)
self.assertEqual(inst.propertyGroup[1].priceComponent[0].type, "tax")
self.assertEqual(inst.propertyGroup[2].applicability[0].description, "Gültigkeit Steuersatz")
self.assertEqual(inst.propertyGroup[2].applicability[0].expression, "%context.occurenceDateTime <= '2018-04-01'")
self.assertEqual(inst.propertyGroup[2].applicability[0].language, "text/fhirpath")
self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].code, "MWST")
self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].display, "Mehrwersteuersatz")
self.assertEqual(inst.propertyGroup[2].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/billing-attributes")
self.assertEqual(inst.propertyGroup[2].priceComponent[0].factor, 1.07)
self.assertEqual(inst.propertyGroup[2].priceComponent[0].type, "tax")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.url, "http://sap.org/ChargeItemDefinition/device-123")
def testChargeItemDefinition2(self):
inst = self.instantiate_from("chargeitemdefinition-ebm-example.json")
self.assertIsNotNone(inst, "Must have instantiated a ChargeItemDefinition instance")
self.implChargeItemDefinition2(inst)
js = inst.as_json()
self.assertEqual("ChargeItemDefinition", js["resourceType"])
inst2 = chargeitemdefinition.ChargeItemDefinition(js)
self.implChargeItemDefinition2(inst2)
def implChargeItemDefinition2(self, inst):
self.assertEqual(inst.applicability[0].description, "Excludes billing code 13250 for same Encounter")
self.assertEqual(inst.applicability[0].expression, "[some CQL expression]")
self.assertEqual(inst.applicability[0].language, "text/cql")
self.assertEqual(inst.applicability[1].description, "Applies only once per Encounter")
self.assertEqual(inst.applicability[1].expression, "[some CQL expression]")
self.assertEqual(inst.applicability[1].language, "text/CQL")
self.assertEqual(inst.code.coding[0].code, "30110")
self.assertEqual(inst.code.coding[0].display, "Allergologiediagnostik I")
self.assertEqual(inst.code.coding[0].system, "http://fhir.de/CodingSystem/kbv/ebm")
self.assertEqual(inst.description, "Allergologisch-diagnostischer Komplex zur Diagnostik und/oder zum Ausschluss einer (Kontakt-)Allergie vom Spättyp (Typ IV), einschl. Kosten")
self.assertEqual(inst.effectivePeriod.end.date, FHIRDate("2018-06-30").date)
self.assertEqual(inst.effectivePeriod.end.as_json(), "2018-06-30")
self.assertEqual(inst.effectivePeriod.start.date, FHIRDate("2018-04-01").date)
self.assertEqual(inst.effectivePeriod.start.as_json(), "2018-04-01")
self.assertEqual(inst.id, "ebm")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.currency, "EUR")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].amount.value, 67.44)
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].code, "gesamt-euro")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].display, "Gesamt (Euro)")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].code.coding[0].system, "http://fhir.de/CodeSystem/kbv/ebm-attribute")
self.assertEqual(inst.propertyGroup[0].priceComponent[0].type, "base")
self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].code, "gesamt-punkte")
self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].display, "Gesamt (Punkte)")
self.assertEqual(inst.propertyGroup[0].priceComponent[1].code.coding[0].system, "http://fhir.de/CodeSystem/kbv/ebm-attribute")
self.assertEqual(inst.propertyGroup[0].priceComponent[1].factor, 633)
self.assertEqual(inst.propertyGroup[0].priceComponent[1].type, "informational")
self.assertEqual(inst.status, "active")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.url, "http://fhir.de/ChargeItemDefinition/kbv/ebm-30110")
self.assertEqual(inst.version, "2-2018")
| bsd-3-clause | -729,465,384,742,688,800 | 66.233645 | 185 | 0.720044 | false |
felgari/k2 | aptrend.py | 1 | 2851 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Felipe Gallego. All rights reserved.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Script to calculate ap for trend.
"""
import sys
import os
import csv
from ctes import *
class ApTrend(object):
def __init__(self):
self._ap = []
def calculate_ap(self, trend, first_trend, second_trend, pos1, pos2):
cur_ap = TREND_IG
v1 = trend[0]
v2 = trend[1]
v3 = trend[2]
if v1 > TREND_HIGH_VALUE:
if first_trend == AVPOS_TREND_UP:
cur_ap = TREND_1
elif v2 > TREND_HIGH_VALUE:
if v2 - v1 > v3 and first_trend == AVPOS_TREND_UP:
cur_ap = TREND_2
elif v2 - v3 > v1 and second_trend == AVPOS_TREND_UP:
cur_ap = TREND_4
else:
cur_ap = TREND_3
elif v3 > TREND_HIGH_VALUE:
if first_trend == AVPOS_TREND_DOWN or second_trend == AVPOS_TREND_UP:
cur_ap = TREND_5
        elif abs(v1 - TREND_AV) < TREND_AV_DIFF and \
            abs(v2 - TREND_AV) < TREND_AV_DIFF and \
            abs(v3 - TREND_AV) < TREND_AV_DIFF:
cur_ap = TREND_3
elif first_trend == AVPOS_TREND_UP and second_trend == AVPOS_TREND_DOWN:
cur_ap = TREND_1
elif first_trend == AVPOS_TREND_DOWN and second_trend == AVPOS_TREND_UP \
and v2 >= v1 and v3 > v1:
cur_ap = TREND_4
elif pos1 < pos2 and pos1 - pos2 <= TREND_POS_DIFF_H:
cur_ap = TREND_1
elif pos1 > pos2 and pos1 - pos2 >= TREND_POS_DIFF_V:
cur_ap = TREND_4
self._ap.append(cur_ap)
return cur_ap
def write_data(self, index):
out_file_name = os.path.join(DATA_PATH, AP_FILE_TREND_PREFIX + str(index) + AP_FILE_TREND_EXT)
print("Saving trend ap in: %s" % out_file_name)
with open(out_file_name, "wt") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=CSV_DELIMITER)
for ap_d in self._ap:
row = [ ap_d ]
                csvwriter.writerow(row)
 | gpl-3.0 | 607,653,928,808,055,200 | 32.952381 | 102 | 0.558401 | false |
protwis/protwis | angles/migrations/0011_auto_20200402_1344.py | 1 | 1225 | # Generated by Django 2.0.8 on 2020-04-02 11:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('angles', '0010_residueangle_tau_angle'),
]
operations = [
migrations.AddField(
model_name='residueangle',
name='chi1',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi2',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi3',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi4',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='chi5',
field=models.FloatField(default=0, null=True),
),
migrations.AddField(
model_name='residueangle',
name='missing_atoms',
field=models.IntegerField(default=0, null=True),
),
]
| apache-2.0 | -1,172,428,415,425,360,100 | 27.488372 | 60 | 0.54449 | false |
protwis/protwis | build/management/commands/parse_excel_annotations.py | 1 | 23312 | from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
from django.conf import settings
from django.db import connection
from common.alignment import Alignment, ClosestReceptorHomolog
from protein.models import Protein, ProteinSegment
from structure.models import Structure
import datetime
import logging
from optparse import make_option
import os
import shutil
import xlrd
import yaml
from collections import OrderedDict
import pprint
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def represent_ordereddict(dumper, data):
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)
yaml.add_representer(OrderedDict, represent_ordereddict)
yaml.add_constructor(_mapping_tag, dict_constructor)
class Command(BaseCommand):
    help = 'Parse structural annotation Excel files and dump the YAML annotation files used by the build scripts'
logger = logging.getLogger(__name__)
def add_arguments(self, parser):
parser.add_argument('-f', '--filename',
action='store',
dest='filename',
help='Path to Uniprot text file')
parser.add_argument('-m',
action='store_true',
default=False,
help='Run main template search. Updates Xtal_Templ.csv with closest receptor homologs')
annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Structural_Annotation.xlsx'])
xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends.yaml'])
mod_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'mod_xtal_segends.yaml'])
xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_segends_bw.yaml'])
ECD_annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'ECD_annotation.xlsx'])
ClassD_annotation_source_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'Class_D_Annotation.xlsx'])
non_xtal_seg_end_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends.yaml'])
non_xtal_seg_end_bw_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'non_xtal_segends_bw.yaml'])
all_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'all_anomalities.yaml'])
xtal_anomalities_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'xtal_anomalities.yaml'])
sequence_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'sequences.yaml'])
ECD_wt_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ECD_wt.yaml'])
ECD_anomalies_file = os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation', 'ECD_anomalies.yaml'])
if not os.path.exists(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation'])):
os.makedirs(os.sep.join([settings.DATA_DIR, 'structure_data', 'annotation']))
def handle(self, *args, **options):
self.data = self.parse_excel(self.annotation_source_file)
self.dump_files()
self.ECD_data = self.parse_excel(self.ECD_annotation_source_file)
self.dump_ECD_files()
self.ClassD_data = self.parse_excel(self.ClassD_annotation_source_file)
self.dump_ClassD_data()
# self.analyse_annotation_consistency()
self.find_representatives()
if options['m']:
self.main_template_search()
def dump_ECD_files(self):
data_dict = OrderedDict()
for key, val in self.ECD_data['wt'].items():
if val['H1x50']=='':
continue
entry_name = val['UniProt']
del val['Key']
del val['UniProt']
data_dict[entry_name] = val
with open(self.ECD_wt_file, 'w') as outfile:
yaml.dump(data_dict, outfile, indent=4)
anomalies = OrderedDict()
for key, val in self.ECD_data['anomalies'].items():
entry_name = val['protein']
del val['protein']
anomalies[entry_name] = val
with open(self.ECD_anomalies_file, 'w') as outfile:
yaml.dump(anomalies, outfile, indent=4)
def dump_ClassD_data(self):
data_dict1, data_dict2 = OrderedDict(), OrderedDict()
for key, val in self.ClassD_data['SegEnds_NonXtal_Prot#'].items():
entry_name = val['UniProt'].lower()
del val['Key']
del val['UniProt']
del val['']
data_dict1[entry_name] = val
with open(self.non_xtal_seg_end_file, 'a') as outfile:
yaml.dump(data_dict1, outfile, indent=4)
for key, val in self.ClassD_data['SegEnds_NonXtal_BW#'].items():
entry_name = val['UniProt'].lower()
del val['UniProt']
data_dict2[entry_name] = val
with open(self.non_xtal_seg_end_bw_file, 'a') as outfile:
yaml.dump(data_dict2, outfile, indent=4)
data = self.ClassD_data["Bulges_Constrictions"]
NonXtal_Bulges_Constr_GPCRdb = {}
for structure,vals in data.items():
entry = structure.lower()
NonXtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
NonXtal_Bulges_Constr_GPCRdb[entry][key] = val
NonXtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(NonXtal_Bulges_Constr_GPCRdb.items()))
with open(self.all_anomalities_file, 'a') as outfile:
yaml.dump(NonXtal_Bulges_Constr_GPCRdb, outfile, indent=4)
data = self.ClassD_data["Seqs"]
Seqs = {}
for structure,vals in data.items():
entry = structure.lower()
Seqs[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
Seqs[entry][key] = val
Seqs = OrderedDict(sorted(Seqs.items()))
with open(self.sequence_file, 'a') as outfile:
yaml.dump(Seqs, outfile, indent=4)
structures = self.ClassD_data["SegEnds_Xtal_Prot#"]
pdb_info = {}
pdb_info_all = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
continue
if structure.split("_")[-1] == "dist":
continue
#print(structure)
pdb_id = structure.split("_")[-1]
pdb_info[pdb_id] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info[pdb_id][key] = val
for structure,vals in structures.items():
entry = structure
pdb_info_all[entry] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info_all[entry][key] = val
pdb_info = OrderedDict(sorted(pdb_info.items()))
with open(self.mod_xtal_seg_end_file, 'a') as outfile:
yaml.dump(pdb_info, outfile, indent=4)
pdb_info_all = OrderedDict(sorted(pdb_info_all.items()))
with open(self.xtal_seg_end_file, 'a') as outfile:
yaml.dump(pdb_info_all, outfile, indent=4)
def parse_excel(self,path):
workbook = xlrd.open_workbook(path)
worksheets = workbook.sheet_names()
d = {}
for worksheet_name in worksheets:
if worksheet_name in d:
print('Error, worksheet with this name already loaded')
continue
d[worksheet_name] = OrderedDict()
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0 #skip first, otherwise -1
headers = []
for i in range(num_cells):
h = worksheet.cell_value(0, i)
if h=="":
#replace header with index if empty
h = "i_"+str(i)
if h in headers:
# print('already have ',h)
h += "_"+str(i)
# print(h)
                headers.append(h)  # use the cleaned header (empty cells and duplicates handled above)
for curr_row in range(1,num_rows+1):
row = worksheet.row(curr_row)
key = worksheet.cell_value(curr_row, 0)
if key=='':
#in case there is no key for whatever reason
# print("no key!")
continue
# if key in d[worksheet_name]:
# print(key, "already in",worksheet_name)
d[worksheet_name][key] = OrderedDict()
temprow = {}
for curr_cell in range(num_cells):
# cell_type = worksheet.cell_type(curr_row, curr_cell)
cell_value = worksheet.cell_value(curr_row, curr_cell)
# temprow.append(cell_value)
if headers[curr_cell] not in d[worksheet_name][key]:
#do not overwrite
d[worksheet_name][key][headers[curr_cell]] = cell_value
# if curr_row>2: break
return d
def analyse_annotation_consistency(self):
NonXtal = self.data["Bulges_Constr_NonXtal_GPCRdb#"]
Xtal = self.data["Bulges_Constr_Xtal_GPCRdb#"]
output = {}
counting_xtal = {}
counting_non_xtal = {}
for entry_protein,vals in NonXtal.items():
anomalies=[]
anomalies_xtal=[]
for key,val in vals.items():
if "x" in val and "_" not in val:
if val.index("x") in [1,2]:
anomalies.append(val)
if vals['Xtal Templ'] in Xtal:
#print(Xtal[vals['Xtal Templ']])
for key,val in Xtal[vals['Xtal Templ']].items():
if "x" in val and "_" not in val:
if val.index("x") in [1,2]:
anomalies_xtal.append(val)
if entry_protein==vals['Xtal Templ']:
list1 = list(set(anomalies) - set(anomalies_xtal))
list2 = list(set(anomalies_xtal) - set(anomalies))
if list1 or list2:
for i in list1:
if i not in counting_non_xtal:
counting_non_xtal[i] = 0
counting_non_xtal[i] += 1
for i in list2:
if i not in counting_xtal:
counting_xtal[i] = 0
counting_xtal[i] += 1
#print("ISSUE!")
#print(entry_protein)
#print("NonXtal_anomalies",anomalies,"Xtal_anomalies",anomalies_xtal)
if list1: print(entry_protein,vals['Xtal Templ'],"Present in non-xtal, but not xtal",list1)
if list2: print(entry_protein,vals['Xtal Templ'],"Present in xtal, but not non-xtal",list2)
print("Overall")
print("Present in non-xtal, but not xtal",counting_xtal)
print("Present in xtal, but not non-xtal",counting_non_xtal)
structures = self.data["SegEnds_Xtal_Prot#"]
structures_non_xtal = self.data["SegEnds_NonXtal_Prot#"]
info = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
# print(structure)
entry = vals['UniProt']
info[entry] = {}
for key,val in vals.items():
# print(val,key)
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
info[entry][key] = val
if structures_non_xtal[entry][key]!=val:
print("error with ",entry,key,"Xtal sheet:",val,"NonXtal sheet:",structures_non_xtal[entry][key])
print(structures_non_xtal[entry])
print(vals)
#print(structure,info)
# with open(self.xtal_seg_end_file, 'w') as outfile:
# yaml.dump(pdb_info, outfile)
def dump_files(self):
structures = self.data["SegEnds_Xtal_Prot#"]
pdb_info = {}
pdb_info_all = {}
for structure,vals in structures.items():
if structure.split("_")[-1] == "wt":
continue
if structure.split("_")[-1] == "dist":
continue
#print(structure)
pdb_id = structure.split("_")[-1]
pdb_info[pdb_id] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info[pdb_id][key] = val
for structure,vals in structures.items():
entry = structure
pdb_info_all[entry] = OrderedDict()
for key,val in vals.items():
if len(key)>3:
continue
if not key:
continue
if key[-1]!="b" and key[-1]!="e":
continue
pdb_info_all[entry][key] = val
data = self.data["SegEnds_Xtal_BW#"]
Xtal_SegEnds_BW = {}
for structure,vals in data.items():
entry = structure
Xtal_SegEnds_BW[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e":
continue
Xtal_SegEnds_BW[entry][key] = val
data = self.data["SegEnds_NonXtal_BW#"]
NonXtal_SegEnds_BW = {}
for structure,vals in data.items():
entry = structure
NonXtal_SegEnds_BW[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e" and key!="XtalTempl":
continue
NonXtal_SegEnds_BW[entry][key] = val
data = self.data["SegEnds_NonXtal_Prot#"]
NonXtal_SegEnds_Prot = {}
for structure,vals in data.items():
entry = structure
NonXtal_SegEnds_Prot[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
if len(key)>3 and key[-1]!="b" and key[-1]!="e" and key!="Xtal Templ":
continue
NonXtal_SegEnds_Prot[entry][key] = val
# data = self.data["Bulges_Constr_Xtal_GPCRdb#"]
# Xtal_Bulges_Constr_GPCRdb = {}
# for structure,vals in data.items():
# entry = structure
# Xtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
# for key,val in vals.items():
# if not key:
# continue
# Xtal_Bulges_Constr_GPCRdb[entry][key] = val
data = self.data["Bulges_Constr_NonXtal_GPCRdb#"]
NonXtal_Bulges_Constr_GPCRdb = {}
for structure,vals in data.items():
entry = structure
NonXtal_Bulges_Constr_GPCRdb[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
NonXtal_Bulges_Constr_GPCRdb[entry][key] = val
data = self.data["Seqs"]
Seqs = {}
for structure,vals in data.items():
entry = structure
Seqs[entry] = OrderedDict()
for key,val in vals.items():
if not key:
continue
Seqs[entry][key] = val
pdb_info = OrderedDict(sorted(pdb_info.items()))
with open(self.mod_xtal_seg_end_file, 'w') as outfile:
yaml.dump(pdb_info, outfile, indent=4)
pdb_info_all = OrderedDict(sorted(pdb_info_all.items()))
with open(self.xtal_seg_end_file, 'w') as outfile:
yaml.dump(pdb_info_all, outfile, indent=4)
Xtal_SegEnds_BW = OrderedDict(sorted(Xtal_SegEnds_BW.items()))
with open(self.xtal_seg_end_bw_file, 'w') as outfile:
yaml.dump(Xtal_SegEnds_BW, outfile, indent=4)
NonXtal_SegEnds_BW = OrderedDict(sorted(NonXtal_SegEnds_BW.items()))
with open(self.non_xtal_seg_end_bw_file, 'w') as outfile:
yaml.dump(NonXtal_SegEnds_BW, outfile, indent=4)
NonXtal_SegEnds_Prot = OrderedDict(sorted(NonXtal_SegEnds_Prot.items()))
with open(self.non_xtal_seg_end_file, 'w') as outfile:
yaml.dump(NonXtal_SegEnds_Prot, outfile, indent=4)
# Xtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(Xtal_Bulges_Constr_GPCRdb.items()))
# with open(self.xtal_anomalities_file, 'w') as outfile:
# yaml.dump(Xtal_Bulges_Constr_GPCRdb, outfile, indent=4)
NonXtal_Bulges_Constr_GPCRdb = OrderedDict(sorted(NonXtal_Bulges_Constr_GPCRdb.items()))
with open(self.all_anomalities_file, 'w') as outfile:
yaml.dump(NonXtal_Bulges_Constr_GPCRdb, outfile, indent=4)
Seqs = OrderedDict(sorted(Seqs.items()))
with open(self.sequence_file, 'w') as outfile:
yaml.dump(Seqs, outfile, indent=4)
def main_template_search(self):
output_csv = ''
changes = {}
counter = 0
for protein, values in self.data['Xtal_Templ'].items():
values = self.data['Xtal_Templ'][protein]
crh = ClosestReceptorHomolog(protein)
closest_hom = crh.find_closest_receptor_homolog()
if values['Template']!=closest_hom.entry_name:
changes[protein] = [values['Template'], closest_hom.entry_name]
output_csv+='{},{}\n'.format(protein, closest_hom.entry_name)
counter+=1
with open(os.sep.join([settings.DATA_DIR,'structure_data','annotation','xtal_templates.csv']),'w') as f:
f.write(output_csv)
if len(changes)>0:
print('Changed {} entries out of {} (reference: [changed_from, changed_to]):'.format(len(changes), counter))
print(changes)
print('INFO: xtal_templates.csv file updated. Please update Structural_Annotation.xlsx Xtal_Templ sheet with this .csv')
return changes
def find_representatives(self):
grouped = {}
counter = 0
xtals, nums, states, resolutions = [], [], [], []
out = OrderedDict()
exceptions = ['4L6R']
with open(os.sep.join([settings.DATA_DIR,'structure_data','annotation','xtal_representatives.yaml']), 'w') as outfile:
for key, values in self.data['SegEnds_Xtal_Prot#'].items():
if counter==0:
prev_rec = values['UniProt']
counter+=1
if values['PDB']=='_wt' or 'dist' in key:
continue
if values['Repr']!='-':
if values['Repr']=='Repr_Act':
actstat = 'Active'
elif values['Repr']=='Repr_Inter':
actstat = 'Intermediate'
elif values['Repr']=='Repr_Inact':
actstat = 'Inactive'
out[values['PDB']] = actstat
yaml.dump(out, outfile, default_flow_style=False)
# if prev_rec!=values['UniProt'] or counter==len(self.data['SegEnds_Xtal_Prot#']):
# if counter==len(self.data['SegEnds_Xtal_Prot#']):
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# if len(xtals)>0:
# max_num_ia, max_x_ia, max_num_a, max_x_a, ia_count, a_count = 0, 0, 0, 0, 0, 0
# for x, n, s, r in zip(xtals, nums, states, resolutions):
# if s=='Inact':
# if ia_count==0:
# max_res_ia = r
# if n>max_num_ia and x[-4:] not in exceptions:
# max_num_ia = n
# max_x_ia = x
# max_res_ia = r
# elif n==max_num_ia and x[-4:] not in exceptions:
# if r<max_res_ia:
# max_num_ia = n
# max_x_ia = x
# max_res_ia = r
# ia_count+=1
# elif s=='Act':
# if a_count==0:
# max_res_a = r
# if n>max_num_a and x[-4:] not in exceptions:
# max_num_a = n
# max_x_a = x
# elif n==max_num_a and x[-4:] not in exceptions:
# if r<max_res_a:
# max_num_a = n
# max_x_a = x
# max_res_a = r
# a_count+=1
# for x, n in zip(xtals, nums):
# if x==max_x_ia:
# out[x] = 'Repr_Inact'
# elif x==max_x_a:
# out[x] = 'Repr_Act'
# else:
# out[x] = '-'
# yaml.dump(out, outfile, indent=4)
# xtals, nums, states, resolutions = [], [], [], []
# out = OrderedDict()
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# else:
# xtals.append(key)
# nums.append(values['#Res'])
# states.append(values['State'])
# resolutions.append(values['Resolution'])
# prev_rec = values['UniProt']
| apache-2.0 | 7,577,168,397,185,332,000 | 42.090573 | 132 | 0.506992 | false |
bwhite/picarus | server/holding/faces.py | 1 | 2228 | elif action == 'i/faces':
# TODO: Temporary, remove when done
names = set(['George_W_Bush', 'Colin_Powell', 'Tony_Blair', 'Donald_Rumsfeld', 'Gerhard_Schroeder',
'Ariel_Sharon', 'Hugo_Chavez', 'Junichiro_Koizumi', 'Serena_Williams', 'John_Ashcroft'])
self._slice_validate(start_row, stop_row, 'r')
import cv2
r = None
labels = {}
pos = 0
neg = 0
data = []
lab = []
num_train = 2000
for n, (cur_row, cur_cols) in enumerate(hadoopy_hbase.scanner(thrift, self.table,
start_row=start_row, per_call=10,
stop_row=stop_row, columns=['data:image', 'meta:class'])):
cur_class = cur_cols['meta:class']
if cur_class not in names:
continue
if cur_class not in labels:
labels[cur_class] = len(labels)
label = labels[cur_class]
image = cv2.imdecode(np.fromstring(cur_cols['data:image'], np.uint8), 0)
# Crop
image = np.ascontiguousarray(image[62:-62, 62:-62])
#if n == 0:
# cv2.imwrite('out.png', image)
if n < num_train:
lab.append(label)
data.append(image)
else:
if r is None:
r = cv2.createLBPHFaceRecognizer()
r.train(data, np.array(lab))
print('TRAINED-----------------------')
pred = r.predict(image)[0]
print((pred, label))
if pred == label:
pos += 1
else:
neg += 1
print((cur_class, image.shape, n, pos, neg, pos / float(pos + neg + .00000001)))
| apache-2.0 | -8,709,221,514,537,708,000 | 52.047619 | 136 | 0.374327 | false |
ToFuProject/tofu | tofu/tests/tests02_data/test_04_core_new.py | 1 | 23127 |
"""
This module contains tests for tofu.data in its structured version
"""
# Built-in
import os
import warnings
# Standard
import numpy as np
import scipy.constants as scpct
import matplotlib.pyplot as plt
# tofu-specific
from tofu import __version__
import tofu.data as tfd
import tofu.utils as tfu
_here = os.path.abspath(os.path.dirname(__file__))
VerbHead = 'tofu.data.DataCollection'
#######################################################
#
# Setup and Teardown
#
#######################################################
def setup_module(module):
print("") # this is to get a newline after the dots
LF = os.listdir(_here)
lss = ['TFD_', 'Test', '.npz']
LF = [lf for lf in LF if all([ss in lf for ss in lss])]
LF = [
lf for lf in LF
if not lf[lf.index('_Vv')+2:lf.index('_U')] == __version__
]
print("Removing the following previous test files:")
print(LF)
for lf in LF:
os.remove(os.path.join(_here, lf))
# print("setup_module before anything in this file")
def teardown_module(module):
# os.remove(VesTor.Id.SavePath + VesTor.Id.SaveName + '.npz')
# os.remove(VesLin.Id.SavePath + VesLin.Id.SaveName + '.npz')
# print("teardown_module after everything in this file")
# print("") # this is to get a newline
LF = os.listdir(_here)
lss = ['TFD_', 'Test', '.npz']
LF = [lf for lf in LF if all([ss in lf for ss in lss])]
LF = [
lf for lf in LF
if lf[lf.index('_Vv')+2:lf.index('_U')] == __version__
]
print("Removing the following test files:")
print(LF)
for lf in LF:
os.remove(os.path.join(_here, lf))
pass
# def my_setup_function():
# print ("my_setup_function")
# def my_teardown_function():
# print ("my_teardown_function")
# @with_setup(my_setup_function, my_teardown_function)
# def test_numbers_3_4():
# print 'test_numbers_3_4 <============================ actual test code'
# assert multiply(3,4) == 12
# @with_setup(my_setup_function, my_teardown_function)
# def test_strings_a_3():
# print 'test_strings_a_3 <============================ actual test code'
# assert multiply('a',3) == 'aaa'
#######################################################
#
# Creating Ves objects and testing methods
#
#######################################################
class Test01_DataCollection(object):
@classmethod
def setup_class(cls, Name='data1', SavePath='./', verb=False):
# time vectors
t0 = np.linspace(0, 10, 10)
t1 = np.linspace(0, 10, 50)
t2 = np.linspace(0, 10, 100)
cls.lt = [t0, t1, t2]
# radii vectors
r0 = np.linspace(0, 1, 10)
r1 = np.linspace(0, 1, 50)
r2 = np.linspace(0, 1, 200)
cls.lr = [r0, r1, r2]
# chan
ch0 = np.arange(0, 2)
ch1 = np.arange(0, 5)
cls.lch = [ch0, ch1]
# meshes
mesh0 = {
'type': 'rect',
'R': np.r_[0, 1, 2, 3],
'Z': np.r_[0, 1, 2],
'shapeRZ': ('R', 'Z'),
}
mesh1 = {
'type': 'tri',
'nodes': np.array([[0, 1, 1, 0], [0, 0, 1, 1]]).T,
'faces': np.array([[0, 1, 2], [2, 3, 0]]),
}
cls.lmesh = [mesh0, mesh1]
# traces
trace00, trace01 = 2.*t0, np.sin(t0)
trace10, trace11 = np.cos(t1), t1[:, None]*t0
trace20, trace21 = np.sin(r0), r0[:, None]*r1
trace30, trace31 = np.cos(r2), t0[:, None]*np.sin(r2)
trace40 = t2[:, None, None]*r1[None, :, None]*ch0[None, None, :]
trace41 = t2[None, None, :]*r2[:, None, None]*ch1[None, :, None]
trace50 = np.cos(t0)[:, None, None]*(
mesh0['R'][None, :, None]*mesh0['Z'][None, None, :]
)
trace51 = t1[:, None]*(mesh1['faces'][:, 0:1]).T
cls.ltrace = [trace00, trace01, trace10, trace11,
trace20, trace21, trace30, trace31,
trace40, trace41, trace50, trace51]
# Anisotropic rect + tri
BRr = np.cos(t1)[:, None, None]*(
mesh0['R'][None, :, None]*mesh0['Z'][None, None, :]
)
BPhir = np.cos(t1)[:, None, None]*(
mesh0['R'][None, :, None]*mesh0['Z'][None, None, :]
)
BZr = np.cos(t1)[:, None, None]*(
mesh0['R'][None, :, None]*mesh0['Z'][None, None, :]
)
BRt = t0[:, None]*(mesh1['faces'][:, 0:1]).T
BPhit = t0[:, None]*(mesh1['faces'][:, 0:1]).T
BZt = t0[:, None]*(mesh1['faces'][:, 0:1]).T
cls.lB = [BRr, BPhir, BZr, BRt, BPhit, BZt]
# polygons
lpoly0 = [np.ones((2, 5)), np.ones((2, 8))]
lpoly1 = [np.ones((2, 5)), np.ones((2, 8)), np.ones((2, 5))]
lpoly2 = [np.ones((2, 5)), np.ones((2, 5))]
cls.lpoly = [lpoly0, lpoly1, lpoly2]
# spectral lines
l0 = {
'key': 'l0', 'lambda0': 5e-10,
'origin': '[1]', 'transition': 'A->B',
}
l1 = {
'key': 'l1', 'lambda0': 5e-10,
'origin': '[2]', 'transition': 'B->C',
}
l2 = {
'key': 'l2',
'data': t0[:, None]*t1[None, :], 'ref': ('t0', 't1'),
'lambda0': 5e-10, 'origin': '[2]', 'transition': 'B->C'
}
cls.llines = [l0, l1, l2]
# Configs
# conf0 = tfg.utils.create_config(case='B2')
# conf1 = tfg.utils.create_config(case='B3')
dref = {
't0': {
'data': cls.lt[0], 'group': 'time', 'units': 's',
'quant': 'time',
},
't1': {
'data': cls.lt[1], 'group': 'time', 'units': 'min',
'quant': 'time',
},
'r2': {
'data': cls.lr[2],
'group': 'radius', 'units': 'm', 'quant': 'rho',
},
'mesh0': {
'data': mesh0,
},
'mesh1': {
'data': mesh1,
},
}
ddata = {
'trace00': {'data': cls.ltrace[0], 'ref': ('t0',)},
'trace10': {'data': cls.ltrace[2], 'ref': ('t1',), 'units': 's'},
'trace11': {'data': cls.ltrace[3], 'ref': ('t1', 't0')},
'trace30': {'data': cls.ltrace[6], 'ref': ('r2',)},
'trace31': {'data': cls.ltrace[7], 'ref': ('t0', 'r2')},
'trace50': {'data': trace50, 'ref': ('t0', 'mesh0')},
'BRr': {'data': BRr, 'ref': ('t1', 'mesh0'), 'quant': 'BR'},
'BPhir': {'data': BPhir, 'ref': ('t1', 'mesh0'), 'quant': 'BPhi'},
'BZr': {'data': BZr, 'ref': ('t1', 'mesh0'), 'quant': 'BZ'},
'BRt': {'data': BRt, 'ref': ('t0', 'mesh1'), 'quant': 'BR'},
'BPhit': {'data': BPhit, 'ref': ('t0', 'mesh1'), 'quant': 'BPhi'},
'BZt': {'data': BZt, 'ref': ('t0', 'mesh1'), 'quant': 'BZ'},
}
data = tfd.DataCollection(dref=dref, ddata=ddata, Name=Name)
# Spectrallines
dref = {
't0': {'data': cls.lt[0], 'group': 'time', 'units': 's'},
't1': {'data': cls.lt[1], 'group': 'time', 'units': 'min'},
}
dref_static = {
'source': {
'[1]': {'long': 'blabla'},
'[2]': {'long': 'blibli'},
},
'ion': {
'O3+': {'element': 'O'},
'Ca6+': {'element': 'Ca'},
},
}
dobj = {
'lines': {
'l0': {
'lambda0': 3e-10, 'source': '[1]', 'transition': 'A->B',
},
'l1': {
'lambda0': 4e-10, 'source': '[2]', 'transition': 'B->C',
},
'l2': {
'data': t0[:, None]*t1[None, :], 'ref': ('t0', 't1'),
'lambda0': 5e-10, 'source': '[2]', 'transition': 'B->C',
},
}
}
sl = tfd.DataCollection()
sl._data_none = True
sl.update(dref=dref, dref_static=dref_static, dobj=dobj)
cls.lobj = [data, sl]
@classmethod
def setup(self):
pass
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
def test01_init_from_combinations(self):
# Try with minimalist input (implicit with n = 1)
dgroup = 'time'
dref = {'t0': self.lt[0]}
ddata = {
'trace00': self.ltrace[0],
'trace01': {'data': self.ltrace[1], 'units': 'a.u.'},
}
data = tfd.DataCollection(
dgroup=dgroup, dref=dref, ddata=ddata,
Name='data',
)
# Try with minimalist input
dref = {'t0': {'data': self.lt[0], 'group': 'time'},
't1': {'data': self.lt[1], 'group': 'time', 'units': 's'},
'r2': {'data': self.lr[2], 'group': 'radius', 'foo': 'bar'}}
ddata = {
'trace00': {'data': self.ltrace[0], 'ref': 't0'},
'trace10': {'data': self.ltrace[2], 'ref': 't1', 'units': 'a'},
'trace11': {'data': self.ltrace[3], 'ref': ('t1', 't0')},
'trace30': {'data': self.ltrace[6], 'ref': ('r2',), 'foo': 'bar'},
'trace31': {'data': self.ltrace[7], 'ref': ('t0', 'r2')}
}
data = tfd.DataCollection(
dgroup=None, dref=dref, ddata=ddata,
Name='data',
)
# Try with meshes
dref = {
't0': {'data': self.lt[0], 'group': 'time', 'units': 's'},
't1': {'data': self.lt[1], 'group': 'time', 'units': 's'},
'r2': {'data': self.lr[2], 'group': 'radius', 'foo': 'bar'},
'mesh1': {'data': self.lmesh[1], 'foo': 'bar', 'quant': 'rho'},
}
ddata = {
'trace10': {'data': self.ltrace[2], 'ref': 't1', 'units': 'a'},
'trace50': {'data': self.ltrace[-2], 'ref': ('t0', 'mesh0')},
'trace51': {'data': self.ltrace[-1], 'ref': ('t1', 'mesh1')},
'mesh0': {'data': self.lmesh[0], 'foo': 'bar', 'group': 'mesh2d'},
}
data = tfd.DataCollection(
dref=dref, ddata=ddata,
Name='data',
)
# Try with lines
data = tfd.DataCollection()
data.add_data(**self.llines[0])
data.add_data(**self.llines[1])
data.add_ref(key='t0', data=self.lt[0], group='ne')
data.add_ref(key='t1', data=self.lt[1], group='Te')
data.add_data(**self.llines[2])
def test02_wrong_init(self):
# Try with minimalist input
dref = {
't0': {'data': self.lt[0], 'group': 'time'},
't1': {'data': self.lt[1], 'group': 'time'},
}
ddata = {
'trace00': self.ltrace[0], 'ref': 't0',
'trace11': self.ltrace[3], 'ref': ('t0', 't1'),
}
err = False
try:
data = tfd.DataCollection(
dgroup=None, dref=dref, ddata=ddata,
Name='data',
)
except Exception as er:
err = True
assert err, "Exception was not detected properly!"
def test03_add_remove_refdataobj(self):
data = self.lobj[0]
data.add_ref(key='r0', data=self.lr[0], group='radius', foo='bar')
assert 'r0' in data.dref.keys()
data.remove_ref(key='t0')
assert 't0' not in data.dref.keys()
assert 't0' not in data.ddata.keys()
assert all([tt not in data.ddata.keys()
for tt in ['trace00', 'trace11', 'trace31']])
data.add_ref('t0', data=self.lt[0], group='time')
assert 't0' in data.dref.keys()
# Check ambiguous throws error
err = False
try:
data.add_data(key='trace00', data=self.ltrace[0])
except Exception:
err = True
assert err
data.add_data('trace00', data=self.ltrace[0], ref=('t0',))
data.add_data('trace11', data=self.ltrace[3], ref=('t1', 't0'))
data.add_data('trace31', data=self.ltrace[7], ref=('t0', 'r2'),
foo='bar')
assert all([tt in data.ddata.keys()
for tt in ['trace00', 'trace11', 'trace31']])
# Add/remove mesh
data.add_ref(key='mesh0', data=self.lmesh[0], group='mesh2d')
data.add_data(
key='trace51', data=self.ltrace[-1],
ref=('t1', 'mesh1'), quant='rho',
)
data.add_data(
key='BRt', data=self.lB[3], ref=('t0', 'mesh1'),
quant='BR', units='T',
)
data.add_data(
key='BPhit', data=self.lB[4], ref=('t0', 'mesh1'),
quant='BPhi', units='T',
)
data.add_data(
key='BZt', data=self.lB[5], ref=('t0', 'mesh1'),
quant='BZ', units='T',
)
# Add / remove obj and ref_static
self.lobj[1].add_ref_static(key='[3]', which='source', long='bloblo')
self.lobj[1].add_obj(
which='lines', key='l3',
lambda0=5e-10, source='[3]', transition='C->D',
)
self.lobj[1].remove_obj(key='l3')
self.lobj[1].remove_ref_static(key='[3]')
self.lobj[1].remove_ref_static(which='ion')
def test04_select(self):
data = self.lobj[0]
key = data.select(which='data', units='s', returnas=str)
assert key == ['trace10']
out = data.select(units='a.u.', returnas=int)
assert len(out) == 12, out
# test quantitative param selection
out = self.lobj[1].select(which='lines', lambda0=[3.5e-10, 6e-10])
assert len(out) == 2
out = self.lobj[1].select(which='lines', lambda0=(3.5e-10, 6e-10))
assert len(out) == 1
def test05_sortby(self):
for oo in self.lobj:
oo.sortby(which='data', param='units')
def test06_get_summary(self):
for oo in self.lobj:
oo.get_summary()
def test07_getsetaddremove_param(self):
data = self.lobj[0]
out = data.get_param('units')
data.set_param('units', value='T', key='trace00')
data.add_param('shot', value=np.arange(0, len(data.ddata)))
assert np.all(
data.get_param('shot')['shot'] == np.arange(0, len(data.ddata))
)
data.remove_param('shot')
assert 'shot' not in data.get_lparam(which='data')
def test08_switch_ref(self):
data = self.lobj[0]
data.switch_ref('trace00')
# Check t0 removed
assert 'trace00' in data.dref.keys()
assert 'trace00' in data.dgroup['time']['lref']
assert all(['trace00' in v0['ref'] for k0, v0 in data.ddata.items()
if k0 in data.dref['trace00']['ldata']])
# Check t0 removed
assert 't0' not in data.dref.keys()
assert 't0' not in data.dgroup['time']['lref']
assert all(['t0' not in v0['ref'] for k0, v0 in data.ddata.items()
if k0 in data.dref['trace00']['ldata']])
# .. but still in data
assert 't0' in data.ddata.keys()
def test09_convert_spectral(self):
coef, inv = self.lobj[0].convert_spectral(
units_in='eV', units_out='J', returnas='coef',
)
assert coef == scpct.e and inv is False
coef, inv = self.lobj[0].convert_spectral(
units_in='nm', units_out='keV', returnas='coef',
)
assert coef == (0.001*(1/scpct.e)*scpct.h*scpct.c / 1.e-9)
assert inv is True
data = [[0], [1], [2], [3]]
out = self.lobj[0].convert_spectral(
data=data,
units_in='A', units_out='MHz',
)
assert out.shape == (4, 1)
# ------------------------
# Interpolation tools
# ------------------------
def test10_check_qr12RPZ(self):
data = self.lobj[0]
# Directly get 2d quant
out = data._check_qr12RPZ(
quant='mesh0', ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None,
)
assert (
out[0] == 'mesh0'
and all([out[ii] is None for ii in [1, 2, 3, 4, 5]])
)
out = data._check_qr12RPZ(
quant='trace51', ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None,
)
assert (
out[0] == 'trace51'
and all([out[ii] is None for ii in [1, 2, 3, 4, 5]])
)
# Get 1d quant
out = data._check_qr12RPZ(
quant='r2', ref1d='r2', ref2d='trace51',
q2dR=None, q2dPhi=None, q2dZ=None,
)
assert (
out[0] == 'r2' and out[1] == 'r2' and out[2] == 'trace51'
and all([out[ii] is None for ii in [3, 4, 5]])
)
# Get 1d quant from 2d
out = data._check_qr12RPZ(
quant='trace30', ref1d='r2', ref2d='trace51',
q2dR=None, q2dPhi=None, q2dZ=None,
)
assert (
out[0] == 'trace30' and out[1] == 'r2' and out[2] == 'trace51'
and all([out[ii] is None for ii in [3, 4, 5]])
)
# Get 1d quant from 2d
out = data._check_qr12RPZ(
quant='trace30', ref1d=None, ref2d=None,
q2dR=None, q2dPhi=None, q2dZ=None,
)
assert (
out[0] == 'trace30' and out[1] == 'r2' and out[2] == 'trace51'
and all([out[ii] is None for ii in [3, 4, 5]])
)
def test11_interp_pts2d_to_quant1d(self):
data = self.lobj[0]
# Not specifying points
val, dout = data._interp_pts2d_to_quant1d(quant='trace30')
# Specifying wrong ref2d
error = False
try:
val, dout = data._interp_pts2d_to_quant1d(
quant='trace30', ref2d='mesh1',
)
except Exception as err:
error = err
assert isinstance(error, Exception) and 'Non-valid' in str(error)
# Specifying points and time
pts = np.copy(dout['pts'])*0.5
val, dout = data._interp_pts2d_to_quant1d(
quant='trace30', pts=pts, t=np.linspace(1, 5, 10),
)
# Not specifying points
val, t = data._interp_pts2d_to_quant1d(quant='trace30')
# anisotropic rect with time
pts = data._get_pts_from_mesh(key='mesh0')
vect = pts
val, t = data._interp_pts2d_to_quant1d(
q2dR='BRr', q2dZ='BZr', q2dPhi='BPhir',
pts=pts, vect=vect, t=np.linspace(2, 5, 10),
)
# anisotropic tri
pts = data._get_pts_from_mesh(key='mesh1')
vect = pts
val, t = data._interp_pts2d_to_quant1d(
q2dR='BRt', q2dZ='BZt', q2dPhi='BPhit',
pts=pts, vect=vect,
)
# ------------------------
# Generic TofuObject methods
# ------------------------
def test20_copy_equal(self):
for oo in self.lobj:
obj = oo.copy()
assert obj == oo
def test21_get_nbytes(self):
for oo in self.lobj:
nb, dnb = oo.get_nbytes()
def test22_strip_nbytes(self, verb=False):
lok = self.lobj[0].__class__._dstrip['allowed']
nb = np.full((len(lok),), np.nan)
for oo in self.lobj:
for ii in lok:
oo.strip(ii, verb=verb)
nb[ii] = oo.get_nbytes()[0]
assert np.all(np.diff(nb) <= 0.), nb
for ii in lok[::-1]:
oo.strip(ii, verb=verb)
def test23_saveload(self, verb=False):
for oo in self.lobj:
if oo.Id.Name is None:
try:
pfe = oo.save(deep=False, verb=verb, return_pfe=True)
except Exception as err:
pass
else:
pfe = oo.save(deep=False, verb=verb, return_pfe=True)
obj = tfu.load(pfe, verb=verb)
# Just to check the loaded version works fine
assert oo == obj
os.remove(pfe)
# #############################################################################
# #############################################################################
# Specific to SpectralLines
# #############################################################################
class Test02_SpectralLines(object):
@classmethod
def setup_class(cls, Name='data1', SavePath='./', verb=False):
cls.sl = tfd.SpectralLines.from_openadas(
lambmin=3.94e-10,
lambmax=4e-10,
element=['Ar', 'W'],
)
@classmethod
def setup(self):
pass
def teardown(self):
pass
@classmethod
def teardown_class(cls):
pass
def test01_add_from_openadas(self):
lines = self.sl.dobj['lines']
self.sl.add_from_openadas(
lambmin=3.90e-10,
lambmax=3.96e-10,
element='W',
)
assert all([k0 in self.sl.dobj['lines'].keys() for k0 in lines.keys()])
def test02_sortby(self):
self.sl.sortby(param='lambda0', which='lines')
self.sl.sortby(param='ion', which='lines')
def test03_convert_lines(self):
self.sl.convert_lines(units='Hz')
def test04_calc_pec(self):
ne = np.r_[1e15, 1e18, 1e21]
Te = np.r_[1e3, 2e3, 3e3, 4e3, 5e3]
dpec = self.sl.calc_pec(ne=ne, Te=Te[:ne.size], grid=False)
dpec = self.sl.calc_pec(
key='Ar16_9_oa_pec40_cl', ne=ne, Te=Te[:ne.size], grid=False,
)
dpec = self.sl.calc_pec(ne=ne, Te=Te, grid=True)
dpec = self.sl.calc_pec(
key='Ar16_9_oa_pec40_cl', ne=ne, Te=Te[:ne.size], grid=False,
)
def test05_calc_intensity(self):
ne = np.r_[1e15, 1e18, 1e21]
Te = np.r_[1e3, 2e3, 3e3, 4e3, 5e3]
concentration = np.r_[0.1, 0.2, 0.3]
dint = self.sl.calc_intensity(
ne=ne, Te=Te[:ne.size], concentration=concentration, grid=False,
)
key = ['Ar16_9_oa_pec40_cl']
concentration = {k0: np.r_[0.1, 0.2, 0.3] for k0 in key}
dint = self.sl.calc_intensity(
key=key,
ne=ne, Te=Te[:ne.size], concentration=concentration, grid=False,
)
concentration = np.random.random((ne.size, Te.size))
dint = self.sl.calc_intensity(
ne=ne, Te=Te, concentration=concentration, grid=True,
)
key = ['Ar16_9_oa_pec40_cl']
concentration = {k0: concentration for k0 in key}
dint = self.sl.calc_intensity(
key=key,
ne=ne, Te=Te, concentration=concentration, grid=True,
)
def test06_plot(self):
ax = self.sl.plot()
plt.close('all')
def test07_plot_pec_single(self):
Te = 1.e3
ne = 1.e20
ax = self.sl.plot_pec_single(Te=Te, ne=ne)
def test08_plot_pec(self):
Te = np.linspace(1, 7, 7)*1e3
ne = np.logspace(15, 21, 7)
ax = self.sl.plot_pec(Te=1e3, ne=ne)
ax = self.sl.plot_pec(Te=Te, ne=1e19)
ax = self.sl.plot_pec(Te=Te, ne=ne)
plt.close('all')
| mit | 2,179,586,283,554,658,800 | 31.897582 | 79 | 0.469235 | false |
trondkr/OceanLight | IOwrite.py | 1 | 2407 | from datetime import datetime, timedelta
from netCDF4 import Dataset
from netCDF4 import num2date
import numpy as np
import time
import os
__author__ = 'Trond Kristiansen'
__email__ = '[email protected]'
__created__ = datetime(2014, 1, 23)
__modified__ = datetime(2014, 1, 23)
__version__ = "0.1"
__status__ = "Development"
def help ():
"""
    This function generates a netCDF4 file and saves the running average values for
    specific years into a file for each IPCC AR5 model.
    Used together with extractIce.py
"""
def writeCMIP5File(modelName,scenario,myvarname,lon,lat,time,mydata,mydataanomaly,outfilename):
myformat='NETCDF3_CLASSIC'
if os.path.exists(outfilename):
os.remove(outfilename)
print "Results written to netcdf file: %s"%(outfilename)
if myvarname=="sic": myvar="SIC"
f1 = Dataset(outfilename, mode='w', format=myformat)
f1.title = "IPCC AR5 %s"%(myvar)
f1.description = "IPCC AR5 running averages of %s for model %s for scenario %s"%(myvar,modelName,scenario)
f1.history = "Created " + str(datetime.now())
f1.source = "Trond Kristiansen ([email protected])"
f1.type = "File in NetCDF3 format created using iceExtract.py"
f1.Conventions = "CF-1.0"
"""Define dimensions"""
f1.createDimension('x', len(lon))
f1.createDimension('y', len(lat))
f1.createDimension('time', None)
vnc = f1.createVariable('longitude', 'd', ('x',),zlib=False)
vnc.long_name = 'Longitude'
vnc.units = 'degree_east'
vnc.standard_name = 'longitude'
vnc[:] = lon
vnc = f1.createVariable('latitude', 'd', ('y',),zlib=False)
vnc.long_name = 'Latitude'
vnc.units = 'degree_north'
vnc.standard_name = 'latitude'
vnc[:] = lat
v_time = f1.createVariable('time', 'd', ('time',),zlib=False)
v_time.long_name = 'Years'
v_time.units = 'Years'
v_time.field = 'time, scalar, series'
v_time[:]=time
v_temp=f1.createVariable('SIC', 'd', ('time', 'y', 'x',),zlib=False)
v_temp.long_name = "Sea-ice area fraction (%)"
v_temp.units = "%"
v_temp.time = "time"
v_temp.field="SIC, scalar, series"
v_temp.missing_value = 1e20
if myvarname=='sic':
f1.variables['SIC'][:,:,:] = mydata
f1.close()
| mit | -4,307,165,508,289,405,400 | 31.093333 | 111 | 0.607811 | false |
tkolhar/robottelo | robottelo/ui/role.py | 1 | 2544 | # -*- encoding: utf-8 -*-
"""Implements Roles UI."""
from robottelo.constants import FILTER
from robottelo.ui.base import Base, UIError
from robottelo.ui.locators import common_locators, locators, tab_locators
from robottelo.ui.navigator import Navigator
from selenium.webdriver.support.select import Select
class Role(Base):
"""Implements the CRUD functions for Roles."""
def navigate_to_entity(self):
"""Navigate to Role entity page"""
Navigator(self.browser).go_to_roles()
def _search_locator(self):
"""Specify locator for Role entity search procedure"""
return locators['roles.role']
def create(self, name):
"""Creates new Role with default permissions."""
self.click(locators['roles.new'])
if self.wait_until_element(locators['roles.name']):
self.find_element(locators['roles.name']).send_keys(name)
self.click(common_locators['submit'])
else:
raise UIError(
'Could not create new role "{0}"'.format(name)
)
def delete(self, name, really=True):
"""Delete existing role."""
self.delete_entity(
name,
really,
locators['roles.delete'],
locators['roles.dropdown'],
)
def update(self, name, new_name=None, add_permission=False,
resource_type=None, permission_list=None, organization=None):
"""Update role name/permissions/org."""
element = self.search(name)
if element is None:
raise UIError('Could not find role "{0}"'.format(name))
if new_name:
element.click()
if self.wait_until_element(locators['roles.name']):
self.field_update('roles.name', new_name)
if add_permission:
strategy, value = locators['roles.dropdown']
self.click((strategy, value % name))
self.click(locators['roles.add_permission'])
if resource_type:
Select(
self.find_element(
locators['roles.select_resource_type'])
).select_by_visible_text(resource_type)
if permission_list:
self.configure_entity(
permission_list, FILTER['role_permission'])
if organization:
self.click(tab_locators['roles.tab_org'])
self.configure_entity(organization, FILTER['role_org'])
self.click(common_locators['submit'])
| gpl-3.0 | -7,250,490,926,521,367,000 | 36.411765 | 76 | 0.584513 | false |
lehins/django-wepay | djwepay/models.py | 1 | 14141 | """All models are direct mappings to the WePay objects. By default only the
fields that correspond to the values returned from WePay lookup calls
(ex. `/account <https://www.wepay.com/developer/reference/account#lookup>`_) are
included in the models. All fields follow the rules outlined in `Storing Data
<https://www.wepay.com/developer/reference/storing_data>`_, unless otherwise
specified in the object's documentation. For that reason, values whose names end
with '_uri' (ex. ``account_uri``) are not included as model fields;
instead they are added as dynamic cached object properties, which are inherited
from Api objects defined in :mod:`djwepay.api`.
"""
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from djwepay.api import *
from djwepay.fields import MoneyField
from djwepay.managers import *
from json_field import JSONField
__all__ = [
'App', 'User', 'Account', 'Checkout', 'Preapproval', 'Withdrawal',
'CreditCard', 'SubscriptionPlan', 'Subscription', 'SubscriptionCharge',
'get_wepay_model_name', 'get_wepay_model'
]
@python_2_unicode_compatible
class BaseModel(models.Model):
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
ordering = ['-date_created']
def save(self, *args, **kwargs):
''' On save, update timestamps '''
self.date_modified = timezone.now()
if not self.date_created:
self.date_created = self.date_modified
return super(BaseModel, self).save(*args, **kwargs)
def __str__(self):
return "%s: %s - %s" % (self._meta.verbose_name, self.pk, self.state)
class App(AppApi, BaseModel):
"""
    This model stores all of the relevant WePay application information. Only one
    instance of it at a time is supported per Django application, selected by the
    :ref:`WEPAY_APP_ID` setting.
"""
# fields returned with a lookup call
client_id = models.BigIntegerField(primary_key=True)
status = models.CharField(max_length=255)
state = models.CharField(max_length=255)
api_version = models.CharField(max_length=255)
theme_object = JSONField(null=True, blank=True)
gaq_domains = JSONField(null=True, blank=True)
# Administrative objects attached to account, they are null=True just
# for initialization of the App, but are required for proper functionality.
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='apps', null=True,
help_text="Account attached to App where you can collect money.")
user = models.ForeignKey(
get_wepay_model_name('user'), related_name='apps', null=True,
help_text="Owner of this App")
client_secret = models.CharField(max_length=255)
objects = AppManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('app')
db_table = 'djwepay_app'
verbose_name = 'WePay App'
class User(UserApi, BaseModel):
user_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), related_name='users', null=True)
user_name = models.CharField(max_length=255)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
email = models.EmailField(max_length=255)
state = models.CharField(max_length=255)
# access_token=None means it has been revoked.
access_token = models.CharField(null=True, max_length=255)
token_type = "BEARER"
expires_in = models.BigIntegerField(null=True, blank=True)
objects = UserManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('user')
db_table = 'djwepay_user'
verbose_name = 'WePay User'
@property
def full_email(self):
return "%s <%s>" % (self.user_name, self.email)
class Account(AccountApi, BaseModel):
account_id = models.BigIntegerField(primary_key=True)
user = models.ForeignKey(
get_wepay_model_name('user'), related_name='accounts', null=True)
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
description = models.CharField(max_length=255)
reference_id = models.CharField(max_length=255, blank=True)
gaq_domains = JSONField(null=True, blank=True)
theme_object = JSONField(null=True, blank=True)
type = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
balances = JSONField(null=True, blank=True)
statuses = JSONField(null=True, blank=True)
action_reasons = JSONField(null=True, blank=True)
country = models.CharField(max_length=2)
currencies = JSONField(null=True, blank=True)
def _get_owner_user_id(self):
return self.user_id
def _set_owner_user_id(self, value):
if self.user is None or self.user_id != value:
try:
user = User.objects.get(user_id=value)
self.user = user
except User.DoesNotExist: pass
owner_user_id = property(_get_owner_user_id, _set_owner_user_id)
objects = AccountManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('account')
db_table = 'djwepay_account'
verbose_name = 'WePay Account'
class Checkout(CheckoutApi, BaseModel):
checkout_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='checkouts')
preapproval = models.ForeignKey(
get_wepay_model_name('preapproval'), related_name='checkouts', null=True)
state = models.CharField(max_length=255)
soft_descriptor = models.CharField(max_length=255)
short_description = models.CharField(max_length=255)
long_description = models.CharField(max_length=2047, blank=True)
currency = "USD"
amount = MoneyField(null=True)
fee = MoneyField(null=True)
gross = MoneyField(null=True)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
reference_id = models.CharField(max_length=255, blank=True)
payer_email = models.EmailField(max_length=255, blank=True)
payer_name = models.CharField(max_length=255, blank=True)
cancel_reason = models.CharField(max_length=255, blank=True)
refund_reason = models.CharField(max_length=255, blank=True)
auto_capture = models.BooleanField(default=True)
require_shipping = models.BooleanField(default=False)
shipping_address = JSONField(null=True)
tax = MoneyField(null=True)
amount_refunded = MoneyField(null=True)
amount_charged_back = MoneyField(null=True)
create_time = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('checkout')
db_table = 'djwepay_checkout'
verbose_name = 'WePay Checkout'
class Preapproval(PreapprovalApi, BaseModel):
preapproval_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), null=True, related_name='preapprovals')
account = models.ForeignKey(
get_wepay_model_name('account'), null=True, related_name='preapprovals')
short_description = models.CharField(max_length=255)
long_description = models.CharField(max_length=2047, blank=True)
currency = "USD"
amount = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
period = models.CharField(max_length=255)
frequency = models.IntegerField(null=True)
start_time = models.BigIntegerField(null=True)
end_time = models.BigIntegerField(null=True)
reference_id = models.CharField(max_length=255)
shipping_address = JSONField(null=True)
shipping_fee = MoneyField(null=True)
tax = MoneyField(null=True)
auto_recur = models.BooleanField(default=False)
payer_name = models.CharField(max_length=255)
payer_email = models.EmailField(max_length=255, blank=True)
create_time = models.BigIntegerField(null=True)
next_due_time = models.BigIntegerField(null=True)
last_checkout = models.ForeignKey(
get_wepay_model_name('checkout'), null=True, related_name='+')
last_checkout_time = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
objects = PreapprovalManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('preapproval')
db_table = 'djwepay_preapproval'
verbose_name = 'WePay Preapproval'
class Withdrawal(WithdrawalApi, BaseModel):
withdrawal_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='withdrawals')
state = models.CharField(max_length=255)
amount = MoneyField(null=True)
note = models.CharField(max_length=255)
recipient_confirmed = models.NullBooleanField()
type = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
capture_time = models.BigIntegerField(null=True)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('withdrawal')
db_table = 'djwepay_withdrawal'
verbose_name = 'WePay Preapproval'
class CreditCard(CreditCardApi, BaseModel):
credit_card_id = models.BigIntegerField(primary_key=True)
app = models.ForeignKey(
get_wepay_model_name('app'), related_name='credit_cards')
credit_card_name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
user_name = models.CharField(max_length=255)
email = models.CharField(max_length=255, blank=True)
reference_id = models.CharField(max_length=255, blank=True)
create_time = models.BigIntegerField(null=True)
input_source = models.CharField(max_length=255, blank=True)
virtual_terminal_mode = models.CharField(max_length=255, blank=True)
expiration_month = models.IntegerField(null=True)
expiration_year = models.IntegerField(null=True)
last_four = models.CharField(max_length=255, blank=True)
class Meta(BaseModel.Meta):
abstract = is_abstract('credit_card')
db_table = 'djwepay_credit_card'
verbose_name = 'WePay Credit Card'
class SubscriptionPlan(SubscriptionPlanApi, BaseModel):
subscription_plan_id = models.BigIntegerField(primary_key=True)
account = models.ForeignKey(
get_wepay_model_name('account'), related_name='subscription_plans')
name = models.CharField(max_length=255)
short_description = models.CharField(max_length=2047)
currency = models.CharField(max_length=3)
amount = MoneyField(null=True)
period = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
number_of_subscriptions = models.BigIntegerField(null=True)
trial_length = models.BigIntegerField(null=True)
setup_fee = MoneyField(null=True)
reference_id = models.CharField(max_length=255)
objects = AccountObjectsManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription_plan')
db_table = 'djwepay_subscription_plan'
verbose_name = 'WePay Subscription Plan'
class Subscription(SubscriptionApi, BaseModel):
subscription_id = models.BigIntegerField(primary_key=True)
subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'), related_name='subscriptions')
payer_name = models.CharField(max_length=255)
payer_email = models.CharField(max_length=255)
currency = models.CharField(max_length=255)
amount = MoneyField(null=True)
period = models.CharField(max_length=255)
app_fee = MoneyField(null=True)
fee_payer = models.CharField(max_length=255)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
payment_method_id = models.BigIntegerField(null=True)
payment_method_type = models.CharField(max_length=255)
quantity = models.BigIntegerField(null=True)
mode = models.CharField(max_length=255)
trial_days_remaining = models.BigIntegerField(null=True)
transition_expire_time = models.BigIntegerField(null=True)
transition_prorate = models.NullBooleanField()
transition_quantity = models.BigIntegerField(null=True)
transition_subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'),
related_name='transition_subscriptions')
reference_id = models.CharField(max_length=255)
objects = SubscriptionManager()
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription')
db_table = 'djwepay_subscription'
verbose_name = 'WePay Subscription'
class SubscriptionCharge(SubscriptionChargeApi, BaseModel):
subscription_charge_id = models.BigIntegerField(primary_key=True)
subscription_plan = models.ForeignKey(
get_wepay_model_name('subscription_plan'), related_name='subscription_charges')
subscription = models.ForeignKey(
get_wepay_model_name('subscription'), related_name='subscription_charges')
type = models.CharField(max_length=255)
amount = MoneyField(null=True)
currency = models.CharField(max_length=3)
fee = MoneyField(null=True)
app_fee = MoneyField(null=True)
gross = MoneyField(null=True)
quantity = models.BigIntegerField(null=True)
amount_refunded = MoneyField(null=True)
amount_charged_back = MoneyField(null=True)
state = models.CharField(max_length=255)
create_time = models.BigIntegerField(null=True)
end_time = models.BigIntegerField(null=True)
prorate_time = models.BigIntegerField(null=True)
class Meta(BaseModel.Meta):
abstract = is_abstract('subscription_charge')
db_table = 'djwepay_subscription_charge'
verbose_name = 'WePay Subscription Charge'
| mit | -1,835,955,764,543,517,700 | 39.173295 | 87 | 0.702779 | false |
tobykurien/MakerDroid | assetsrc/public.mp3/skeinforge/skeinforge_tools/craft_plugins/comb.py | 1 | 18588 | """
This page is in the table of contents.
Comb is a script to comb the extrusion hair of a gcode file.
The comb manual page is at:
http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Comb
Comb bends the extruder travel paths around holes in the slices, to avoid stringers. It moves the extruder to the inside of perimeters before turning the extruder on so any start up ooze will be inside the shape.
==Operation==
The default 'Activate Comb' checkbox is off. When it is on, the functions described below will work, when it is off, the functions will not be called.
==Settings==
===Minimum Departure Distance over Perimeter Width===
Default is zero.
Defines the ratio of the minimum distance that the extruder will travel and loop before leaving a perimeter. A high value means the extruder will loop many times before leaving, so that the ooze will finish within the perimeter, a low value means the extruder will not loop and the stringers will be thicker. Since it sometimes loops when there's no need, the default is zero.
===Running Jump Space over Perimeter Width===
Default is zero.
Defines the ratio of the running jump space that is added before going from one island to another to the perimeter width. The default is zero because sometimes an unnecessary running jump space is added, if you want to use it a reasonable value is five. For an extruder with acceleration code, an extra space before leaving the island means that it will be going at high speed as it exits the island, which means the stringer across the islands will be thinner. If the extruder does not have acceleration code, the speed will not be greater so there would be no benefit and 'Running Jump Space over Perimeter Width' should be left at zero.
==Examples==
The following examples comb the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and comb.py.
> python comb.py
This brings up the comb dialog.
> python comb.py Screw Holder Bottom.stl
The comb tool is parsing the file:
Screw Holder Bottom.stl
..
The comb tool has created the file:
.. Screw Holder Bottom_comb.gcode
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import comb
>>> comb.main()
This brings up the comb dialog.
>>> comb.writeOutput( 'Screw Holder Bottom.stl' )
The comb tool is parsing the file:
Screw Holder Bottom.stl
..
The comb tool has created the file:
.. Screw Holder Bottom_comb.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import intercircle
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import settings
from skeinforge.skeinforge_utilities import skeinforge_craft
from skeinforge.skeinforge_utilities import skeinforge_polyfile
from skeinforge.skeinforge_utilities import skeinforge_profile
import sys
__author__ = "Enrique Perez ([email protected])"
__date__ = "$Date: 2008/21/04 $"
__license__ = "GPL 3.0"
def getCraftedText( fileName, text, combRepository = None ):
"Comb a gcode linear move text."
return getCraftedTextFromText( gcodec.getTextIfEmpty( fileName, text ), combRepository )
def getCraftedTextFromText( gcodeText, combRepository = None ):
"Comb a gcode linear move text."
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'comb' ):
return gcodeText
if combRepository == None:
combRepository = settings.getReadRepository( CombRepository() )
if not combRepository.activateComb.value:
return gcodeText
return CombSkein().getCraftedGcode( combRepository, gcodeText )
def getNewRepository():
"Get the repository constructor."
return CombRepository()
def writeOutput( fileName = '' ):
"Comb a gcode linear move file."
fileName = fabmetheus_interpret.getFirstTranslatorFileNameUnmodified( fileName )
if fileName != '':
skeinforge_craft.writeChainTextWithNounMessage( fileName, 'comb' )
class CombRepository:
"A class to handle the comb settings."
def __init__( self ):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository( 'skeinforge.skeinforge_tools.craft_plugins.comb.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Comb', self, '' )
self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute( 'http://www.bitsfrombytes.com/wiki/index.php?title=Skeinforge_Comb' )
self.activateComb = settings.BooleanSetting().getFromValue( 'Activate Comb', self, False )
self.minimumDepartureDistanceOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.0, 'Minimum Departure Distance over Perimeter Width (ratio):', self, 50.0, 0.0 )
self.runningJumpSpaceOverPerimeterWidth = settings.FloatSpin().getFromValue( 0.0, 'Running Jump Space over Perimeter Width (ratio):', self, 10.0, 0.0 )
self.executeTitle = 'Comb'
def execute( self ):
"Comb button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode( self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled )
for fileName in fileNames:
writeOutput( fileName )
class CombSkein:
"A class to comb a skein of extrusions."
def __init__( self ):
self.betweenTable = {}
self.betweenTable = {}
self.boundaryLoop = None
self.distanceFeedRate = gcodec.DistanceFeedRate()
self.extruderActive = False
self.layer = None
self.layerTable = {}
self.layerZ = None
self.lineIndex = 0
self.lines = None
self.nextLayerZ = None
self.oldLocation = None
self.oldZ = None
self.operatingFeedRatePerMinute = None
self.travelFeedRatePerMinute = None
def addGcodePathZ( self, feedRateMinute, path, z ):
"Add a gcode path, without modifying the extruder, to the output."
for point in path:
self.distanceFeedRate.addGcodeMovementZWithFeedRate( feedRateMinute, point, z )
def addIfTravel( self, splitLine ):
"Add travel move around loops if the extruder is off."
location = gcodec.getLocationFromSplitLine( self.oldLocation, splitLine )
if not self.extruderActive and self.oldLocation != None:
if len( self.getBoundaries() ) > 0:
highestZ = max( location.z, self.oldLocation.z )
self.addGcodePathZ( self.travelFeedRatePerMinute, self.getPathsBetween( self.oldLocation.dropAxis( 2 ), location.dropAxis( 2 ) ), highestZ )
self.oldLocation = location
def addRunningJumpPath( self, end, loop, pathAround ):
"Get the running jump path from the perimeter to the intersection or running jump space."
if self.combRepository.runningJumpSpaceOverPerimeterWidth.value < 1.0:
return
if len( pathAround ) < 2:
return
loop = intercircle.getLargestInsetLoopFromLoopNoMatterWhat( loop, self.combInset )
penultimatePoint = pathAround[ - 2 ]
lastPoint = pathAround[ - 1 ]
nearestEndDistanceIndex = euclidean.getNearestDistanceIndex( end, loop )
nearestEndIndex = ( nearestEndDistanceIndex.index + 1 ) % len( loop )
nearestEnd = euclidean.getNearestPointOnSegment( loop[ nearestEndDistanceIndex.index ], loop[ nearestEndIndex ], end )
nearestEndMinusLast = nearestEnd - lastPoint
nearestEndMinusLastLength = abs( nearestEndMinusLast )
if nearestEndMinusLastLength <= 0.0:
return
nearestEndMinusLastSegment = nearestEndMinusLast / nearestEndMinusLastLength
betweens = self.getBetweens()
if self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, self.runningJumpSpace ):
return
doubleCombInset = 2.0 * self.combInset
shortJumpSpace = 0.5 * self.runningJumpSpace
if shortJumpSpace < doubleCombInset:
return
if self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, shortJumpSpace ):
return
shortJumpSpace = 0.25 * self.runningJumpSpace
if shortJumpSpace < doubleCombInset:
return
self.getIsRunningJumpPathAdded( betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, shortJumpSpace )
def addToLoop( self, location ):
"Add a location to loop."
if self.layer == None:
if not self.oldZ in self.layerTable:
self.layerTable[ self.oldZ ] = []
self.layer = self.layerTable[ self.oldZ ]
if self.boundaryLoop == None:
self.boundaryLoop = [] #starting with an empty array because a closed loop does not have to restate its beginning
self.layer.append( self.boundaryLoop )
if self.boundaryLoop != None:
self.boundaryLoop.append( location.dropAxis( 2 ) )
def getBetweens( self ):
"Set betweens for the layer."
if self.layerZ in self.betweenTable:
return self.betweenTable[ self.layerZ ]
if self.layerZ not in self.layerTable:
return []
self.betweenTable[ self.layerZ ] = []
for boundaryLoop in self.layerTable[ self.layerZ ]:
self.betweenTable[ self.layerZ ] += intercircle.getInsetLoopsFromLoop( self.betweenInset, boundaryLoop )
return self.betweenTable[ self.layerZ ]
def getBoundaries( self ):
"Get boundaries for the layer."
if self.layerZ in self.layerTable:
return self.layerTable[ self.layerZ ]
return []
def getCraftedGcode( self, combRepository, gcodeText ):
"Parse gcode text and store the comb gcode."
self.combRepository = combRepository
self.lines = gcodec.getTextLines( gcodeText )
self.parseInitialization( combRepository )
for lineIndex in xrange( self.lineIndex, len( self.lines ) ):
line = self.lines[ lineIndex ]
self.parseBoundariesLayers( combRepository, line )
for lineIndex in xrange( self.lineIndex, len( self.lines ) ):
line = self.lines[ lineIndex ]
self.parseLine( line )
return self.distanceFeedRate.output.getvalue()
def getIsAsFarAndNotIntersecting( self, begin, end ):
"Determine if the point on the line is at least as far from the loop as the center point."
if begin == end:
print( 'this should never happen but it does not really matter, begin == end in getIsAsFarAndNotIntersecting in comb.' )
print( begin )
return True
return not euclidean.isLineIntersectingLoops( self.getBetweens(), begin, end )
def getIsRunningJumpPathAdded( self, betweens, end, lastPoint, nearestEndMinusLastSegment, pathAround, penultimatePoint, runningJumpSpace ):
"Add a running jump path if possible, and return if it was added."
jumpStartPoint = lastPoint - nearestEndMinusLastSegment * runningJumpSpace
if euclidean.isLineIntersectingLoops( betweens, penultimatePoint, jumpStartPoint ):
return False
pathAround[ - 1 ] = jumpStartPoint
return True
def getPathBetween( self, betweenFirst, betweenSecond, isLeavingPerimeter, loopFirst ):
"Add a path between the perimeter and the fill."
loopFirst = intercircle.getLargestInsetLoopFromLoopNoMatterWhat( loopFirst, self.combInset )
nearestFirstDistanceIndex = euclidean.getNearestDistanceIndex( betweenFirst, loopFirst )
nearestSecondDistanceIndex = euclidean.getNearestDistanceIndex( betweenSecond, loopFirst )
firstBeginIndex = ( nearestFirstDistanceIndex.index + 1 ) % len( loopFirst )
secondBeginIndex = ( nearestSecondDistanceIndex.index + 1 ) % len( loopFirst )
nearestFirst = euclidean.getNearestPointOnSegment( loopFirst[ nearestFirstDistanceIndex.index ], loopFirst[ firstBeginIndex ], betweenFirst )
nearestSecond = euclidean.getNearestPointOnSegment( loopFirst[ nearestSecondDistanceIndex.index ], loopFirst[ secondBeginIndex ], betweenSecond )
clockwisePath = [ nearestFirst ]
widdershinsPath = [ nearestFirst ]
loopBeforeLeaving = euclidean.getAroundLoop( firstBeginIndex, firstBeginIndex, loopFirst )
if nearestFirstDistanceIndex.index == nearestSecondDistanceIndex.index:
if euclidean.getPathLength( widdershinsPath ) < self.minimumDepartureDistance:
widdershinsPath = [ nearestFirst ] + loopBeforeLeaving
reversedLoop = loopBeforeLeaving[ : ]
reversedLoop.reverse()
clockwisePath = [ nearestFirst ] + reversedLoop
else:
widdershinsLoop = euclidean.getAroundLoop( firstBeginIndex, secondBeginIndex, loopFirst )
widdershinsPath += widdershinsLoop
clockwiseLoop = euclidean.getAroundLoop( secondBeginIndex, firstBeginIndex, loopFirst )
clockwiseLoop.reverse()
clockwisePath += clockwiseLoop
clockwisePath.append( nearestSecond )
widdershinsPath.append( nearestSecond )
if euclidean.getPathLength( widdershinsPath ) > euclidean.getPathLength( clockwisePath ):
loopBeforeLeaving.reverse()
widdershinsPath = clockwisePath
if isLeavingPerimeter:
totalDistance = euclidean.getPathLength( widdershinsPath )
loopLength = euclidean.getPolygonLength( loopBeforeLeaving )
while totalDistance < self.minimumDepartureDistance:
widdershinsPath = [ nearestFirst ] + loopBeforeLeaving + widdershinsPath[ 1 : ]
totalDistance += loopLength
return widdershinsPath
def getPathsBetween( self, begin, end ):
"Insert paths between the perimeter and the fill."
aroundBetweenPath = []
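		# Rough outline of the approach: the straight travel segment from begin to
		# end is rotated so that it lies horizontally, every boundary loop is rotated
		# the same way, and the x intersections of each loop with the segment's y
		# line are collected and sorted.  Consecutive pairs of intersections mark
		# where the travel would cut through a boundary; for each pair a detour is
		# built with getPathBetween, and a running jump may be added when the pair
		# belongs to different boundary loops (i.e. the travel leaves one island for
		# another).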
points = [ begin ]
lineX = []
switchX = []
segment = euclidean.getNormalized( end - begin )
segmentYMirror = complex( segment.real, - segment.imag )
beginRotated = segmentYMirror * begin
endRotated = segmentYMirror * end
y = beginRotated.imag
boundaries = self.getBoundaries()
for boundaryIndex in xrange( len( boundaries ) ):
boundary = boundaries[ boundaryIndex ]
boundaryRotated = euclidean.getPointsRoundZAxis( segmentYMirror, boundary )
euclidean.addXIntersectionIndexesFromLoopY( boundaryRotated, boundaryIndex, switchX, y )
switchX.sort()
maximumX = max( beginRotated.real, endRotated.real )
minimumX = min( beginRotated.real, endRotated.real )
for xIntersection in switchX:
if xIntersection.x > minimumX and xIntersection.x < maximumX:
point = segment * complex( xIntersection.x, y )
points.append( point )
lineX.append( xIntersection )
points.append( end )
lineXIndex = 0
pathBetweenAdded = False
while lineXIndex < len( lineX ) - 1:
lineXFirst = lineX[ lineXIndex ]
lineXSecond = lineX[ lineXIndex + 1 ]
loopFirst = boundaries[ lineXFirst.index ]
isLeavingPerimeter = False
if lineXSecond.index != lineXFirst.index:
isLeavingPerimeter = True
pathBetween = self.getPathBetween( points[ lineXIndex + 1 ], points[ lineXIndex + 2 ], isLeavingPerimeter, loopFirst )
if isLeavingPerimeter:
if not pathBetweenAdded:
self.addRunningJumpPath( points[ lineXIndex + 3 ], boundaries[ lineXSecond.index ], pathBetween )
pathBetweenAdded = True
else:
pathBetween = self.getSimplifiedAroundPath( points[ lineXIndex ], points[ lineXIndex + 3 ], loopFirst, pathBetween )
pathBetweenAdded = True
aroundBetweenPath += pathBetween
lineXIndex += 2
return aroundBetweenPath
def getSimplifiedAroundPath( self, begin, end, loop, pathAround ):
"Get the simplified path between the perimeter and the fill."
pathAround = self.getSimplifiedBeginPath( begin, loop, pathAround )
return self.getSimplifiedEndPath( end, loop, pathAround )
def getSimplifiedBeginPath( self, begin, loop, pathAround ):
"Get the simplified begin path between the perimeter and the fill."
if len( pathAround ) < 2:
return pathAround
pathIndex = 0
while pathIndex < len( pathAround ) - 1:
if not self.getIsAsFarAndNotIntersecting( begin, pathAround[ pathIndex + 1 ] ):
return pathAround[ pathIndex : ]
pathIndex += 1
return pathAround[ - 1 : ]
def getSimplifiedEndPath( self, end, loop, pathAround ):
"Get the simplified end path between the perimeter and the fill."
if len( pathAround ) < 2:
return pathAround
pathIndex = len( pathAround ) - 1
while pathIndex > 0:
if not self.getIsAsFarAndNotIntersecting( end, pathAround[ pathIndex - 1 ] ):
return pathAround[ : pathIndex + 1 ]
pathIndex -= 1
return pathAround[ : 1 ]
def parseBoundariesLayers( self, combRepository, line ):
"Parse a gcode line."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
if len( splitLine ) < 1:
return
firstWord = splitLine[ 0 ]
if firstWord == 'M103':
self.boundaryLoop = None
elif firstWord == '(<boundaryPoint>':
location = gcodec.getLocationFromSplitLine( None, splitLine )
self.addToLoop( location )
elif firstWord == '(<layer>':
self.boundaryLoop = None
self.layer = None
self.oldZ = float( splitLine[ 1 ] )
def parseInitialization( self, combRepository ):
"Parse gcode initialization and store the parameters."
for self.lineIndex in xrange( len( self.lines ) ):
line = self.lines[ self.lineIndex ]
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
firstWord = gcodec.getFirstWord( splitLine )
self.distanceFeedRate.parseSplitLine( firstWord, splitLine )
if firstWord == '(</extruderInitialization>)':
self.distanceFeedRate.addLine( '(<procedureDone> comb </procedureDone>)' )
return
elif firstWord == '(<perimeterWidth>':
perimeterWidth = float( splitLine[ 1 ] )
self.combInset = 1.2 * perimeterWidth
self.betweenInset = 0.4 * perimeterWidth
self.uTurnWidth = 0.5 * self.betweenInset
self.minimumDepartureDistance = combRepository.minimumDepartureDistanceOverPerimeterWidth.value * perimeterWidth
self.runningJumpSpace = combRepository.runningJumpSpaceOverPerimeterWidth.value * perimeterWidth
elif firstWord == '(<travelFeedRatePerSecond>':
self.travelFeedRatePerMinute = 60.0 * float( splitLine[ 1 ] )
self.distanceFeedRate.addLine( line )
def parseLine( self, line ):
"Parse a gcode line and add it to the comb skein."
splitLine = gcodec.getSplitLineBeforeBracketSemicolon( line )
if len( splitLine ) < 1:
return
firstWord = splitLine[ 0 ]
if firstWord == 'G1':
self.addIfTravel( splitLine )
self.layerZ = self.nextLayerZ
elif firstWord == 'M101':
self.extruderActive = True
elif firstWord == 'M103':
self.extruderActive = False
elif firstWord == '(<layer>':
self.nextLayerZ = float( splitLine[ 1 ] )
if self.layerZ == None:
self.layerZ = self.nextLayerZ
self.distanceFeedRate.addLine( line )
def main():
"Display the comb dialog."
if len( sys.argv ) > 1:
writeOutput( ' '.join( sys.argv[ 1 : ] ) )
else:
settings.startMainLoopFromConstructor( getNewRepository() )
if __name__ == "__main__":
main()
| gpl-3.0 | 2,445,846,317,411,144,000 | 43.257143 | 642 | 0.755918 | false |
wildfish/django-nodetest | nodetest/utils.py | 1 | 1140 | from os.path import join
from uuid import uuid4
from shutil import copyfile
_js_repl = """;(function () {
var repl = require('repl');
var os = require('os');
var empty = '(' + os.EOL + ')';
repl.start({
prompt: "NODE> ",
eval: function (cmd, context, filename, callback) {
if (cmd === ".scope") cmd = empty;
if (cmd === empty) return callback();
var result = eval(cmd);
callback(null, result)
}
})
})();
"""
def _make_temp_name(js_src):
return '{}/__{}.js'.format(
js_src.rsplit('/', 1)[0],
uuid4().hex
)
def make_temp_file(root_dir, js_src):
temp_name = _make_temp_name(js_src)
src = join(root_dir, js_src)
dst = join(root_dir, temp_name)
full_dest = copyfile(src, dst)
return {
'absolute_path': full_dest,
'relative_path': temp_name
}
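# Typical flow (sketch; the root directory and script path below are made-up
# examples, not files shipped with this package): copy the target script to a
# temporary sibling file, then replace its /*REPL*/ marker with the embedded
# snippet above so running it under node drops into an interactive REPL.
#
#   paths = make_temp_file('/path/to/project', 'scripts/app.js')
#   parse_repl(paths['absolute_path'])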
def parse_repl(src):
with open(src, 'r') as src_file:
file_content = src_file.read()
file_content = file_content.replace('/*REPL*/', _js_repl)
with open(src, 'w') as src_file:
src_file.write(file_content)
| bsd-3-clause | -7,775,578,083,173,779,000 | 22.75 | 65 | 0.536842 | false |
programa-stic/barf-project | barf/core/reil/emulator/memory.py | 1 | 7471 | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import random
REIL_MEMORY_ENDIANNESS_LE = 0x0 # Little Endian
REIL_MEMORY_ENDIANNESS_BE = 0x1 # Big Endian
class ReilMemory(object):
"""A REIL memory model (byte addressable).
"""
def __init__(self, address_size):
# TODO: Set endianness through a parameter.
# TODO: Check that all addresses have size address_size.
# TODO: Use endianness parameter.
# Memory's address size.
self.__address_size = address_size
# Memory's endianness.
self.__endianness = REIL_MEMORY_ENDIANNESS_LE
# Dictionary that implements the memory itself.
self._memory = {}
@property
def address_size(self):
return self.__address_size
# Read methods
# ======================================================================== #
def read(self, address, size):
"""Read arbitrary size content from memory.
"""
value = 0x0
for i in range(0, size):
value |= self._read_byte(address + i) << (i * 8)
return value
def _read_byte(self, address):
"""Read a byte from memory.
"""
# Initialize memory location with a random value.
if address not in self._memory:
self._memory[address] = random.randint(0x00, 0xff)
return self._memory[address]
# Write methods
# ======================================================================== #
def write(self, address, size, value):
"""Write arbitrary size content to memory.
"""
for i in range(0, size):
self.__write_byte(address + i, (value >> (i * 8)) & 0xff)
def __write_byte(self, address, value):
"""Write byte in memory.
"""
self._memory[address] = value & 0xff
# Misc methods
# ======================================================================== #
def reset(self):
# Dictionary that implements the memory itself.
self._memory = {}
# Magic methods
# ======================================================================== #
def __str__(self):
lines = []
for addr in sorted(self._memory.keys()):
lines += ["0x%08x : 0x%08x" % (addr, self._memory[addr])]
return "\n".join(lines)
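# Illustrative usage (sketch): the model is little-endian, so a multi-byte
# write scatters bytes lowest-first and read() reassembles them the same way.
# Assuming a 32-bit address size:
#
#   mem = ReilMemory(32)
#   mem.write(0x1000, 4, 0x11223344)
#   mem.read(0x1000, 1)   # -> 0x44 (least significant byte at the lowest address)
#   mem.read(0x1000, 4)   # -> 0x11223344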
class ReilMemoryEx(ReilMemory):
"""Reil memory extended class"""
def __init__(self, address_size):
super(ReilMemoryEx, self).__init__(address_size)
# Previous state of memory.
self.__memory_prev = {}
# Write operations counter.
self.__write_count = 0
# Read methods
# ======================================================================== #
def read_inverse(self, value, size):
"""Return a list of memory addresses that contain the specified
value.
"""
addr_candidates = [addr for addr, val in self._memory.items() if val == (value & 0xff)]
addr_matches = []
for addr in addr_candidates:
match = True
for i in range(0, size):
byte_curr = (value >> (i * 8)) & 0xff
try:
match = self._memory[addr + i] == byte_curr
except KeyError:
match = False
if not match:
break
if match:
addr_matches += [addr]
return addr_matches
def try_read(self, address, size):
"""Try to read memory content at specified address.
If any location was not written before, it returns a tuple
(False, None). Otherwise, it returns (True, memory content).
"""
value = 0x0
for i in range(0, size):
addr = address + i
if addr in self._memory:
value |= self._read_byte(addr) << (i * 8)
else:
return False, None
return True, value
def try_read_prev(self, address, size):
"""Try to read previous memory content at specified address.
If any location was not written before, it returns a tuple
(False, None). Otherwise, it returns (True, memory content).
"""
value = 0x0
for i in range(0, size):
addr = address + i
if addr in self.__memory_prev:
_, val_byte = self.__try_read_byte_prev(addr)
value |= val_byte << (i * 8)
else:
return False, None
return True, value
def __try_read_byte_prev(self, address):
"""Read previous value for memory location.
Return a tuple (True, Byte) in case of successful read,
(False, None) otherwise.
"""
# Initialize memory location with a random value
if address not in self.__memory_prev:
return False, None
return True, self.__memory_prev[address]
# Write methods
# ======================================================================== #
def write(self, address, size, value):
"""Write arbitrary size content to memory.
"""
for i in range(0, size):
self.__write_byte(address + i, (value >> (i * 8)) & 0xff)
self.__write_count += 1
def __write_byte(self, address, value):
"""Write byte in memory.
"""
# Save previous address content.
if address in self._memory:
self.__memory_prev[address] = self._memory[address]
self._memory[address] = value & 0xff
# Misc methods
# ======================================================================== #
def reset(self):
super(ReilMemoryEx, self).reset()
# Previous state of memory.
self.__memory_prev = {}
# Write operations counter.
self.__write_count = 0
def get_addresses(self):
"""Get accessed addresses.
"""
return list(self._memory.keys())
def get_write_count(self):
"""Get number of write operations performed on the memory.
"""
return self.__write_count
| bsd-2-clause | 517,048,683,388,702 | 30.259414 | 95 | 0.549458 | false |
PySimulator/PySimulator | PySimulator/Plugins/SimulationResult/__init__.py | 1 | 1172 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory) if os.path.isdir(os.path.join(directory, name)) and name[0] != '.']
PlugInNames = get_immediate_subdirectories(os.path.abspath(os.path.dirname(__file__)))
plugin = []
for i in range(len(PlugInNames)):
try:
mod = __import__(PlugInNames[i] + "." + PlugInNames[i], locals(), globals(), [PlugInNames[i] + "." + PlugInNames[i]])
plugin.append(mod)
except ImportError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + e.message + "'"
except SyntaxError as e:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + str(e) + "'"
except Exception as e:
info = str(e)
if info == '' or info is None:
print PlugInNames[i] + " plug-in could not be loaded."
else:
print PlugInNames[i] + " plug-in could not be loaded. Error message: '" + info + "'"
fileExtension = []
description = []
for p in plugin:
fileExtension.append(p.fileExtension)
description.append(p.description)
| lgpl-3.0 | -7,544,794,805,492,171,000 | 38 | 125 | 0.625641 | false |
tsw-apropos/mapbiographer | mapBiographer/polygon_tool.py | 1 | 7016 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
lmbMapToolPolygon
A QGIS plugin
 Effectively conduct direct to digital map biographies and traditional land
use studies
-------------------
begin : 2014-05-13
copyright : (C) 2014 by Apropos Information Systems Inc.
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* any later version. *
* *
* Derived from vertexTracerTool.py & freehandeditingtool.py *
* *
***************************************************************************/
"""
from PyQt4 import QtCore, QtGui
from qgis.core import *
from qgis.gui import *
import qgis.utils
import time
class lmbMapToolPolygon(QgsMapTool):
rbFinished = QtCore.pyqtSignal('QgsGeometry*')
def __init__(self, canvas):
# get canvas
QgsMapTool.__init__(self,canvas)
self.canvas = canvas
# control variables
self.started = False
self.firstTimeOnSegment = True
# related to temp output but function unclear
self.prevPoint = None
# custom cursor
self.cursor = QtGui.QCursor(QtGui.QPixmap(["16 16 3 1",
" c None",
". c #FF0000",
"+ c #000000",
" ",
" +.+ ",
" ++.++ ",
" +.....+ ",
" +. .+ ",
" +. . .+ ",
" +. . .+ ",
" ++. . .++",
" ... ...+... ...",
" ++. . .++",
" +. . .+ ",
" +. . .+ ",
" ++. .+ ",
" ++.....+ ",
" ++.++ ",
" +.+ "]))
#
# track when delete is released to permit deletion of last point
def keyReleaseEvent(self, event):
# remove the last added point when the delete key is pressed
if event.key() == QtCore.Qt.Key_Backspace:
self.rb.removeLastPoint()
#
# canvas move events
def canvasMoveEvent(self,event):
if self.started:
#Get the click
x = event.pos().x()
y = event.pos().y()
eventPoint = QtCore.QPoint(x,y)
layer = self.canvas.currentLayer()
if layer <> None:
point = QgsMapToPixel.toMapCoordinates(self.canvas.getCoordinateTransform(), x, y)
self.rb.movePoint(point)
#
# canvas release events
def canvasReleaseEvent(self,event):
# left click
if event.button() == 1:
# select the current layer
layer = self.canvas.currentLayer()
# if it is the start of a polygon set the rubberband up
if self.started == False:
self.rb = QgsRubberBand(self.canvas, layer.geometryType())
self.rb.setColor(QtGui.QColor('#ff0000'))
self.rb.setWidth(1)
self.rb.setOpacity(0.5)
self.started = True
# get coordinates if we are connecting to an editable layer
if layer <> None:
x = event.pos().x()
y = event.pos().y()
point = self.canvas.getCoordinateTransform().toMapCoordinates(x, y)
# put rubber band at cursor
self.rb.movePoint(point)
# set new point
self.appendPoint(point)
# right click
elif event.button() == 2:
self.sendGeometry()
#
# append point
def appendPoint(self, point):
# only add point if different from previous
if not (self.prevPoint == point) :
self.rb.addPoint(point)
self.prevPoint = QgsPoint(point)
#
# send geometry
def sendGeometry(self):
layer = self.canvas.currentLayer()
coords = []
#
# NOTE: code from vertex tracer skipped first point by using range of
# 1 to # of vertices. Changed to zero to include all points and have a
# complete feature.
# Also skip last point when right click was pressed to avoid extra points
# being placed
#
[coords.append(self.rb.getPoint(0,i)) for i in range(0,self.rb.numberOfVertices()-1)]
coords_tmp = coords[:]
coords = []
crsSrc = QgsCoordinateReferenceSystem(qgis.utils.iface.mapCanvas().mapSettings().destinationCrs())
crsDest = QgsCoordinateReferenceSystem(layer.crs())
xform = QgsCoordinateTransform(crsSrc,crsDest)
for point in coords_tmp:
transformedPoint = xform.transform(point)
coords.append(transformedPoint)
coords_tmp = coords[:]
coords = []
lastPt = None
for pt in coords_tmp:
if (lastPt <> pt) :
coords.append(pt)
lastPt = pt
g = QgsGeometry().fromPolygon([coords])
if g <> None and g.isGeosValid():
self.rbFinished.emit(g)
self.started = False
#
# activate tool
def activate(self):
self.canvas.setCursor(self.cursor)
#
# deactivate tool
def deactivate(self):
try:
self.rb.reset()
except AttributeError:
pass
#
# send false if queried if zoom tool
def isZoomTool(self):
return False
#
# send false if queried if transient (performs zoom or pan operation)
def isTransient(self):
return False
#
# send true if queried if edit tool
def isEditTool(self):
return True
| gpl-2.0 | 3,999,823,729,145,701,000 | 34.434343 | 106 | 0.428877 | false |
rahul-x-verma/Polaris | polaris/static/algorithm/map.py | 1 | 1660 | from pprint import pprint
class Map():
"""
Stores a map of the transit system in a graph data structure. Each vertex
represents one bus stopping at a given geographical location. Each edge
represents the time between one stop and the next (including waiting time if
applicable).
"""
def __init__(self):
"""
Creates an empty map.
"""
self.adjacency_list = {}
self.vertices = {}
def insert(self, stop):
"""
Adds a vertex and its neighbors to the graph.
"""
self.vertices[stop.uid] = stop
self.adjacency_list[stop] = {}
for neighbor in stop.neighbors:
self.adjacency_list[stop][neighbor[0]] = neighbor[1]
def find_path(self, start, end):
"""
Uses breadth-first search to find the buses necessary to go between two
stops.
"""
S = [start]
pi = {}
while end not in pi.keys():
if not S:
return []
            curr = S.pop(0)  # pop from the front (FIFO) so the search is breadth-first, as documented
for vertex in self.adjacency_list[curr]:
if vertex not in pi.keys():
S.append(vertex)
pi[vertex] = curr
if pi[end]:
result = [end]
while (start != end):
end = pi[end]
result = [end] + result
return result
else:
return []
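    # Illustrative usage (stop_a/stop_b are hypothetical Stop-like objects that
    # expose .uid and .neighbors, as insert() expects):
    #
    #   m = Map()
    #   m.insert(stop_a)
    #   m.insert(stop_b)
    #   m.find_path(stop_a, stop_b)   # -> [stop_a, ..., stop_b], or [] if unreachable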
def distance(self, v1, v2):
return self.adjacency_list[v1][v2]
def show(self):
"""
Returns a human-readable version of the map.
"""
pprint(self.adjacency_list)
| apache-2.0 | -4,744,783,799,445,522,000 | 26.213115 | 80 | 0.506627 | false |
sarthak2108/AI-Projects | Minesweeper/convert2CNF.py | 1 | 4718 | import sys
def parse_file(filepath):
############################################
# Read the layout file to the board array.
# Note how the order in which the rows are
# read is reversed in the final array. This
    # accommodates the requirement that positions
    # are numbered from the bottom left.
############################################
board = []
fin = open(filepath)
line = fin.readline()
tokens = line.replace('\n', '').split(' ')
height = int(tokens[0])
width = int(tokens[1])
reverse_board = []
for line in fin:
tokens = line.replace('\n', '').split(',')
row = []
for each_token in tokens:
if each_token == 'X':
row.append(each_token)
else:
row.append(int(each_token))
reverse_board.append(row)
fin.close()
while len(reverse_board) != 0:
board.append(reverse_board.pop())
return board
def convert2CNF(board, output):
############################################
# Interpret the number of constraints.
#
# We count the total number of clauses and
# variables which are necessary in formatting
    # the input file for MINISAT. Each variable
# is named after the board position it
# represents. A positive sign means it has a
    # bomb, while a negative sign indicates
# otherwise.
#
# We use the following trick to reduce the
# exponential number of clauses generated in
# converting DNF to CNF to something that is
# polynomial.
# We simply compute how many combinations
    # of eight adjacent positions there are which
    # are guaranteed to have at least one bomb.
# We only consider the minimum number of
# positions required to guarantee a bomb, as
# all the other rules are entailed from them.
# This drastically reduces the computational
    # cost from exponential to polynomial.
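    #
    # Worked example (illustrative): a revealed cell p showing "2" with eight
    # hidden neighbours n1..n8 produces the clause -p (the revealed cell holds
    # no bomb) plus one clause for every choice of 8 - (2 - 1) = 7 neighbours,
    # i.e. C(8,7) = 8 clauses of the form (n1 v ... v n7), each saying "at
    # least one bomb among these seven".  Together these clauses entail that
    # at least two of the eight neighbours are bombs.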
############################################
from itertools import permutations
height = len(board)
width = len(board[0])
number_of_variables = height * width
number_of_clauses = 0
clauses = []
for i in range(height):
for j in range(width):
if board[i][j] != 'X':
position = (i * width) + j + 1
pos = get_adjacent_positions(i , j, height, width)
if board[i][j] == 0:
number_of_clauses += 1
clauses.append([-position])
for p in pos:
clauses.append([-p])
number_of_clauses += 1
else:
permute = []
for k in range(len(pos)):
if k < board[i][j] - 1:
permute.append(0)
else:
permute.append(1)
permuted = list(set(list(permutations(permute))))
number_of_clauses += (len(permuted) + 1)
clauses.append([-position])
for p in permuted:
clause = []
for bits in range(len(p)):
if p[bits] == 1:
clause.append(pos[bits])
clauses.append(clause)
fout = open(output, 'w')
string = 'p cnf ' + str(number_of_variables) + ' ' + str(number_of_clauses)
fout.write(string)
for clause in clauses:
string = '\n'
for literal in clause:
string += str(literal) + ' '
string += '0'
fout.write(string)
fout.close()
def get_adjacent_positions(i, j, height, width):
############################################
# Determines the adjacent positions of a
# particular position of the board array.
############################################
pos = []
if i - 1 >= 0:
if j - 1 >= 0:
pos.append(((i - 1) * width) + (j - 1) + 1)
pos.append(((i - 1) * width) + j + 1)
if j + 1 < width:
pos.append(((i - 1) * width) + (j + 1) + 1)
if j - 1 >= 0:
pos.append((i * width) + (j - 1) + 1)
if j + 1 < width:
pos.append((i * width) + (j + 1) + 1)
if i + 1 < height:
if j - 1 >= 0:
pos.append(((i + 1) * width) + (j - 1) + 1)
pos.append(((i + 1) * width) + j + 1)
if j + 1 < width:
pos.append(((i + 1) * width) + (j + 1) + 1)
return pos
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Layout or output file not specified.'
exit(-1)
board = parse_file(sys.argv[1])
convert2CNF(board, sys.argv[2])
| mit | 3,087,753,827,598,349,000 | 35.015267 | 79 | 0.481984 | false |
JuhaniImberg/DragonPy | dragonpy/tests/test_cli.py | 1 | 6790 | #!/usr/bin/env python
"""
DragonPy - Dragon 32 emulator in Python
=======================================
:copyleft: 2013-2015 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function
import subprocess
import unittest
from click.testing import CliRunner
import MC6809
import dragonpy
from dragonpy.core.cli import cli
from dragonpy.utils.starter import run_dragonpy, run_mc6809
class CliTestCase(unittest.TestCase):
def assert_contains_members(self, members, container):
for member in members:
msg = "%r not found in:\n%s" % (member, container)
# self.assertIn(member, container, msg) # Bad error message :(
            if member not in container:
self.fail(msg)
def assert_not_contains_members(self, members, container):
for member in members:
if member in container:
self.fail("%r found in:\n%s" % (member, container))
def assert_is_help(self, output):
self.assert_contains_members([
"Usage: ", " [OPTIONS] COMMAND [ARGS]...", # Don't check "filename": It's cli or cli.py in unittests!
"DragonPy is a Open source (GPL v3 or later) emulator for the 30 years old",
"homecomputer Dragon 32 and Tandy TRS-80 Color Computer (CoCo)...",
"Homepage: https://github.com/jedie/DragonPy",
"--machine [CoCo2b|Dragon32|Dragon64|Multicomp6809|Vectrex|sbc09]",
"Commands:",
"download_roms Download/Test only ROM files",
"editor Run only the BASIC editor",
"log_list List all exiting loggers and exit.",
"nosetests Run all tests via nose",
"run Run a machine emulation",
], output)
class TestStarter(CliTestCase):
"""
Test the "starter functions" that invoke DragonPy / MC6809 via subprocess.
"""
def _run(self, func, *args, **kwargs):
p = func(*args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
**kwargs
)
retcode = p.wait()
cli_out = p.stdout.read()
p.stdout.close()
cli_err = p.stderr.read()
p.stderr.close()
if retcode != 0:
msg = (
"subprocess returned %s.\n"
" *** stdout: ***\n"
"%s\n"
" *** stderr: ***\n"
"%s\n"
"****************\n"
) % (retcode, cli_out, cli_err)
self.assertEqual(retcode, 0, msg=msg)
return cli_out, cli_err
def _run_dragonpy(self, *args, **kwargs):
return self._run(run_dragonpy, *args, **kwargs)
def _run_MC6809(self, *args, **kwargs):
return self._run(run_mc6809, *args, **kwargs)
def test_run_dragonpy_version(self):
cli_out, cli_err = self._run_dragonpy(
"--version",
# verbose=True
)
self.assertIn(dragonpy.__version__, cli_out)
self.assertEqual(cli_err, "")
def test_run_dragonpy_help(self):
cli_out, cli_err = self._run_dragonpy(
"--help",
# verbose=True
)
self.assert_is_help(cli_out)
self.assertEqual(cli_err, "")
def test_run_MC6809_version(self):
cli_out, cli_err = self._run_MC6809(
"--version",
# verbose=True
)
self.assertIn(MC6809.__version__, cli_out)
self.assertEqual(cli_err, "")
def test_run_MC6809_help(self):
cli_out, cli_err = self._run_MC6809(
"--help",
# verbose=True
)
self.assert_contains_members([
"Usage: ", " [OPTIONS] COMMAND [ARGS]...", # Don't check "filename": It's cli or cli.py in unittests!
"Homepage: https://github.com/6809/MC6809",
"Run a 6809 Emulation benchmark",
], cli_out)
self.assertEqual(cli_err, "")
class CLITestCase(CliTestCase):
"""
Test the click cli via click.CliRunner().invoke()
"""
def _invoke(self, *args):
runner = CliRunner()
result = runner.invoke(cli, args)
if result.exit_code != 0:
msg = (
"\nstart CLI with: '%s'\n"
"return code: %r\n"
" *** output: ***\n"
"%s\n"
" *** exception: ***\n"
"%s\n"
"****************\n"
) % (" ".join(args), result.exit_code, result.output, result.exception)
self.assertEqual(result.exit_code, 0, msg=msg)
return result
def test_main_help(self):
result = self._invoke("--help")
# print(result.output)
# print(cli_err)
self.assert_is_help(result.output)
errors = ["Error", "Traceback"]
self.assert_not_contains_members(errors, result.output)
def test_version(self):
result = self._invoke("--version")
self.assertIn(dragonpy.__version__, result.output)
def test_log_list(self):
result = self._invoke("log_list")
# print(result.output)
# print(cli_err)
self.assert_contains_members([
"A list of all loggers:",
"DragonPy.cpu6809",
"dragonpy.Dragon32.MC6821_PIA",
], result.output)
errors = ["Error", "Traceback"]
self.assert_not_contains_members(errors, result.output)
def test_run_help(self):
result = self._invoke("run", "--help")
# print(result.output)
# print(cli_err)
self.assert_contains_members([
"Usage: cli run [OPTIONS]",
], result.output)
errors = ["Error", "Traceback"]
self.assert_not_contains_members(errors, result.output)
def test_editor_help(self):
result = self._invoke("editor", "--help")
# print(result.output)
# print(cli_err)
self.assert_contains_members([
"Usage: cli editor [OPTIONS]",
], result.output)
errors = ["Error", "Traceback"]
self.assert_not_contains_members(errors, result.output)
def test_download_roms(self):
result = self._invoke("download_roms")
# print(result.output)
# print(cli_err)
self.assert_contains_members([
"ROM file: d64_ic17.rom",
"Read ROM file",
"ROM SHA1:",
"ok",
"file size is",
], result.output)
errors = ["Error", "Traceback"]
self.assert_not_contains_members(errors, result.output)
| gpl-3.0 | 824,019,706,141,063,800 | 30.435185 | 113 | 0.534315 | false |
ctn-waterloo/nengo_pushbot | examples/robot_control_keyboard.py | 1 | 1082 | # control the motors of the robot
# also contains code for connecting to SpiNNaker
import nengo
spinnaker = False
import nengo_pushbot
import numpy as np
model = nengo.Network(label='pushbot')
with model:
input = nengo.Node([0,0], label='keyboard')
#a = nengo.Ensemble(500, dimensions=2, label='a')
if spinnaker:
bot = nengo_pushbot.PushBotNetwork('1,0,EAST')
else:
bot = nengo_pushbot.PushBotNetwork('10.162.177.49')
bot.show_image()
nengo.Connection(input, bot.motor, synapse=0.01, transform=[[-1, -1], [-0.3, 0.3]])
if __name__ == '__main__':
import nengo_gui.javaviz
jv = nengo_gui.javaviz.View(model)
if spinnaker:
import nengo_spinnaker
config = nengo_spinnaker.Config()
config[input].f_of_t = True
config[input].f_period = 2*np.pi
sim = nengo_spinnaker.Simulator(model)
else:
sim = nengo.Simulator(model)
jv.update_model(sim)
jv.view()
sim.run(5000)
#import nengo_spinnaker
#sim = nengo_spinnaker.Simulator(model)
#sim.run(10)
| mit | -546,890,297,734,732,860 | 21.081633 | 87 | 0.631238 | false |
Arkapravo/morse-0.6 | src/morse/geolandloader/geoShapefileLoader.py | 1 | 13736 | import shapelib, dbflib
import Blender
from Blender import *
import math
from math import sqrt
#
# The shapefile module
#
# SHAPELIB Object Types
#----------------------------------------------
#define SHPT_NULL 0
#----------------------------------------------
#2D Shape Types (pre ArcView 3.x):
#
#define SHPT_POINT 1 Points
#define SHPT_ARC 3 Arcs (Polylines, possible in parts)
#define SHPT_POLYGON 5 Polygons (possible in parts)
#define SHPT_MULTIPOINT 8 MultiPoint (related points)
#----------------------------------------------
# 3D Shape Types (may include "measure" values for vertices):
#
#define SHPT_POINTZ 11
#define SHPT_ARCZ 13
#define SHPT_POLYGONZ 15
#define SHPT_MULTIPOINTZ 18
#----------------------------------------------
# 2D + Measure Types:
#
#define SHPT_POINTM 21
#define SHPT_ARCM 23
#define SHPT_POLYGONM 25
#define SHPT_MULTIPOINTM 28
#----------------------------------------------
# Complex (TIN-like) with Z, and Measure:
#
#define SHPT_MULTIPATCH 31
#----------------------------------------------
# --------------------------- def read_shapefile(filename):
# open the shapefile
#shp = shapelib.ShapeFile(filename)
# the info method returns a tuple (num_shapes, type, min, max) where
# num_shapes is the number of shapes, type is the type code (one of
# the SHPT* constants defined in the shapelib module) and min and
# max are 4-element lists with the min. and max. values of the
# vertices.
#logging.info(shp.info())
# read_object reads a shape
#obj = shp.read_object(0)
# The vertices method returns the shape as a list of lists of tuples.
#logging.info(obj.vertices()[0][:10])
# The extents returns a tuple with two 4-element lists with the min.
# and max. values of the vertices.
#logging.info(obj.extents())
# The type attribute is the type code (one of the SHPT* constants
# defined in the shapelib module)
#logging.info(obj.type)
# The id attribute is the shape id
#logging.info(obj.id)
# the cobject method returns a PyCObject containing the shapelib
# SHPHandle. This is useful for passing shapefile objects to
# C-Python extensions.
#logging.info(shp.cobject())
# --------------------------- end of def read_shapefile(filename):
#--------------------------------------------------#
def distance2D(A, B):
x = B.co[0] - A.co[0]
y = B.co[1] - A.co[1]
return sqrt((x*x + y*y))
#--------------------------------------------------#
def LoadBuildings(shp, hostObject):
# Default parameters for ground altitude and building height
BAlt = 321
BAltOffset = -5
BHeight = 12
RoofHeight = 15
# Preparing Texture
buildingTex = Texture.New('buildingTex')
buildingTex.setType('Image')
img = Image.Load('/tmp/building.jpg')
buildingTex.image = img
# Default Material used for Buildings
buildingMat = Material.New('buildingMat')
buildingMat.rgbCol = [0.78,0.75, 0.4]
buildingMat.emit = 0.3
buildingMat.setSpec(0.0)
buildingMat.setTexture(0, buildingTex)
# Building barracks
Nshapes = (shp.info())[0]
    logging.info('Loading %d buildings', Nshapes)
for i in range(Nshapes):
# Oshp is a shape object
Oshp = shp.read_object(i)
#for v in Oshp.vertices()[0]
#--- Building the building with the shapefile's vertices
groundCoverage = Oshp.vertices()[0]
N = len(groundCoverage)
        logging.info('Building %d with %d vertices', i + 1, N)
buildingMesh = Blender.Mesh.New('buildingMesh');
#--- Average the altitude of building first floor
if (N > 0):
thisBuildingZ = hostObject.findZOfClosestPoint(groundCoverage[0]) - BAltOffset
else:
thisBuildingZ = hostObject.meanZ - BAltOffset
for i in range(N-1):
# Get the Z of the closest vertices in DTM to adjust BZ
# for j = TODO
# Extend the vertices of the current building's mesh
buildingMesh.verts.extend(groundCoverage[i][0] - hostObject.UTMXOrigin,
groundCoverage[i][1] - hostObject.UTMYOrigin ,
thisBuildingZ)
#--- Filling the face of the building's ground
if (N == 5) or (N == 4):
ff = NMesh.Face([buildingMesh.verts]);
buildingMesh.faces.extend(ff);
#--- Creating the walls
for i in range(N-1):
buildingMesh.verts.extend(groundCoverage[i][0] - hostObject.UTMXOrigin,
groundCoverage[i][1] - hostObject.UTMYOrigin ,
thisBuildingZ + BHeight)
#--- Filling the faces of the building's walls
for i in range(N-1):
if i < N-2:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+(N-1)], buildingMesh.verts[i+N], buildingMesh.verts[i+1]]);
else:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+(N-1)], buildingMesh.verts[N-1], buildingMesh.verts[0]]);
buildingMesh.faces.extend(ff);
#--- Building the roof the roof the roof is on Fire... ah lala
if (N == 5):
            # A two-point roof
longWallIs01 = 0
roofLength = distance2D(buildingMesh.verts[0], buildingMesh.verts[1])
if roofLength > distance2D(buildingMesh.verts[1], buildingMesh.verts[2]):
longWallIs01 = 1
else:
longWallIs01 = 0
roofLength = distance2D(buildingMesh.verts[1], buildingMesh.verts[2])
# Adding faces to the roof
if longWallIs01 == 1:
xroof = ((buildingMesh.verts[0]).co[0] + (buildingMesh.verts[N-2]).co[0]) / 2.0
yroof = ((buildingMesh.verts[0]).co[1] + (buildingMesh.verts[N-2]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
xroof = ((buildingMesh.verts[1]).co[0] + (buildingMesh.verts[2]).co[0]) / 2.0
yroof = ((buildingMesh.verts[1]).co[1] + (buildingMesh.verts[2]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[N], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[N+2] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[N+1] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+1], buildingMesh.verts[N+2], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[(2*(N-1))+1] ])
buildingMesh.faces.extend(ff)
else:
xroof = ((buildingMesh.verts[0]).co[0] + (buildingMesh.verts[1]).co[0]) / 2.0
yroof = ((buildingMesh.verts[0]).co[1] + (buildingMesh.verts[1]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
xroof = ((buildingMesh.verts[2]).co[0] + (buildingMesh.verts[3]).co[0]) / 2.0
yroof = ((buildingMesh.verts[2]).co[1] + (buildingMesh.verts[3]).co[1]) / 2.0
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
ff = NMesh.Face([buildingMesh.verts[N], buildingMesh.verts[N+1], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[N] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+1], buildingMesh.verts[(2*(N-1))+1], buildingMesh.verts[N+2] ])
buildingMesh.faces.extend(ff)
ff = NMesh.Face([buildingMesh.verts[N+2], buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))], buildingMesh.verts[(2*(N-1))+1] ])
buildingMesh.faces.extend(ff)
else:
            # A one-point roof
#xroof and yroof have already been precalculated
xroof = 0
yroof = 0
for i in range((N-1)):
xroof = xroof + (buildingMesh.verts[i]).co[0]
yroof = yroof + (buildingMesh.verts[i]).co[1]
#--- Average building 2D center
xroof = xroof / float(N-1)
yroof = yroof / float(N-1)
#--- Adding roof top points at verts index= (2*(N-1))
buildingMesh.verts.extend(xroof, yroof, thisBuildingZ + RoofHeight)
#--- Making faces around the roof
for i in range((N-1), (2*(N-1))):
if i == ((2*(N-1))-1):
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[N-1], buildingMesh.verts[(2*(N-1))] ])
else:
ff = NMesh.Face([buildingMesh.verts[i], buildingMesh.verts[i+1], buildingMesh.verts[(2*(N-1))] ])
buildingMesh.faces.extend(ff);
#--- Adding material
buildingMesh.materials = [buildingMat]
#--- Creating new Object in current scene
scene = Blender.Scene.GetCurrent()
buildingObject = scene.objects.new(buildingMesh)
Blender.Window.Redraw()
#----------------------------------------------------------#
def LoadRoads(shp, hostObject):
Nshapes = (shp.info())[0]
for i in range(Nshapes):
# Oshp is a shape object
Oshp = shp.read_object(i)
# We can read the vertices from the shape object
#logging.info(Oshp.vertices()[0])
#----------------------------------------------------------#
def make_shapefile(filename):
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, [[(10, 10), (20, 10), (20, 20), (10, 10)]])
logging.info(obj.extents())
logging.info(obj.vertices())
outfile = shapelib.create(filename, shapelib.SHPT_POLYGON)
outfile.write_object(-1, obj)
del outfile
#----------------------------------------------------------#
#
# Test the DBF file module.
#
def make_dbf(file):
# create a new dbf file and add three fields.
dbf = dbflib.create(file)
dbf.add_field("NAME", dbflib.FTString, 20, 0)
dbf.add_field("INT", dbflib.FTInteger, 10, 0)
dbf.add_field("FLOAT", dbflib.FTDouble, 10, 4)
#----------------------------------------------------------#
def add_dbf_records(file):
# add some records to file
dbf = dbflib.open(file, "r+b")
# Records can be added as a dictionary...
dbf.write_record(0, {'NAME': "Weatherwax", "INT":1, "FLOAT":3.1415926535})
# ... or as a sequence
dbf.write_record(1, ("Ogg", 2, -1000.1234))
#----------------------------------------------------------#
def list_dbf(file):
# logging.info(the contents of a dbf file to stdout)
dbf = dbflib.DBFFile(file)
logging.info("%d records, %d fields" % (dbf.record_count(), dbf.field_count()))
format = ""
for i in range(dbf.field_count()):
type, name, len, decc = dbf.field_info(i)
if type == 0:
format = format + " %%(%s)%ds" % (name, len)
elif type == 1:
format = format + " %%(%s)%dd" % (name, len)
elif type == 2:
format = format + " %%(%s)%dg" % (name, len)
logging.info(format)
for i in range(dbf.record_count()):
logging.info(format % dbf.read_record(i))
#----------------------------------------------------------#
# Main method to load shapefiles according to
# the meaning of their contents which can be indicated
# by shpNature.
#
# filename: Absolute Path to the shapefile.
# hostObject: Mesh in which the information should be added
# shpNature: int indicating the nature of the shape description (Roads, buildings, etc.)
#
# We define the following values for shpNature
# shpNature : 0 : Unknown content
# shpNature : any not defined value is considered as 0.
#
# shpNature : 20 : Drivable surface
# shpNature : 21 : Common Road in hard concrete (highways, streets etc.)
# shpNature : 22 : Country tracks
#
# shpNature : 30 : Buildings with flat roofs
# shpNature : 31 : Buildings with flat roofs
#
# shpNature : 40 : Vegetation high grass
# shpNature : 41 : Vegetation woods with nice trees: feuillus
# shpNature : 42 : Vegetation woods with nice trees: coniferes
#
#----------------------------------------------------------#
def LoadShapefile(filename, shpNature, hostObject):
# The shapelib object
shp = []
# open the shapefile
if os.path.isfile(filename):
(dirname, filerelname) = os.path.split(filename)
(bodyname, fileext) = os.path.splitext(filerelname)
logging.info('(DD) Is FILE OK')
        # In order to open a shapefile, shapelib also needs the corresponding .shx file
if os.path.isfile(os.path.join(dirname, bodyname+'.SHX')) or os.path.isfile(os.path.join(dirname, bodyname+'.shx')):
            logging.info('(DD) IS SHAPELIB OK')
shp = shapelib.ShapeFile(filename)
else:
return 0
if shp:
Tshp = (shp.info())[1]
        logging.info('(II) Reading shapefile with type %s contents (%s) as %s', Tshp, filerelname, shpNature)
#-------- Loading Roads
if (shpNature >= 20) and (shpNature <= 29):
LoadRoads(shp, hostObject)
#-------- Loading Buildings
if (shpNature >= 30) and (shpNature <= 39):
LoadBuildings(shp, hostObject)
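#----------------------------------------------------------#
# Illustrative calls (hypothetical paths and host object, not taken from the
# original project) showing how shpNature selects a loader above:
#
#   LoadShapefile('/data/city/roads.shp', 21, hostObject)      # hard-concrete roads -> LoadRoads
#   LoadShapefile('/data/city/buildings.shp', 30, hostObject)  # flat-roof buildings -> LoadBuildings
#
# Any shpNature value outside the handled ranges is treated as unknown (0) and no loader is called.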
| bsd-3-clause | -1,722,526,847,447,420,700 | 41.395062 | 150 | 0.564939 | false |
moonboy13/brew-journal | brew_journal/recipies/migrations/0002_auto_20160224_0318.py | 1 | 1120 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('recipies', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='account',
field=models.ForeignKey(related_name='account', to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='recipehops',
name='recipe',
field=models.ForeignKey(related_name='recipe_hops', to='recipies.Recipe', null=True),
),
migrations.AddField(
model_name='recipemalts',
name='recipe',
field=models.ForeignKey(related_name='recipe_malts', to='recipies.Recipe', null=True),
),
migrations.AlterField(
model_name='recipe',
name='last_brew_date',
field=models.DateTimeField(null=True, blank=True),
),
]
| apache-2.0 | -6,611,607,104,091,185,000 | 30.111111 | 100 | 0.590179 | false |
josh314/sf-crime | universal-probs.py | 1 | 1553 | ######################################################################
# A benchmark script which assigns the same probabilities to every test set member.
# The probabilities are determined by the relative fraction of each category of
# crime to the total number of crimes in the training set.
######################################################################
import pandas as pd
import numpy as np
import gzip
import os.path
import sf_crime_config as conf
#File locations
train_file = conf.train_raw
test_file = conf.test_raw
submission_file = os.path.join(conf.submission_dir, \
'universal-probs-submission.csv.gz')
#load training file to data frame
train = pd.read_csv(train_file,header=0)
print("train imported")
#Aggregate total number of each type of crime in training set
crime_numbers = train.groupby('Category').size()
#Create a row of overall probabilities out of the crime numbers
#This vector is thus normalized to sum up to 1.
crime_ratios = crime_numbers / len(train)
#Convert to list
probs = crime_ratios.values.tolist()
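#Sanity check with hypothetical numbers (not the real Kaggle counts): a category
#with 100,000 out of 800,000 training rows gets probability 100000/800000 = 0.125,
#and since crime_numbers sums to len(train), the entries of probs sum to 1.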
#load test file to data frame
test = pd.read_csv(test_file,header=0)
#Create a matrix of probabilities for each row of test data
#Each row gets the same values -- the overall probs.
probs_array = np.array([probs]*len(test))
#Create empty data frame for submission file
columns = crime_ratios.index.tolist()
df = pd.DataFrame(probs_array, columns=columns)
df.insert(loc=0,column='Id',value=test['Id'])
with gzip.open(submission_file,'wt') as archive:
df.to_csv(archive,index=False)
| cc0-1.0 | -1,150,541,506,081,730,200 | 34.295455 | 78 | 0.678042 | false |
CharKwayTeow/uci-webreg-crawler | python/download_all_courses_in_a_department.py | 1 | 1253 | #!/usr/bin/python
# This is a script to download course lists of a department in all quarters.
# Usage: python download_all_courses_in_a_department.py department_name
# Example: python download_all_courses_in_a_department.py COMPSCI
import urllib2, sys, os, string
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
url = 'http://websoc.reg.uci.edu/perl/WebSoc'
webfile = urllib2.urlopen(url).read()
department = sys.argv[1]
if not os.path.exists(department):
os.makedirs(department)
print 'Begin to download all courses in', department, ':'
for line in webfile.splitlines():
if "<option value=\"20" in line:
param = find_between(line, "value=\"", "\" style")
term = find_between(line, ">", "<")
query_url = "http://websoc.reg.uci.edu/perl/WebSoc?Submit=Display+Web+Results&YearTerm="+ param + "&Dept=" + department
response = urllib2.urlopen(query_url).read()
#build file name
trans = string.maketrans(' ', '_')
filename = term.translate(trans)
print term
#store to html file
fo = open(department + '/' + filename + ".html", "w+")
fo.write(response)
fo.close()
| gpl-2.0 | -5,043,633,817,063,758,000 | 28.139535 | 121 | 0.663208 | false |
hellohaptik/chatbot_ner | ner_v1/detectors/pattern/pnr/pnr_detection.py | 1 | 12091 | from __future__ import absolute_import
import re
from ner_v1.detectors.base_detector import BaseDetector
from language_utilities.constant import ENGLISH_LANG
class PNRDetector(BaseDetector):
"""Detects PNR (serial) codes (Passenger Record Number, usually present with train or flight bookings) in given text
and tags them. Usually flight pnr codes are 5 to 8 characters long.
Detects all PNR/serial codes of variable length about 5 to 20 characters in given text and replaces them by
entity_name. Different detectors are used depending on the entity_name used to initialize the PNRDetector object.
A task_dict, a dictonary mapping detector functions to entity_name is used for this.
For example if 'train_pnr' is used to initialize PNRDetector(),
_detect_railway_pnr() would be called to detect pnr codes. In case if entity_name is not present in
task_dict , _detect_serial_pnr() is used to detect pnr codes.
Attributes:
text: string to extract entities from
entity_name: string by which the detected pnr codes would be replaced with on calling detect_entity()
tagged_text: string with pnr codes replaced with tag defined by entity name
processed_text: string with pnr codes detected removed
pnr_list: list of pnr codes detected
original_pnr_text: list to store substrings of the text detected as pnr codes
tag: entity_name prepended and appended with '__'
        task_dict : A dictionary mapping detector functions to entity_name. For example if 'train_pnr' is used to
initialize PNRDetector(), _detect_railway_pnr() would be called to detect pnr codes
In case if entity_name is not present in task_dict , _detect_serial_pnr() is used to detect pnr
codes
For Example:
text = "Your flight booking was sucessful. Your pnr is 4sgx3e."
pnr_detector = PNRDetector("pnr_number")
pnr_numbers, original_pnr_numbers = pnr_detector.detect_entity(text)
pnr_detector.tagged_text
Output:
' Your flight booking was sucessful. Your pnr is __pnr__number__. '
pnr_numbers, original_pnr_numbers
Output:
(['4sgx3e'], ['4sgx3e'])
Note:
        text and tagged_text will have an extra space prepended and appended after calling detect_entity(text)
More Examples:
text = "Your flight booking was sucessful. Your pnr is 43333."
...
pnr_numbers, original_pnr_numbers
(['43333'], ['43333'])
text = "Your flight booking was sucessful. Your pnr is 433."
...
pnr_numbers, original_pnr_numbers
([], [])
text = "Your flight booking was sucessful. Your pnr is sgxsgx."
...
pnr_numbers, original_pnr_numbers
(['sgxsgx'], ['sgxsgx'])
"""
def __init__(self, entity_name, source_language_script=ENGLISH_LANG, translation_enabled=False):
"""Initializes a PNRDetector object
Args:
entity_name: A string by which the detected pnr codes would be replaced with on calling detect_entity()
source_language_script: ISO 639 code for language of entities to be detected by the instance of this class
            translation_enabled: True if messages need to be translated in case the detector does not support a
                                 particular language, else False
"""
# assigning values to superclass attributes
self._supported_languages = [ENGLISH_LANG]
super(PNRDetector, self).__init__(source_language_script, translation_enabled)
self.entity_name = entity_name
self.task_dict = {
'train_pnr': self._detect_railway_pnr,
'Default': self._detect_serial_pnr
}
self.text = ''
self.tagged_text = ''
self.processed_text = ''
self.pnr_list = []
self.original_pnr_text = []
self.tag = '__' + self.entity_name + '__'
@property
def supported_languages(self):
return self._supported_languages
def detect_entity(self, text, **kwargs):
"""Detects pnr codes in the text string
Args:
text: string to extract entities from
**kwargs: it can be used to send specific arguments in future
Returns:
A tuple of two lists with first list containing the detected pnr codes and second list containing their
corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
Additionally this function assigns these lists to self.pnr_list and self.original_pnr_text attributes
respectively.
"""
self.text = ' ' + text + ' '
self.processed_text = self.text
self.tagged_text = self.text
pnr_data = self.task_dict.get(self.entity_name, self.task_dict['Default'])()
self.pnr_list = pnr_data[0]
self.original_pnr_text = pnr_data[1]
return pnr_data
def _detect_railway_pnr(self):
"""Detects railway pnr codes in the text string
Detects Indian Railways 10 to 12 digits PNR codes in the text
Returns:
A tuple of two lists with first list containing the detected pnr codes and second list containing their
corresponding substrings in the given text.
For example, if text is "My train pnr is 2459547855, can you check the train status for me ?"
It returns
(['2459547855'], ['2459547855'])
        Additionally this function assigns these lists to self.pnr_list and self.original_pnr_text
attributes respectively.
"""
# print 'detection for default task'
railway_pnr_list = []
original_list = []
railway_pnr_list, original_list = self._detect_railway_pnr_format(railway_pnr_list, original_list)
self._update_processed_text(original_list)
railway_pnr_list, original_list = self._detect_railway_pnr_long_format(railway_pnr_list, original_list)
self._update_processed_text(original_list)
return railway_pnr_list, original_list
def _detect_railway_pnr_format(self, railway_pnr_list=None, original_list=None):
"""
Detects Indian Railways 10 to 12 digits pnr codes from self.text conforming to formats defined by
regex pattern.
This function is called by _detect_railway_pnr()
Args:
            railway_pnr_list: Optional, list to store detected pnr codes
original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['2459547855'], ['2459547855'])
"""
if railway_pnr_list is None:
railway_pnr_list = []
if original_list is None:
original_list = []
patterns = re.findall(r'\b([0-9]{10,12})\b', self.processed_text.lower())
for pattern in patterns:
railway_pnr_list.append(pattern)
original_list.append(pattern)
return railway_pnr_list, original_list
def _detect_railway_pnr_long_format(self, railway_pnr_list=None, original_list=None):
"""
Detects railway PNR 10 digit number with special characters
Args:
            railway_pnr_list: Optional, list to store detected pnr codes
original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['2459547855'], ['2459547855'])
"""
if railway_pnr_list is None:
railway_pnr_list = []
if original_list is None:
original_list = []
patterns = re.findall(r'\b([0-9\-\s\(\)\.]{10,20})\b', self.processed_text.lower())
for pattern in patterns:
clean_pnr = self._clean_pnr(pattern)
if len(clean_pnr) == 10:
railway_pnr_list.append(clean_pnr)
original_list.append(pattern)
return railway_pnr_list, original_list
def _clean_pnr(self, pnr):
"""
This function clean special character from pnr text
Args:
pnr: PNR containing special characters
Returns:
pnr: PNR with special characters removed
"""
return re.sub('[\-\s\.\(\)]+', '', pnr)
def _detect_serial_pnr(self):
"""
Detects generic serial/pnr codes from self.text conforming to formats defined by regex pattern.
Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
"""
# print 'detection for default task'
pnr_list = []
original_list = []
pnr_list, original_list = self._detect_serial_key(pnr_list, original_list)
self._update_processed_text(original_list)
return pnr_list, original_list
def _detect_serial_key(self, pnr_list=None, original_list=None):
"""
Detects generic serial/pnr codes from self.text conforming to formats defined by regex pattern.
This function is called by _detect_railway_pnr()
Args:
            pnr_list: Optional, list to store detected pnr codes
original_list: Optional, list to store corresponding original substrings of text which were detected as
                           pnr codes
Returns:
            A tuple of two lists with first list containing the detected pnr codes and second list containing
their corresponding substrings in the given text.
For example:
(['4sgx3e'], ['4sgx3e'])
"""
if pnr_list is None:
pnr_list = []
if original_list is None:
original_list = []
pnr = None
pattern = re.compile(r'\s(([0-9]+[a-zA-Z]|[a-zA-Z]+[0-9])[A-Za-z0-9]*)\s').search(self.processed_text.lower())
pattern2 = re.compile(r'\se([0-9]{4,20})\s').search(self.processed_text.lower())
pattern3 = re.compile(r'\s([A-Z]{4,20})\s').search(self.processed_text.lower())
pattern4 = re.compile(r'\s([A-Za-z0-9]*[^AaEeIiOoUu\+\-,!@#\$\^&\*\(\);/\|<>\s]{4,10}[A-Za-z0-9]+)[\s\.]') \
.search(self.processed_text.lower())
if pattern and len(pattern.group(1)) > 3:
pnr = pattern.group(1)
elif pattern2:
pnr = pattern2.group(1)
elif pattern3:
pnr = pattern3.group(1)
elif pattern4:
pnr = pattern4.group(1)
if pnr:
pnr_list.append(pnr)
original_list.append(pnr)
return pnr_list, original_list
def _update_processed_text(self, original_pnr_strings):
"""
Replaces detected pnr codes with tag generated from entity_name used to initialize the object with
A final string with all pnr codes replaced will be stored in object's tagged_text attribute
A string with all pnr codes removed will be stored in object's processed_text attribute
Args:
original_pnr_strings: list of substrings of original text to be replaced with tag created from entity_name
"""
for detected_text in original_pnr_strings:
self.tagged_text = self.tagged_text.replace(detected_text, self.tag)
self.processed_text = self.processed_text.replace(detected_text, '')
| gpl-3.0 | 1,228,918,726,222,171,000 | 40.12585 | 120 | 0.619055 | false |
wangjeaf/CSSCheckStyle | ckstyle/plugins/FEDFixNestedStatement.py | 1 | 1147 | #/usr/bin/python
#encoding=utf-8
from .Base import *
class FEDFixNestedStatement(ExtraChecker):
'''{
"summary":"修复嵌套的CSS",
"desc":"@keyframes, @media之类的"
}'''
def __init__(self):
self.id = 'fix-nested-ruleset'
self.errorLevel = ERROR_LEVEL.ERROR
self.errorMsg = ''
self.private = True
def check(self, ruleSet, config):
return True
def fix(self, ruleSet, config):
if not ruleSet.nested:
return
ruleSet.fixedSelector = ruleSet.fixedSelector.replace('"', '\'')
statement = ruleSet.fixedStatement
if (hasattr(config, 'operation') and getattr(config, 'operation') == 'compress'):
from ckstyle.doCssCompress import prepare
checker = prepare(statement, '', config)
            # nested CSS also needs to be minified when compressing
msg = checker.doCompress(config._curBrowser)
ruleSet.fixedStatement = msg
else:
from ckstyle.doCssFix import doFix
checker, msg = doFix(statement, '', config)
ruleSet.fixedStatement = msg
| bsd-3-clause | -6,618,577,144,759,278,000 | 29.583333 | 89 | 0.587648 | false |
fos/fos-legacy | fos/core/actor.py | 1 | 8156 | import numpy as np
from fos.actor.primitives import AABBPrimitive
from pyglet.gl import GLfloat
from pyglet.gl import *
class Actor(object):
""" Define a visualization object in Fos """
def __init__(self,
affine = None,
aabb = None,
force_center_data = False,
**kwargs):
""" Create an actor
Parameters
----------
affine : 4x4 array
the affine is expected to be normal, i.e. it has only
rotational and translational components, but no shears
the affine is applied to the input vertices data to position
            the actor in the world space. If set to None, an affine is
            generated to position the actor optimally for the camera view
aabb : (corner1, corner2)
the axis-aligned bounding box. axis-aligned means aligned
with the world coordinate system axes
corner1 : 3x1 array
bottom-left-front point of the box when look into z direction
corner2 : 3x1 array
top-right-back point of the box
If set to None, an axis aligned boundingbox is computed
using the input vertices
force_center_data : boolean
            if set to True, the mean vertex location is subtracted from
            all the vertices. This is useful to compute a better bounding
            box and when the data has no meaningful affine
obb : (center, orientation, halfwidths)
center : 3x1 array
the center point of the aabb
orientation : 3x3 array
orthogonal unit vectors
halfwidths : 3x1 array
box halfwidths along each axis
"""
# data related information
self.vertices = None
self.living = False
self.show_aabb = True
# self.connectivity = None
# self.field = None # scalar, vector, tensor
# self.colormap = None
# self.texture = None
# movement related information. use the
# self.velocity = None
# self.acceleration = None
# event related information
# self.event_queue = None
# mouse or keyboard events on the actor
# self.event_handlers = None
# related: menu options for the actor
def setup(self):
""" Data preparation """
# display lists, vbo
# prepare different resolutions
pass
def update(self, dt):
""" Update the actor
dt from the global timer """
pass
def draw_aabb(self):
""" Draw the actor """
if self.show_aabb:
glPushMatrix()
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glLineWidth(1.0)
glColor3f(1.0, 1.0, 0.0)
glEnableClientState(GL_VERTEX_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, self.aabb.vertices_ptr)
glDrawElements(self.aabb.mode,self.aabb.indices_nr,self.aabb.type,self.aabb.indices_ptr)
glDisableClientState(GL_VERTEX_ARRAY)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glPopMatrix()
def delete(self):
""" Removing the geometry """
pass
def info(self):
""" Show information about the actor """
# debug mode
print "this actor is at ", self
print "number of vertices", len(self.vertices)
print "is the actor living ?", self.living
if not self.aabb is None:
print "has boundary box", self.aabb
def to_raytracer_file(self):
""" Save the geometry to a file readable by a raytracer """
pass
def process_pickray(self, near, far):
""" Process the pick ray like intersecting with the actor """
pass
def process_keys(self,symbol,modifiers):
pass
def process_mouse_motion(self,x,y,dx,dy):
pass
def make_aabb(self, aabb = None, margin = 30):
""" Make the axis aligned bounding box.
Parameters
----------
aabb : 2-tuple of numpy arrays of shape(3,)
Defining the box by left-bottom-front and the top-right-back
coordinate points. If None, a bounding box based on the
vertices is computed.
margin : float
A margin to be added to the computed bounding box
"""
# if no aabb is given, compute one
if aabb == None:
# compute an axis aligned bounding box
# based on the vertices
coord1 = np.array([self.vertices[:,0].min(),
self.vertices[:,1].min(),
self.vertices[:,2].min()], dtype = np.float32)
coord2 = np.array([self.vertices[:,0].max(),
self.vertices[:,1].max(),
self.vertices[:,2].max()], dtype = np.float32)
self.aabb = AABBPrimitive(blf = coord1, trb = coord2, margin = margin)
else:
assert len(aabb) == 2
# otherwise set to given aabb
self.aabb = AABBPrimitive(blf = aabb[0], trb = aabb[1], margin = margin)
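        # Illustrative (hypothetical data): for vertices [[0, 0, 0], [1, 2, 3]] the
        # corners computed above would be coord1 = (0, 0, 0) and coord2 = (1, 2, 3).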
def get_aabb_coords(self):
"""
Returns AABB coordinates blf and trb in world space (using the affine)
"""
ab1 = self.aabb.coord[0]
ab2 = self.aabb.coord[1]
r1 = np.dot(self.affine, np.array( [ab1[0], ab1[1], ab1[2], 1.0] ) )
r2 = np.dot(self.affine, np.array( [ab2[0], ab2[1], ab2[2], 1.0] ) )
return (r1[:3], r2[:3])
def make_obb(self):
pass
# just reuse the aabb points
# leftbottom, righttop = self.aabb
#
# center = np.mean( np.vstack( (leftbottom, righttop) ), axis = 0)
# halfwidths = (leftbottom - righttop) / 2.0
# # in our default network, this obb is axis-aligned, thus the
# # obb is the identity
# orientation = np.eye( 3, 3 )
#
# self.obb = (center, halfwidths, orientation)
def bounding_sphere(self):
""" Compute the bounding sphere """
pass
# can use PCA?
    def bounding_ellipsoid(self):
        """ Compute the bounding ellipsoid """
pass
# can use PCA?
## affine logic
###
def set_affine(self, affine):
# update the affine
print "update affine", self.affine
self.affine = affine
self._update_glaffine()
def scale(self, scale_factor):
""" Scales the actor by scale factor.
Multiplies the diagonal of the affine for
the first 3 elements """
self.affine[0,0] *= scale_factor
self.affine[1,1] *= scale_factor
self.affine[2,2] *= scale_factor
self._update_glaffine()
def translate(self, dx, dy, dz):
""" Translate the actor.
        Remember that OpenGL uses a right-handed
        coordinate system """
self.affine[0,3] += dx
self.affine[1,3] += dy
self.affine[2,3] += dz
self._update_glaffine()
def set_position(self, x, y, z):
""" Position the actor.
        Remember that OpenGL uses a right-handed
        coordinate system """
        self.affine[0,3] = x
        self.affine[1,3] = y
        self.affine[2,3] = z
self._update_glaffine()
def _update_glaffine(self):
self.glaffine = (GLfloat * 16)(*tuple(self.affine.T.ravel()))
# life processes
###
def start(self, lifespan = 10, tickingtime = 2.0):
print "the actor is alive"
self.living = True
self.internal_timestamp = 0.0
# call self.update every tickingtime
def stop(self):
print "the actor stops living"
self.living = False
def cont(self):
print "continue to live happily"
self.living = True
| bsd-3-clause | 8,687,800,132,622,186,000 | 31.624 | 100 | 0.538009 | false |
zesty/sgraph | test_sgraph.py | 1 | 1807 | #! env python3
import unittest
import sgraph
class TestSgraph(unittest.TestCase):
def setUp(self):
graph = []
with open('graph') as f: # one per line FIXME fixture?
for edge in f.readlines():
src, dest, *cost = list(edge.strip())
cost = int(''.join(cost)) # bc maybe size >= 10; FIXME multi-char cities needs diff input format
graph.append((src, dest, cost))
self.sgl = sgraph.SGraph(graph)
def test01(self):
self.assertEqual(9, self.sgl.route_distance(['A', 'B', 'C']))
def test02(self):
self.assertEqual(5, self.sgl.route_distance(['A', 'D']))
def test03(self):
self.assertEqual(13, self.sgl.route_distance(['A', 'D', 'C']))
def test04(self):
self.assertEqual(22, self.sgl.route_distance(['A', 'E', 'B', 'C', 'D']))
def test05(self):
self.assertRaises(sgraph.SGraph.NoSuchRoute, self.sgl.route_distance, ['A', 'E', 'D'])
try:
x = self.sgl.route_distance(['A', 'E', 'D'])
print(str(x)) # never
except sgraph.SGraph.NoSuchRoute as e:
self.assertEqual('NO SUCH ROUTE', str(e))
def test06(self):
self.assertEqual(2, self.sgl.count_routes_max_stops('C', 'C', 3))
def test07(self):
self.assertEqual(3, self.sgl.count_routes_exact_stops('A', 'C', 4))
def test08(self):
self.assertEqual(9, self.sgl.shortest_route('A', 'C'))
def test09(self):
self.assertEqual(9, self.sgl.shortest_route('B', 'B'))
def test10(self):
self.assertEqual(7, self.sgl.count_routes_max_distance('C', 'C', 30))
def test11(self):
self.assertEqual(float('inf'), self.sgl.shortest_route('A', 'A'))
if __name__ == '__main__':
unittest.main()
| mit | 7,265,645,486,266,781,000 | 30.155172 | 113 | 0.570006 | false |
tanayseven/Voix | flask/lib/python2.7/site-packages/whoosh/matching/wrappers.py | 1 | 14976 | # Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import sys
from whoosh.compat import xrange
from whoosh.matching import mcore
class WrappingMatcher(mcore.Matcher):
"""Base class for matchers that wrap sub-matchers.
"""
def __init__(self, child, boost=1.0):
self.child = child
self.boost = boost
def __repr__(self):
return "%s(%r, boost=%s)" % (self.__class__.__name__, self.child,
self.boost)
def copy(self):
kwargs = {}
if hasattr(self, "boost"):
kwargs["boost"] = self.boost
return self.__class__(self.child.copy(), **kwargs)
def depth(self):
return 1 + self.child.depth()
def _replacement(self, newchild):
return self.__class__(newchild, boost=self.boost)
def replace(self, minquality=0):
# Replace the child matcher
r = self.child.replace(minquality)
if r is not self.child:
# If the child changed, return a new wrapper on the new child
return self._replacement(r)
else:
return self
def max_quality(self):
return self.child.max_quality()
def id(self):
return self.child.id()
def all_ids(self):
return self.child.all_ids()
def is_active(self):
return self.child.is_active()
def reset(self):
self.child.reset()
def children(self):
return [self.child]
def supports(self, astype):
return self.child.supports(astype)
def value(self):
return self.child.value()
def value_as(self, astype):
return self.child.value_as(astype)
def spans(self):
return self.child.spans()
def skip_to(self, id):
return self.child.skip_to(id)
def next(self):
self.child.next()
def supports_block_quality(self):
return self.child.supports_block_quality()
def skip_to_quality(self, minquality):
return self.child.skip_to_quality(minquality / self.boost)
def block_quality(self):
return self.child.block_quality() * self.boost
def weight(self):
return self.child.weight() * self.boost
def score(self):
return self.child.score() * self.boost
class MultiMatcher(mcore.Matcher):
"""Serializes the results of a list of sub-matchers.
"""
def __init__(self, matchers, idoffsets, current=0):
"""
:param matchers: a list of Matcher objects.
:param idoffsets: a list of offsets corresponding to items in the
``matchers`` list.
"""
self.matchers = matchers
self.offsets = idoffsets
self.current = current
self._next_matcher()
def __repr__(self):
return "%s(%r, %r, current=%s)" % (self.__class__.__name__,
self.matchers, self.offsets,
self.current)
def is_active(self):
return self.current < len(self.matchers)
def reset(self):
for mr in self.matchers:
mr.reset()
self.current = 0
def children(self):
return [self.matchers[self.current]]
def _next_matcher(self):
matchers = self.matchers
while (self.current < len(matchers)
and not matchers[self.current].is_active()):
self.current += 1
def copy(self):
return self.__class__([mr.copy() for mr in self.matchers],
self.offsets, current=self.current)
def depth(self):
if self.is_active():
return 1 + max(mr.depth() for mr in self.matchers[self.current:])
else:
return 0
def replace(self, minquality=0):
m = self
if minquality:
# Skip sub-matchers that don't have a high enough max quality to
# contribute
while (m.is_active()
and m.matchers[m.current].max_quality() < minquality):
m = self.__class__(self.matchers, self.offsets, m.current + 1)
m._next_matcher()
if not m.is_active():
return mcore.NullMatcher()
# TODO: Possible optimization: if the last matcher is current, replace
# this with the last matcher, but wrap it with a matcher that adds the
# offset. Have to check whether that's actually faster, though.
return m
def max_quality(self):
return self.matchers[self.current].max_quality()
def id(self):
current = self.current
return self.matchers[current].id() + self.offsets[current]
def all_ids(self):
offsets = self.offsets
for i, mr in enumerate(self.matchers):
for id in mr.all_ids():
yield id + offsets[i]
def spans(self):
return self.matchers[self.current].spans()
def supports(self, astype):
return self.matchers[self.current].supports(astype)
def value(self):
return self.matchers[self.current].value()
def value_as(self, astype):
return self.matchers[self.current].value_as(astype)
def next(self):
if not self.is_active():
raise mcore.ReadTooFar
self.matchers[self.current].next()
if not self.matchers[self.current].is_active():
self._next_matcher()
def skip_to(self, id):
if not self.is_active():
raise mcore.ReadTooFar
if id <= self.id():
return
matchers = self.matchers
offsets = self.offsets
r = False
while self.current < len(matchers) and id > self.id():
mr = matchers[self.current]
sr = mr.skip_to(id - offsets[self.current])
r = sr or r
if mr.is_active():
break
self._next_matcher()
return r
def supports_block_quality(self):
return all(mr.supports_block_quality() for mr
in self.matchers[self.current:])
def block_quality(self):
return self.matchers[self.current].block_quality()
def weight(self):
return self.matchers[self.current].weight()
def score(self):
return self.matchers[self.current].score()
def ExcludeMatcher(child, excluded, boost=1.0):
return FilterMatcher(child, excluded, exclude=True, boost=boost)
class FilterMatcher(WrappingMatcher):
"""Filters the postings from the wrapped based on whether the IDs are
present in or absent from a set.
"""
def __init__(self, child, ids, exclude=False, boost=1.0):
"""
:param child: the child matcher.
:param ids: a set of IDs to filter by.
:param exclude: by default, only IDs from the wrapped matcher that are
**in** the set are used. If this argument is True, only IDs from
the wrapped matcher that are **not in** the set are used.
"""
super(FilterMatcher, self).__init__(child)
self._ids = ids
self._exclude = exclude
self.boost = boost
self._find_next()
def __repr__(self):
return "%s(%r, %r, %r, boost=%s)" % (self.__class__.__name__,
self.child, self._ids,
self._exclude, self.boost)
def reset(self):
self.child.reset()
self._find_next()
def copy(self):
return self.__class__(self.child.copy(), self._ids, self._exclude,
boost=self.boost)
def _replacement(self, newchild):
return self.__class__(newchild, self._ids, exclude=self._exclude,
boost=self.boost)
def _find_next(self):
child = self.child
ids = self._ids
r = False
if self._exclude:
while child.is_active() and child.id() in ids:
r = child.next() or r
else:
while child.is_active() and child.id() not in ids:
r = child.next() or r
return r
def next(self):
self.child.next()
self._find_next()
def skip_to(self, id):
self.child.skip_to(id)
self._find_next()
def all_ids(self):
ids = self._ids
if self._exclude:
return (id for id in self.child.all_ids() if id not in ids)
else:
return (id for id in self.child.all_ids() if id in ids)
def all_items(self):
ids = self._ids
if self._exclude:
return (item for item in self.child.all_items()
if item[0] not in ids)
else:
return (item for item in self.child.all_items() if item[0] in ids)
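# A minimal usage sketch (illustrative, not from the library docs); it assumes
# ListMatcher from whoosh.matching.mcore as the child matcher:
#
#   from whoosh.matching.mcore import ListMatcher
#   m = FilterMatcher(ListMatcher([1, 2, 3, 4, 5]), ids=set([2, 4]))
#   list(m.all_ids())  # -> [2, 4]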
class InverseMatcher(WrappingMatcher):
"""Synthetic matcher, generates postings that are NOT present in the
wrapped matcher.
"""
def __init__(self, child, limit, missing=None, weight=1.0, id=0):
super(InverseMatcher, self).__init__(child)
self.limit = limit
self._weight = weight
self.missing = missing or (lambda id: False)
self._id = id
self._find_next()
def copy(self):
return self.__class__(self.child.copy(), self.limit,
weight=self._weight, missing=self.missing,
id=self._id)
def _replacement(self, newchild):
return self.__class__(newchild, self.limit, missing=self.missing,
weight=self._weight, id=self._id)
def is_active(self):
return self._id < self.limit
def reset(self):
self.child.reset()
self._id = 0
self._find_next()
def supports_block_quality(self):
return False
def _find_next(self):
child = self.child
missing = self.missing
# If the current docnum isn't missing and the child matcher is
# exhausted (so we don't have to worry about skipping its matches), we
# don't have to do anything
if not child.is_active() and not missing(self._id):
return
# Catch the child matcher up to where this matcher is
if child.is_active() and child.id() < self._id:
child.skip_to(self._id)
# While self._id is missing or is in the child matcher, increase it
while child.is_active() and self._id < self.limit:
if missing(self._id):
self._id += 1
continue
if self._id == child.id():
self._id += 1
child.next()
continue
break
def id(self):
return self._id
def all_ids(self):
return mcore.Matcher.all_ids(self)
def next(self):
if self._id >= self.limit:
raise mcore.ReadTooFar
self._id += 1
self._find_next()
def skip_to(self, id):
if self._id >= self.limit:
raise mcore.ReadTooFar
if id < self._id:
return
self._id = id
self._find_next()
def weight(self):
return self._weight
def score(self):
return self._weight
class RequireMatcher(WrappingMatcher):
"""Matches postings that are in both sub-matchers, but only uses scores
from the first.
"""
def __init__(self, a, b):
from whoosh.matching.binary import IntersectionMatcher
self.a = a
self.b = b
self.child = IntersectionMatcher(a, b)
def copy(self):
return self.__class__(self.a.copy(), self.b.copy())
def supports_block_quality(self):
return self.a.supports_block_quality()
def replace(self, minquality=0):
if not self.child.is_active():
# If one of the sub-matchers is inactive, go inactive
return mcore.NullMatcher()
elif minquality and self.a.max_quality() < minquality:
# If the required matcher doesn't have a high enough max quality
# to possibly contribute, return an inactive matcher
return mcore.NullMatcher()
new_a = self.a.replace(minquality)
new_b = self.b.replace()
if not new_a.is_active():
return mcore.NullMatcher()
elif new_a is not self.a or new_b is not self.b:
# If one of the sub-matchers changed, return a new Require
return self.__class__(new_a, self.b)
else:
return self
def max_quality(self):
return self.a.max_quality()
def block_quality(self):
return self.a.block_quality()
def skip_to_quality(self, minquality):
skipped = self.a.skip_to_quality(minquality)
self.child._find_next()
return skipped
def weight(self):
return self.a.weight()
def score(self):
return self.a.score()
def supports(self, astype):
return self.a.supports(astype)
def value(self):
return self.a.value()
def value_as(self, astype):
return self.a.value_as(astype)
class ConstantScoreMatcher(WrappingMatcher):
def __init__(self, child, score=1.0):
super(ConstantScoreMatcher, self).__init__(child)
self._score = score
def copy(self):
return self.__class__(self.child.copy(), score=self._score)
def _replacement(self, newchild):
return self.__class__(newchild, score=self._score)
def block_quality(self):
return self._score
def score(self):
return self._score
| gpl-3.0 | -2,723,485,462,847,553,500 | 28.892216 | 78 | 0.584001 | false |
chimkentec/KodiMODo_rep | plugin.video.tree.tv.dev/core/auth.py | 1 | 1953 | # -*- coding: utf-8 -*-
import pickle, re
import xbmcup.app, xbmcup.system, xbmcup.net
from defines import *
class Auth:
def __init__(self):
self.success = '"ok"'
self.cookie_file = xbmcup.system.fs('sandbox://'+COOKIE_FILE)
self.login = xbmcup.app.setting['username']
self.password = xbmcup.app.setting['password']
#xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
def autorize(self):
try:
if(self.login == '' or self.password == ''):
self.reset_auth()
return False
url = '%s/users/index/auth?mail=%s&pass=%s&social=0&_=1422391861285' % (SITE_URL, self.login, self.password)
response = xbmcup.net.http.get(url)
except xbmcup.net.http.exceptions.RequestException:
return False
else:
return self._check_response(response)
def _check_response(self, response):
is_logged = response.text == self.success
if(is_logged):
self.save_cookies(response.cookies)
xbmcup.app.setting['is_logged'] = 'true'
else:
xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
return is_logged
def save_cookies(self, cookiejar):
with open(self.cookie_file, 'wb') as f:
pickle.dump(cookiejar, f)
def get_cookies(self):
if(xbmcup.system.fs.exists('sandbox://'+COOKIE_FILE)):
with open(self.cookie_file, 'rb') as f:
return pickle.load(f)
return {}
def reset_auth(self, reset_settings=False):
xbmcup.app.setting['is_logged'] = 'false'
if reset_settings == True:
xbmcup.app.setting['username'] = ''
xbmcup.app.setting['password'] = ''
xbmcup.system.fs.delete('sandbox://'+COOKIE_FILE)
def check_auth(self, page):
reg = re.compile('/users/index/logout', re.S).findall(page)
return len(reg) > 0 | gpl-3.0 | 2,525,044,675,271,386,600 | 31.566667 | 120 | 0.576549 | false |
PrestigeDox/Watashi-SelfBot | cogs/converter.py | 1 | 2237 | import discord
from discord.ext import commands
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
class Converter:
def __init__(self, bot):
self.bot = bot
self.aiohttp_session = bot.aiohttp_session
self.url = 'https://google.com/search'
self.headers = {
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR '
'2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; MS-RTC LM 8; '
'InfoPath.3; .NET4.0C; .NET4.0E) chromeframe/8.0.552.224',
'Accept-Language': 'en-us',
'Cache-Control': 'no-cache'
}
@commands.command()
async def convert(self, ctx, *, query=None):
""" Calculate some expressions! """
# Handle no query being provided
if query is None:
return await ctx.error('Please provide a query!')
from_unit = query.split()[0]
to_unit = query.split()[1]
try:
val = float(query.split()[2])
except ValueError:
return await ctx.error('Invalid query.')
# Doing this in the f-string later would become f-string-ception and that doesn't work
qstr = quote_plus(f'{val} {from_unit} to {to_unit}')
# Tries its best to imitate a real browser visit, an old user-agent is used to make scraping easier
async with self.aiohttp_session.get(f'{self.url}?q={qstr}&source=hp', headers=self.headers) as r:
html = await r.text()
# Beautiful soup
soup = BeautifulSoup(html, 'lxml')
        # The span inside div._Qeb holds the conversion result; if it doesn't exist, Google didn't understand
        # the query or it's just invalid
if not soup.select('div#ires div._Qeb span'):
return await ctx.error('Could not convert expression.')
# Values with units
from_val = soup.select("div#ires div._Qeb span")[0].text.split()[0]
to_val = soup.select("div#ires div._Peb")[0].text.split()[0]
await ctx.message.edit(content=f"{from_val}{from_unit} = {to_val}{to_unit}")
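    # Hypothetical invocation (the command prefix depends on the bot's configuration):
    #   <prefix>convert km mi 5
    # edits the invoking message to something like "5km = 3.106856mi", with the
    # numbers taken from whatever Google returns for the scraped query.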
def setup(bot):
bot.add_cog(Converter(bot))
| mit | 4,840,375,041,808,833,000 | 37.568966 | 115 | 0.596334 | false |
FirmlyReality/docklet | tools/upgrade_file2db.py | 2 | 3353 | import sys
sys.path.append("../src/")
import os,json
from datetime import datetime
from model import db, VCluster, Container, PortMapping, Image, BillingHistory
timeFormat = "%Y-%m-%d %H:%M:%S"
dockletPath = "/opt/docklet/global"
usersdir = dockletPath + "/users/"
try:
VCluster.query.all()
except Exception as err:
print("Create database...")
db.create_all()
print("Update vcluster...")
for user in os.listdir(usersdir):
tmppath = usersdir+user+"/clusters/"
if not os.path.exists(tmppath):
continue
print("Update User: "+str(user))
clusterfiles = os.listdir(tmppath)
for cluname in clusterfiles:
cluFile = open(tmppath+cluname,"r")
cluinfo = json.loads(cluFile.read())
vcluster = VCluster(cluinfo['clusterid'],cluname,user,cluinfo['status'],cluinfo['size'],cluinfo['nextcid'],cluinfo['proxy_server_ip'],cluinfo['proxy_public_ip'])
vcluster.create_time = datetime.strptime(cluinfo['create_time'],timeFormat)
vcluster.start_time = cluinfo['start_time']
for coninfo in cluinfo['containers']:
lastsavet = datetime.strptime(coninfo['lastsave'],timeFormat)
con = Container(coninfo['containername'], coninfo['hostname'], coninfo['ip'], coninfo['host'], coninfo['image'], lastsavet, coninfo['setting'])
vcluster.containers.append(con)
for pminfo in cluinfo['port_mapping']:
pm = PortMapping(pminfo['node_name'], pminfo['node_ip'], int(pminfo['node_port']), int(pminfo['host_port']))
vcluster.port_mapping.append(pm)
if "billing_history" in cluinfo.keys():
for nodename in cluinfo['billing_history'].keys():
bhinfo = cluinfo['billing_history'][nodename]
bh = BillingHistory(nodename,bhinfo['cpu'],bhinfo['mem'],bhinfo['disk'],bhinfo['port'])
vcluster.billing_history.append(bh)
try:
db.session.add(vcluster)
db.session.commit()
except Exception as err:
print(err)
cluFile.close()
print("Update Images...")
for shareStr in ['private/','public/']:
print("Update "+shareStr+" Images...")
for user in os.listdir(dockletPath+"/images/"+shareStr):
print("Update User: "+user)
tmppath = dockletPath+"/images/"+shareStr+user+"/"
files = os.listdir(tmppath)
images = []
for file in files:
if file[0] == "." or file[-3] != ".":
continue
images.append(file[:-3])
for img in images:
infofile = open(tmppath+"."+img+".info","r")
imginfo = infofile.read().split('\n')
infofile.close()
desfile = open(tmppath+"."+img+".description","r")
desinfo = desfile.read()
dbimage = Image.query.filter_by(imagename=img,ownername=user).first()
if dbimage is None:
dbimage = Image(img,False,False,user,desinfo)
dbimage.create_time = datetime.strptime(imginfo[0],timeFormat)
if shareStr == 'public/':
dbimage.hasPublic = True
else:
dbimage.hasPrivate = True
try:
db.session.add(dbimage)
db.session.commit()
except Exception as err:
print(err)
print("Finished!")
| bsd-3-clause | -6,557,784,391,905,363,000 | 40.9125 | 169 | 0.593498 | false |
coldmanck/fast-rcnn | tools/demo_kaggle_all.py | 1 | 6632 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import matplotlib
matplotlib.use('Agg')
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.cython_nms import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
import os.path
CLASSES = ('__background__','whale')
NETS = {'vgg16': ('VGG16',
'vgg16_fast_rcnn_iter_40000.caffemodel'),
'vgg_cnn_m_1024': ('VGG_CNN_M_1024',
'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'),
'caffenet': ('CaffeNet',
'caffenet_fast_rcnn_iter_40000.caffemodel')}
def vis_detections(im, class_name, dets, image_name, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
max_inds = 0
max_score = 0.0
if len(inds) == 0:
print('Warning: no target detected!')
return
elif len(inds) > 1:
        print('Warning: ' + str(len(inds)) + ' targets detected! Choosing the highest-scoring one')
for i in inds:
if(dets[i, -1] > max_score):
max_inds = i
max_score = dets[i, -1]
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
# for i in inds:
# bbox = dets[i, :4]
# score = dets[i, -1]
bbox = dets[max_inds, :4]
score = dets[max_inds, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
# end for
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
### SAVE IMAGES ? ###
save_img_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_img')
if not os.path.exists(save_img_dir):
os.makedirs(save_img_dir)
plt.savefig(os.path.join(save_img_dir, image_name + '_' + class_name))
boxes = {'boxes': ((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1])}
save_mat_dir = os.path.join(cfg.ROOT_DIR, 'result', 'test_box')
if not os.path.exists(save_mat_dir):
os.makedirs(save_mat_dir)
sio.savemat(os.path.join(save_mat_dir, image_name + '.mat'), {'boxes': boxes})
def demo(net, image_name, classes):
"""Detect object classes in an image using pre-computed object proposals."""
# Load pre-computed Selected Search object proposals
# box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')
test_mats_path = '/home/coldmanck/kaggle/test_bbox'
box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')
obj_proposals = sio.loadmat(box_file)['boxes']
# Load the demo image
test_images_path = '/home/coldmanck/kaggle/ImagesTest'
# im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
im_file = os.path.join(test_images_path, image_name + '.jpg')
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im, obj_proposals)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.7
NMS_THRESH = 0.3
for cls in classes:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
keep = np.where(cls_scores >= CONF_THRESH)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,
CONF_THRESH, image_name)
vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
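# Example command line (illustrative; the input/output paths above are hard-coded):
#   python demo_kaggle_all.py --gpu 0 --net vgg16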
if __name__ == '__main__':
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS[args.demo_net][0],
'test_kaggle.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'data', 'fast_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/scripts/'
'fetch_fast_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# print 'Demo for data/demo/w_11107.jpg'
test_images_path = '/home/coldmanck/kaggle/ImagesTest'
# length = len([name for name in os.listdir(test_images_path) if os.path.isfile(os.path.join(test_images_path, name))])
for name in os.listdir(test_images_path):
if os.path.isfile(os.path.join(test_images_path, name)):
demo(net, name.replace('.jpg',''), ('whale',))
plt.close()
# plt.show()
| mit | -3,590,412,734,822,677,000 | 35.043478 | 123 | 0.559258 | false |
taigaio/taiga-back | taiga/projects/notifications/signals.py | 1 | 5168 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-present Taiga Agile LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.utils import timezone
from taiga.events import events
from taiga.events import middleware as mw
from . import choices
from . import models
from . import serializers
def _filter_recipients(project, user, recipients):
notify_policies = models.NotifyPolicy.objects.filter(
user_id__in=recipients,
project=project,
web_notify_level=True).exclude(user_id=user.id).all()
return [notify_policy.user_id for notify_policy in notify_policies]
def _push_to_web_notifications(event_type, data, recipients,
serializer_class=None):
if not serializer_class:
serializer_class = serializers.ObjectNotificationSerializer
serializer = serializer_class(data)
for user_id in recipients:
with transaction.atomic():
models.WebNotification.objects.create(
event_type=event_type.value,
created=timezone.now(),
user_id=user_id,
data=serializer.data,
)
session_id = mw.get_current_session_id()
events.emit_event_for_user_notification(user_id,
session_id=session_id,
event_type=event_type.value,
data=serializer.data)
def on_assigned_to(sender, user, obj, **kwargs):
event_type = choices.WebNotificationType.assigned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[obj.assigned_to.id])
_push_to_web_notifications(event_type, data, recipients)
def on_assigned_users(sender, user, obj, new_assigned_users, **kwargs):
event_type = choices.WebNotificationType.assigned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user_id for user_id in new_assigned_users])
_push_to_web_notifications(event_type, data, recipients)
def on_watchers_added(sender, user, obj, new_watchers, **kwargs):
event_type = choices.WebNotificationType.added_as_watcher
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user, new_watchers)
_push_to_web_notifications(event_type, data, recipients)
def on_members_added(sender, user, project, new_members, **kwargs):
serializer_class = serializers.NotificationDataSerializer
event_type = choices.WebNotificationType.added_as_member
data = {
"project": project,
"user": user,
}
recipients = _filter_recipients(project, user,
[member.user_id for member in new_members
if member.user_id])
_push_to_web_notifications(event_type, data, recipients, serializer_class)
def on_mentions(sender, user, obj, mentions, **kwargs):
content_type = ContentType.objects.get_for_model(obj)
valid_content_types = ['issue', 'task', 'userstory']
if content_type.model in valid_content_types:
event_type = choices.WebNotificationType.mentioned
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user.id for user in mentions])
_push_to_web_notifications(event_type, data, recipients)
def on_comment_mentions(sender, user, obj, mentions, **kwargs):
event_type = choices.WebNotificationType.mentioned_in_comment
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user,
[user.id for user in mentions])
_push_to_web_notifications(event_type, data, recipients)
def on_comment(sender, user, obj, watchers, **kwargs):
event_type = choices.WebNotificationType.comment
data = {
"project": obj.project,
"user": user,
"obj": obj,
}
recipients = _filter_recipients(obj.project, user, watchers)
_push_to_web_notifications(event_type, data, recipients)
| agpl-3.0 | -2,407,889,315,559,505,000 | 35.394366 | 80 | 0.625967 | false |
kislerdm/alibava_analysis-tool | ilcinstall_eutel-git/ilcsoft/fastjet.py | 2 | 2846 | ##################################################
#
# FastJet module
#
# Author: Andre Sailer, CERN
# based on GSL module by J. Engels, Desy
# Date: Jul, 2010
#
##################################################
# custom imports
from baseilc import BaseILC
from marlinpkg import MarlinPKG
from util import *
class FastJetClustering(MarlinPKG):
""" Responsible for the FastJetClustering installation process. """
def __init__(self, userInput):
MarlinPKG.__init__(self, "FastJetClustering", userInput )
# required modules
self.reqmodules = [ "Marlin", "MarlinUtil", "CLHEP", "GEAR", "GSL", "LCIO", "FastJet" ]
self.download.root = "marlinreco"
class FastJet(BaseILC):
""" Responsible for the FastJet installation process. """
def __init__(self, userInput):
BaseILC.__init__(self, userInput, "FastJet", "FastJet")
# no cmake build support
self.hasCMakeBuildSupport = False
self.download.supportHEAD = False
self.download.supportedTypes = ["wget"]
self.reqfiles = [[ "lib/libfastjet.so", "lib/libfastjet.a", "lib/libfastjet.dylib" ]]
def setMode(self, mode):
BaseILC.setMode(self, mode)
self.download.url = "http://www.lpthe.jussieu.fr/~salam/fastjet/repo/fastjet-" + self.version + ".tar.gz"
def downloadSources(self):
BaseILC.downloadSources(self)
# move sources to a subdirectory
os.renames( self.version, self.name )
os.renames( self.name, self.version + "/" + self.name )
# create build directory
trymakedir( self.installPath + "/build" )
def compile(self):
""" compile FastJet """
os.chdir( self.installPath + "/build" )
if( self.rebuild ):
os.system( "make distclean" )
if( os.system( "../" + self.name + "/configure --prefix=" + self.installPath + " --enable-shared 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to configure!!" )
if( os.system( "make ${MAKEOPTS} 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to compile!!" )
if( os.system( "make install 2>&1 | tee -a " + self.logfile ) != 0 ):
self.abort( "failed to install!!" )
def cleanupInstall(self):
BaseILC.cleanupInstall(self)
os.chdir( self.installPath + "/build" )
os.system( "make clean" )
def postCheckDeps(self):
BaseILC.postCheckDeps(self)
self.env["FastJet_HOME"] = self.installPath
self.envpath["PATH"].append( "$FastJet_HOME/bin" )
self.envpath["LD_LIBRARY_PATH"].append( "$FastJet_HOME/lib" )
| gpl-2.0 | -3,859,160,402,606,320,000 | 32.880952 | 156 | 0.543921 | false |
cosminbasca/rdftools | rdftools/datagen/lubm_horizontal.py | 1 | 3072 | #
# author: Cosmin Basca
#
# Copyright 2010 University of Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import defaultdict
from base import LubmGenerator, UniTriplesDistribution
from rdftools.gcityhash import city64
from rdftools.log import logger
from rdftools.tools import ParserVisitorTool
import io
__author__ = 'basca'
def _part((s, p, o), perm):
val = ''
for c in perm:
if c == 's':
val += '%s' % s
elif c == 'p':
val += '%s' % p
elif c == 'o':
val += '%s' % o
return val
PERMUTATIONS = ('s', 'p', 'o', 'sp', 'so', 'po', 'spo')
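# Illustrative (hypothetical triple): _part(("s1", "p1", "o1"), "sp") -> "s1p1" and
# _part(("s1", "p1", "o1"), "spo") -> "s1p1o1"; city64 of that string modulo the
# number of sites then picks the target partition in HashPartitioner.on_visit.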
class HashPartitioner(ParserVisitorTool):
def __init__(self, source_file, num_sites=0, permutation=None, **kwargs):
super(HashPartitioner, self).__init__(source_file, **kwargs)
if num_sites == 0:
      raise ValueError('num_sites cannot be 0')
self.num_sites = num_sites
if permutation not in PERMUTATIONS:
      raise ValueError('permutation must be one of {0}, instead got {1}'.format(PERMUTATIONS, permutation))
self._permutation = permutation
self.site_index = []
def on_visit(self, s, p, o, c):
site_idx = city64(_part((s, p, o), self._permutation)) % self.num_sites
self.site_index.append(site_idx)
def get_results(self, *args, **kwargs):
return self.site_index
"""
distribution process:
1) horizontal partitioning of all data (based on stars)
"""
class LubmHorizontal(LubmGenerator):
def __init__(self, output_path, sites, permutation='s', **kwargs):
super(LubmHorizontal, self).__init__(output_path, sites, **kwargs)
self._permutation = permutation
@property
def _distributor_type(self):
return UniHorizontal
def _distributor_kwargs(self, uni_id, uni_rdf):
return dict(permutation=self._permutation)
class UniHorizontal(UniTriplesDistribution):
def _distribute_triples(self, triples, permutation='s'):
logger.info('[distributing] university %s by %s', self.uni_name, permutation)
site_index = HashPartitioner(self.uni_rdf, num_sites=self.num_sites, permutation=permutation)()
site_triples = defaultdict(list)
sites = [0 for i in xrange(self.num_sites)]
for i, triple in enumerate(triples):
sites[site_index[i]] += 1
site_triples[site_index[i]].append(triple)
logger.info('university %s total triples = %s, distribution = %s', self.uni_rdf, len(triples), sites)
return site_triples | apache-2.0 | 1,727,386,674,842,596,900 | 32.769231 | 112 | 0.654297 | false |
duguyue100/telauges | scripts/cifar10_feature.py | 1 | 5921 | import cPickle as pickle;
import numpy as np;
import theano;
import theano.tensor as T;
import matplotlib.pyplot as plt;
import telauges.utils as utils;
from telauges.hidden_layer import AutoEncoder;
n_epochs=100;
training_portion=1;
batch_size=100;
rng=np.random.RandomState(23455);
Xtr, Ytr, Xte, Yte=utils.load_CIFAR10("/home/arlmaster/workspace/telauges/data/CIFAR10");
Xtr=np.mean(Xtr, 3);
Xte=np.mean(Xte, 3);
Xtrain=Xtr.reshape(Xtr.shape[0], Xtr.shape[1]*Xtr.shape[2])/255.0;
Xtest=Xte.reshape(Xte.shape[0], Xte.shape[1]*Xte.shape[2])/255.0;
#Xtrain=np.hstack((Ytr[None].T, Xtrain))[0:10000];
#Xtrain=Xtrain[Xtrain[:,0].argsort()];
print Xtrain.shape;
print Xtest.shape;
#data_train=(Xtrain, Ytr);
#data_test=(Xtest, Yte);
#train_set_x=utils.shared_dataset(data_train);
#test_set_x=utils.shared_dataset(data_test);
train_set_x=theano.shared(np.asarray(Xtrain,
dtype='float32'),
borrow=True);
train_set_y=theano.shared(np.asarray(Ytr,
dtype='float32'),
borrow=True);
train_set_y=T.cast(train_set_y, dtype="int32");
test_set_x=theano.shared(np.asarray(Xtest,
dtype='float32'),
borrow=True);
test_set_y=theano.shared(np.asarray(Yte,
dtype='float32'),
borrow=True);
test_set_y=T.cast(test_set_y, dtype="int32");
n_train_batches=int(train_set_x.get_value(borrow=True).shape[0]*training_portion);
n_test_batches=test_set_x.get_value(borrow=True).shape[0];
print n_train_batches;
print n_test_batches;
n_train_batches /= batch_size; # number of train data batches
n_test_batches /= batch_size; # number of test data batches
print "[MESSAGE] The data is loaded"
print "[MESSAGE] Building model"
X=T.matrix("X");
y=T.ivector("y");
index=T.lscalar();
ae=AutoEncoder(rng=rng,
data_in=X,
n_vis=1024,
n_hidden=500,
encode_activate_mode="sigmoid",
decode_activate_mode="sigmoid");
cost, updates=ae.get_updates(learning_rate=0.1,
corruption_level=0.3);
train_model = theano.function(inputs=[index],
outputs=cost,
updates=updates,
givens={X: train_set_x[index * batch_size: (index + 1) * batch_size]});
print "[MESSAGE] The model is built";
print "[MESSAGE] Start training"
filters=ae.encode_layer.W.get_value(borrow=True);
for i in xrange(100):
  plt.subplot(10, 10, i+1);  # subplot indices are 1-based
plt.imshow(np.reshape(filters[:,i], (32, 32)), cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show();
epoch = 0;
while (epoch < n_epochs):
epoch = epoch + 1;
c = []
for batch_index in xrange(n_train_batches):
c.append(train_model(batch_index))
print 'Training epoch %d, cost ' % epoch, np.mean(c);
filters=ae.encode_layer.W.get_value(borrow=True);
for i in xrange(100):
  plt.subplot(10, 10, i+1);  # subplot indices are 1-based
plt.imshow(np.reshape(filters[:,i], (32, 32)), cmap = plt.get_cmap('gray'), interpolation='nearest');
plt.axis('off')
plt.show();
## extract feature
train_output_feature=theano.function(inputs=[index],
outputs=ae.get_feature(X),
givens={X: train_set_x[index * batch_size: (index + 1) * batch_size]});
train_feature=np.asarray([]);
for batch_index in xrange(n_train_batches):
temp=train_output_feature(batch_index);
if not train_feature.size:
train_feature=temp;
else:
train_feature=np.vstack((train_feature, temp));
train_feature=np.hstack((train_set_y.eval()[None].T, train_feature));
print train_feature.shape;
#train_feature.view("float32, float32, float32").sort(order=["f1"], axis=0);
#valid_output_feature=theano.function(inputs=[index],
# outputs=ae.get_feature(X),
# givens={X: test_set_x[index * batch_size: (index + 1) * batch_size]});
#valid_feature=np.asarray([]);
#for batch_index in xrange(n_valid_batches):
# temp=valid_output_feature(batch_index);
#
# if not valid_feature.size:
# valid_feature=temp;
# else:
# valid_feature=np.vstack((valid_feature, temp));
#
#valid_feature=np.hstack((valid_set_y.eval()[None].T, valid_feature));
#train_feature=np.vstack((train_feature, valid_feature));
train_feature_random=train_feature;
train_feature.view("float32, float32, float32").sort(order=["f1"], axis=0);
print train_feature.shape;
print "[MESSAGE] Writing training set to file"
pickle.dump(train_feature, open("cifar10_train_feature_500_ordered.pkl", "w"));
pickle.dump(train_feature_random, open("cifar10_train_feature_500_random.pkl", "w"));
print "[MESSAGE] Training set is prepared"
test_output_feature=theano.function(inputs=[index],
outputs=ae.get_feature(X),
givens={X: test_set_x[index * batch_size: (index + 1) * batch_size]});
test_feature=np.asarray([]);
for batch_index in xrange(n_test_batches):
temp=test_output_feature(batch_index);
if not test_feature.size:
test_feature=temp;
else:
test_feature=np.vstack((test_feature, temp));
test_feature=np.hstack((test_set_y.eval()[None].T, test_feature));
test_feature_random=test_feature;
test_feature.view("float32, float32, float32").sort(order=["f1"], axis=0);
print test_feature.shape;
print "[MESSAGE] Writing testing set to file"
pickle.dump(test_feature, open("cifar10_test_feature_500_ordered.pkl", "w"));
pickle.dump(test_feature_random, open("cifar10_test_feature_500_random.pkl", "w"));
print "[MESSAGE] Testing set is prepared" | gpl-3.0 | 6,903,552,789,405,983 | 31.360656 | 108 | 0.612734 | false |
jergosh/slr_pipeline | bin/process_slr_sub.py | 1 | 3289 | from glob import glob
import os
from os import path
import itertools
import re
from Bio import AlignIO
import pandas
import sys
import copy
import argparse
from slr import *
species_RE = re.compile("([A-Z]+)")
yeast_RE = re.compile("Y[A-P][LR][0-9]{3}[WC]")
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return itertools.izip_longest(*args, fillvalue=fillvalue)
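# --- Illustrative sketch (not part of the original script); grouper chunks an
# iterable into fixed-size tuples (padding the last one) and is used below to
# walk each aligned sequence codon by codon.
def _example_grouper():
    return list(grouper('ATGCCT', 3))  # -> [('A', 'T', 'G'), ('C', 'C', 'T')]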
argparser = argparse.ArgumentParser()
argparser.add_argument('--clade', metavar='clade', type=str, required=True)
argparser.add_argument('--slrroot', metavar='slr_root', type=str, required=True)
argparser.add_argument('--alnroot', metavar='aln_root', type=str, required=True)
argparser.add_argument('--outfile', metavar='out_file', type=str, required=True)
args = argparser.parse_args()
clade = args.clade
alndir = args.alnroot
slrroot = args.slrroot
slr_all = args.outfile
all_ids = []
all_data = [] # pandas.DataFrame(columns=colnames)
for aln_fn in glob(path.join(alndir, clade, "*", "*_prank.best.fas")):
basename = path.basename(aln_fn).rpartition('_')[0]
prefix = basename.partition('_')[0][:2]
# TODO Make sure colspecs work in all cases
# What if there are multiple human IDs in a single (split) tree?
# Are we allowed to potentially double count things like that?
aln = AlignIO.read(aln_fn, 'fasta')
# TODO refactor this into a function
# One way to get around this would be to decide separately which sequences are 'of interest'
for seqr in aln:
if args.clade == "yeast":
if yeast_RE.match(seqr.id) is None:
continue
else:
species = species_RE.match(seqr.id).groups()[0]
if species[:-1] != "ENS":
continue
all_ids.append(seqr.id)
for subset in [ "1", "2" ]:
slr_fn = path.join(slrroot, clade, prefix, basename+'_'+subset+'_matched.res')
if not path.exists(slr_fn):
print slr_fn, "doesn't exist!"
continue
slr = pandas.read_fwf(open(slr_fn), colspecs=colspecs, comment="\n")
idx = [ i for (i, codon) in enumerate(grouper(seqr.seq, 3)) if ''.join(codon) != '---' ]
slr_subset = copy.deepcopy(slr.ix[idx, :])
slr_subset.ix[:, 0] = idx
slr_subset.ix[:, 0] += 1
slr_out = file(path.join(slrroot, clade, prefix, seqr.id + '_' + basename + '_matched.res'), 'w')
slr_subset.to_csv(slr_out, quoting=False, index=False, sep='\t')
# slr_subset.insert(0, 'dataset', pandas.Series([basename]*slr_subset.shape[0]))
# slr_subset.insert(0, 'stable_id', pandas.Series([seqr.id]*slr_subset.shape[0]))
slr_subset['dataset'] = pandas.Series([basename+'_'+subset]*slr_subset.shape[0], index=slr_subset.index)
slr_subset['stable_id'] = pandas.Series([seqr.id]*slr_subset.shape[0], index=slr_subset.index)
slr_subset['human_idx'] = pandas.Series(range(1, slr_subset.shape[0]+1), index=slr_subset.index)
all_data.append(slr_subset)
all_data = pandas.concat(all_data)
all_data.rename(columns={"# Site": "Site"}, inplace=True)
all_data.to_csv(slr_all, quoting=False, index=False, sep='\t')
print min(all_data["Pval"]), max(all_data["Pval"])
| gpl-2.0 | -5,675,201,279,259,004,000 | 36.375 | 116 | 0.626634 | false |
guyingbo/shadowproxy | setup.py | 1 | 1667 | import os.path
import re
from setuptools import find_namespace_packages, setup
VERSION_RE = re.compile(r"""__version__ = ['"]([0-9.]+)['"]""")
BASE_PATH = os.path.dirname(__file__)
with open(os.path.join(BASE_PATH, "shadowproxy", "__init__.py")) as f:
try:
version = VERSION_RE.search(f.read()).group(1)
except IndexError:
raise RuntimeError("Unable to determine version.")
with open(os.path.join(BASE_PATH, "README.md")) as readme:
long_description = readme.read()
setup(
name="shadowproxy",
description="A proxy server that implements "
"Socks5/Shadowsocks/Redirect/HTTP (tcp) "
"and Shadowsocks/TProxy/Tunnel (udp) protocols.",
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
version=version,
author="Yingbo Gu",
author_email="[email protected]",
maintainer="Yingbo Gu",
maintainer_email="[email protected]",
url="https://github.com/guyingbo/shadowproxy",
packages=find_namespace_packages(include=["shadowproxy*"]),
install_requires=[
"pycryptodome>=3.4.3",
"curio==0.9",
"pylru>=1.0.9",
# "microstats>=0.1.0",
"iofree>=0.2.4",
"httptools",
"hkdf",
],
entry_points={"console_scripts": ["shadowproxy = shadowproxy.__main__:main"]},
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
setup_requires=["pytest-runner"],
tests_require=["pytest", "coverage", "pytest-cov"],
)
| mit | -5,718,060,646,353,731,000 | 29.87037 | 82 | 0.621476 | false |
glidernet/python-ogn-client | ogn/parser/utils.py | 1 | 3369 | from datetime import datetime, timedelta, timezone
import math
FEETS_TO_METER = 0.3048 # ratio feets to meter
FPM_TO_MS = FEETS_TO_METER / 60 # ratio fpm to m/s
KNOTS_TO_MS = 0.5144 # ratio knots to m/s
KPH_TO_MS = 0.27778 # ratio kph to m/s
HPM_TO_DEGS = 180 / 60 # ratio between half turn per minute and degrees/s
INCH_TO_MM = 25.4 # ratio inch to mm
def fahrenheit_to_celsius(fahrenheit):
return (fahrenheit - 32.0) * 5.0 / 9.0
def parseAngle(dddmmhht):
return float(dddmmhht[:3]) + float(dddmmhht[3:]) / 60
def createTimestamp(time_string, reference_timestamp):
if time_string[-1] == "z":
dd = int(time_string[0:2])
hh = int(time_string[2:4])
mm = int(time_string[4:6])
result = datetime(reference_timestamp.year,
reference_timestamp.month,
dd,
hh, mm, 0,
tzinfo=timezone.utc if reference_timestamp.tzinfo is not None else None)
# correct wrong month
if result > reference_timestamp + timedelta(days=14):
result = (result.replace(day=1) - timedelta(days=14)).replace(day=result.day)
elif result < reference_timestamp - timedelta(days=14):
result = (result.replace(day=28) + timedelta(days=14)).replace(day=result.day)
else:
hh = int(time_string[0:2])
mm = int(time_string[2:4])
ss = int(time_string[4:6])
result = datetime(reference_timestamp.year,
reference_timestamp.month,
reference_timestamp.day,
hh, mm, ss,
tzinfo=timezone.utc if reference_timestamp.tzinfo is not None else None)
if result > reference_timestamp + timedelta(hours=12):
# shift timestamp to previous day
result -= timedelta(days=1)
elif result < reference_timestamp - timedelta(hours=12):
# shift timestamp to next day
result += timedelta(days=1)
return result
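# --- Illustrative sketch (not part of the original module); the values below
# are made up.  The reference timestamp supplies the missing date parts and the
# result is shifted by at most half a day (or half a month for the "z" form).
def _example_create_timestamp():
    reference = datetime(2015, 1, 10, 23, 50, 0)
    # "235959" (hhmmss) lies within 12 hours of the reference, so it stays on
    # the same day.
    return createTimestamp("235959", reference)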
MATH_PI = 3.14159265359
class CheapRuler():
"""Extreme fast distance calculating for distances below 500km."""
def __init__(self, lat):
c = math.cos(lat * MATH_PI / 180)
c2 = 2 * c * c - 1
c3 = 2 * c * c2 - c
c4 = 2 * c * c3 - c2
c5 = 2 * c * c4 - c3
self.kx = 1000 * (111.41513 * c - 0.09455 * c3 + 0.00012 * c5) # longitude correction
self.ky = 1000 * (111.13209 - 0.56605 * c2 + 0.0012 * c4) # latitude correction
def distance(self, a, b):
"""Distance between point a and b. A point is a tuple(lon,lat)."""
dx = (a[0] - b[0]) * self.kx
dy = (a[1] - b[1]) * self.ky
return math.sqrt(dx * dx + dy * dy)
def bearing(self, a, b):
"""Returns the bearing from point a to point b."""
dx = (b[0] - a[0]) * self.kx
dy = (b[1] - a[1]) * self.ky
if dx == 0 and dy == 0:
return 0
result = math.atan2(-dy, dx) * 180 / MATH_PI + 90
return result if result >= 0 else result + 360
def normalized_quality(distance, signal_quality):
"""Signal quality normalized to 10km."""
return signal_quality + 20.0 * math.log10(distance / 10000.0) if distance > 0 else None
| agpl-3.0 | 4,766,096,451,540,963,000 | 34.463158 | 98 | 0.551202 | false |
modelblocks/modelblocks-release | resource-dundee/scripts/process_dundee.py | 1 | 12539 | import sys
import os
from numpy import nan
import pandas as pd
import argparse
#sys.stdin.reconfigure(encoding='latin-1',errors='replace') #'utf-8',errors='replace') #'ignore')
if __name__ == '__main__':
argparser = argparse.ArgumentParser('''
Extract eye-tracking time series from Dundee eye-tracking corpus source.
''')
argparser.add_argument('dundee_dir', help='Path to directory containing Dundee files.')
argparser.add_argument('lineitems_path', help='Path to file with space-tokenized stimulus sentences in order, one per line.')
argparser.add_argument('-v', '--verbose', action='store_true', help='Report verbose log')
argparser.add_argument('-w', '--warn', action='store_true', help='Report warnings to stderr')
args = argparser.parse_args()
textdata = []
if args.verbose:
sys.stderr.write('Processing stimulus data...\n')
sys.stderr.flush()
k = 0
with open(args.lineitems_path, 'r') as f:
for i, line in enumerate(f):
for j, w in enumerate(line.strip().split()):
textdata.append({
'word': w,
'sentid': i,
'sentpos': j + 1,
'startofsentence': int(j == 0)
})
k += 1
k = 0
start_ix = []
for p in sorted([x for x in os.listdir(args.dundee_dir) if x.endswith('wrdp.dat')]):
start_ix.append(k)
with open(args.dundee_dir + '/' + p, 'r', encoding='latin-1') as f:
for i, line in enumerate(f):
line = line.replace('(', '-LRB-').replace(')', '-RRB-')
fields = line.strip().split()
w = fields[0]
doc_id = int(fields[1]) - 1
screen_id = int(fields[2]) - 1
line_id = int(fields[3]) - 1
word_pos_in_line = int(fields[4]) - 1
word_pos_in_screen = int(fields[5]) - 1
word_pos_in_text = int(fields[12]) - 1
if word_pos_in_text == 0:
start_of_file = True
start_of_screen = True
start_of_line = True
elif word_pos_in_screen == 0:
start_of_file = False
start_of_screen = True
start_of_line = True
elif word_pos_in_line == 0:
start_of_file = False
start_of_screen = False
start_of_line = True
else:
start_of_file = False
start_of_screen = False
start_of_line = False
if args.warn and textdata[k]['word'] != w:
sys.stderr.write('WARNING: Saw mismatched words "%s" and "%s" at position %d.\n' % (textdata[k]['word'], w, k))
sys.stderr.flush()
textdata[k]['startoffile'] = int(start_of_file)
textdata[k]['startofscreen'] = int(start_of_screen)
textdata[k]['startofline'] = int(start_of_line)
k += 1
for kp1 in range(1, len(textdata) + 1):
if kp1 == len(textdata):
end_of_file = 1
end_of_screen = 1
end_of_line = 1
end_of_sentence = 1
else:
end_of_file = textdata[kp1]['startoffile']
end_of_screen = textdata[kp1]['startofscreen']
end_of_line = textdata[kp1]['startofline']
end_of_sentence = textdata[kp1]['startofsentence']
textdata[kp1-1]['endoffile'] = end_of_file
textdata[kp1-1]['endofscreen'] = end_of_screen
textdata[kp1-1]['endofline'] = end_of_line
textdata[kp1-1]['endofsentence'] = end_of_sentence
if args.verbose:
sys.stderr.write('Processing fixation data...\n')
sys.stderr.flush()
out = []
# Loop through fixations in order
for i, p in enumerate(sorted([x for x in os.listdir(args.dundee_dir) if x.endswith('ma1p.dat')])):
out_file = []
with open(args.dundee_dir + '/' + p, 'r', encoding='latin-1') as f:
subject = p[:2]
doc_id = int(p[2:4]) - 1
word_id_prev = -1
max_word_id = -1
time = 0
fdurSP = 0
fdurSPsummed = 0
fdurFP = 0
fdurGP = 0
fdurTT = 0
fp_cur = None
fp_blink_cur = None
gp_cur = None
gp_blink_cur = None
sp_cur = None
tt_cur = None
prev_was_blink = False
prev_was_offscreen = False
blinkFP = False
blinkGP = False
s = start_ix[doc_id]
npass = {}
wordid2firstfix = {}
nfix = 0
for j, line in enumerate(f):
line = line.replace('(', '-LRB-').replace(')', '-RRB-').replace('"', "'")
if j > 0:
fields = line.strip().split()
word_cur = fields[0]
word_id_cur = int(fields[6]) - 1
fdur_cur = float(fields[7])
isfix = False
isblink = False
isoffscreen = False
if word_cur.startswith('*'):
if word_cur == '*Blink':
isblink = True
elif word_cur == '*Off-screen':
isoffscreen = True
else:
raise ValueError('Unrecognized star (*) token: %s' % word_cur)
else:
if word_id_cur >= 0:
isfix = True
if isfix:
k = s + word_id_cur
if k in npass:
npass[k] += 1
else:
npass[k] = 1
if word_id_cur not in wordid2firstfix:
wordid2firstfix[word_id_cur] = nfix
if args.warn and textdata[k]['word'] != word_cur:
sys.stderr.write('WARNING: Saw mismatched words "%s" and "%s" at global position %d, file %s, line %d.\n' % (
textdata[k]['word'], word_cur, k, p, j))
sys.stderr.flush()
out_cur = {
'subject': subject,
'docid': doc_id,
'fdurSP': fdur_cur,
'blinkbeforefix': int(prev_was_blink),
'blinkafterfix': 0,
'offscreenbeforefix': int(prev_was_offscreen),
'offscreenafterfix': 0,
'wdelta': word_id_cur - word_id_prev,
'npass': npass[k],
'inregression': int(word_id_cur < max_word_id),
'time': time
}
out_file.append(out_cur)
tt_cur = out_file[wordid2firstfix[word_id_cur]]
if word_id_cur != word_id_prev:
sp_cur = out_cur
sp_blink_cur = out_cur
if word_id_cur > max_word_id:
fp_cur = out_cur
gp_cur = out_cur
fp_blink_cur = out_cur
gp_blink_cur = out_cur
elif word_id_cur < max_word_id:
fp_cur = None
fp_blink_cur = None
out_cur.update(textdata[k])
word_id_prev = word_id_cur
prev_was_blink = False
prev_was_offscreen = False
max_word_id = max(max_word_id, word_id_cur)
nfix += 1
else:
prev_was_blink = prev_was_blink or isblink
prev_was_offscreen = prev_was_offscreen or isoffscreen
if word_id_cur > 0 and isblink:
out_file[-1]['blinkafterfix'] = 1
if word_id_cur > 0 and isoffscreen:
out_file[-1]['offscreenafterfix'] = 1
sp_cur = None
sp_blink_cur = None
fp_cur = None
fp_blink_cur = None
gp_cur = None
gp_blink_cur = None
if sp_cur is not None:
if 'fdurSPsummed' in sp_cur:
sp_cur['fdurSPsummed'] += fdur_cur
else:
sp_cur['fdurSPsummed'] = fdur_cur
if sp_blink_cur is not None:
if 'blinkdurSPsummed' not in sp_blink_cur:
sp_blink_cur['blinkdurSPsummed'] = 0
sp_blink_cur['blinkduringSPsummed'] = 0
if isblink:
sp_blink_cur['blinkdurSPsummed'] += fdur_cur
sp_blink_cur['blinkduringSPsummed'] = 1
if fp_cur is not None:
if 'fdurFP' in fp_cur:
fp_cur['fdurFP'] += fdur_cur
else:
fp_cur['fdurFP'] = fdur_cur
if fp_blink_cur is not None:
if 'blinkdurFP' not in fp_blink_cur:
fp_blink_cur['blinkdurFP'] = 0
fp_blink_cur['blinkduringFP'] = 0
if isblink:
fp_blink_cur['blinkdurFP'] += fdur_cur
fp_blink_cur['blinkduringFP'] = 1
if gp_cur is not None:
if 'fdurGP' in gp_cur:
gp_cur['fdurGP'] += fdur_cur
else:
gp_cur['fdurGP'] = fdur_cur
if gp_blink_cur is not None:
if 'blinkdurGP' not in gp_blink_cur:
gp_blink_cur['blinkdurGP'] = 0
gp_blink_cur['blinkduringGP'] = 0
if isblink:
gp_blink_cur['blinkdurGP'] += fdur_cur
gp_blink_cur['blinkduringGP'] = 1
if tt_cur is not None:
if 'fdurTT' in tt_cur:
tt_cur['fdurTT'] += fdur_cur
else:
tt_cur['fdurTT'] = fdur_cur
time += fdur_cur / 1000
out += out_file
if args.verbose:
sys.stderr.write('Computing tabular output...\n')
sys.stderr.flush()
out = pd.DataFrame(out)
out.docid += 1
out['prevwasfix'] = (out['wdelta'] == 1).astype('int')
out['nextwasfix'] = (out['wdelta'] == -1).astype('int')
if args.verbose:
sys.stderr.write('Writing output...\n')
sys.stderr.flush()
toprint = [
'word',
'subject',
'docid',
'sentpos',
'sentid',
'time',
'wdelta',
'prevwasfix',
'nextwasfix',
'startoffile',
'endoffile',
'startofscreen',
'endofscreen',
'startofline',
'endofline',
'startofsentence',
'endofsentence',
'blinkbeforefix',
'blinkafterfix',
'offscreenbeforefix',
'offscreenafterfix',
'inregression',
'fdurSP',
'fdurSPsummed',
'blinkdurSPsummed',
'blinkduringSPsummed',
'fdurFP',
'blinkdurFP',
'blinkduringFP',
'fdurGP',
'blinkdurGP',
'blinkduringGP',
'fdurTT'
]
out[toprint].to_csv(sys.stdout, sep=' ', index=False, na_rep='NaN')
| gpl-3.0 | -8,582,374,189,916,280,000 | 36.820433 | 137 | 0.412473 | false |
maxplanck-ie/HiCExplorer | hicexplorer/test/long_run/test_hicConvertFormat_trivial_runs_cool.py | 1 | 2368 | import warnings
warnings.simplefilter(action="ignore", category=RuntimeWarning)
warnings.simplefilter(action="ignore", category=PendingDeprecationWarning)
import os.path
from tempfile import NamedTemporaryFile
from hicexplorer import hicConvertFormat
import pytest
REMOVE_OUTPUT = True
# DIFF = 60
DELTA_DECIMAL = 0
ROOT = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "test_data/hicConvertFormat")
original_matrix_h5 = ROOT + "/small_test_matrix.h5"
original_matrix_cool = ROOT + "/small_test_matrix.cool"
original_matrix_h5_li = ROOT + "/small_test_matrix.h5"
@pytest.mark.parametrize("matrices", [original_matrix_cool]) # required
@pytest.mark.parametrize("outputFormat", ['cool', 'h5', 'homer', 'ginteractions', 'mcool'])
@pytest.mark.parametrize("correction_name", ['weight']) # need to check hicInfo for more names
@pytest.mark.parametrize("correction_division", ['', '--correction_division'])
@pytest.mark.parametrize("store_applied_correction", ['', '--store_applied_correction'])
@pytest.mark.parametrize("chromosome", ['chrX'])
@pytest.mark.parametrize("enforce_integer", ['', '--enforce_integer'])
@pytest.mark.parametrize("load_raw_values", ['', '--load_raw_values'])
def test_cool_specific_trivial_run(
matrices,
outputFormat,
correction_name,
correction_division,
store_applied_correction,
chromosome,
enforce_integer,
load_raw_values,
):
"""
    The cool input format supports some format-specific options such as correction_name and correction_division.
    Therefore, the cool input format is explicitly tested in a single test function.
"""
from pathlib import Path
# get suffix of input matrix without the dot
inputFormat = Path(matrices).suffix[1:]
# create file corresponding to output format
outFileName = NamedTemporaryFile(suffix="test_ConvertFormat_trivial_run_cool.{}".format(outputFormat), delete=False)
outFileName.close()
args = "--matrices {} --outFileName {} --outputFormat {} --inputFormat {} --correction_name {} {} {} --chromosome {} {} {}".format(
matrices,
outFileName.name,
outputFormat,
inputFormat,
correction_name,
correction_division,
store_applied_correction,
chromosome,
enforce_integer,
load_raw_values,
).split()
hicConvertFormat.main(args)
| gpl-2.0 | 8,673,742,320,877,814,000 | 37.193548 | 135 | 0.701436 | false |
jeremiedecock/snippets | python/hashlib_md5_sha/md5sum_file.py | 1 | 2678 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import hashlib
import os
CHUNK_SIZE = 2**12
def main():
"""Main function"""
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description='Print or check MD5 checksums.')
parser.add_argument("filepaths", nargs='+', metavar="FILE", help="file to read")
args = parser.parse_args()
# COMPUTE HASHS ###########################################################
for file_path in args.filepaths:
if os.path.isfile(file_path):
with open(file_path, 'rb') as fd:
try:
hash_generator = hashlib.md5()
#hash_generator = hashlib.sha1()
#hash_generator = hashlib.sha256()
#hash_generator = hashlib.sha512()
data = fd.read(CHUNK_SIZE)
while len(data) > 0:
hash_generator.update(data)
data = fd.read(CHUNK_SIZE)
except:
print("{}: unknown error".format(file_path)) # TODO
finally:
fd.close()
hash_str = hash_generator.hexdigest()
print("{} {}".format(hash_str, file_path))
else:
if os.path.isdir(file_path):
print('"{}" is a directory'.format(file_path))
else:
print("unable to read {}".format(file_path))
if __name__ == '__main__':
main()
| mit | 7,653,002,989,695,408,000 | 37.782609 | 84 | 0.591181 | false |
kynikos/outspline | src/outspline/extensions/organism_basicrules/occur_monthly_number_direct.py | 1 | 13914 | # Outspline - A highly modular and extensible outliner.
# Copyright (C) 2011 Dario Giovannetti <[email protected]>
#
# This file is part of Outspline.
#
# Outspline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Outspline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outspline. If not, see <http://www.gnu.org/licenses/>.
import time as _time
import datetime as _datetime
from exceptions import BadRuleError
_RULE_NAMES = {'local': 'occur_monthly_number_direct_local',
'UTC': 'occur_monthly_number_direct_UTC'}
def make_rule(months, day, hour, minute, rend, ralarm, standard, guiconfig):
"""
    @param months: The months for which to create occurrences: must be a list
                    of integers representing the selected months (1 - 12).
@param day: The month day number when to start an occurrence (1 - 31).
@param hour: The hour when to start an occurrence (0 - 23).
@param minute: The minute when to start an occurrence (0 - 59).
@param rend: The positive difference in seconds between the relative start
time and the relative end time.
@param ralarm: The difference in seconds between the relative start time
and the relative alarm time; it is negative if the alarm is
set later than the start time.
@param standard: The time standard to be used, either 'local' or 'UTC'.
@param guiconfig: A place to store any configuration needed only by the
interface.
"""
# Do not use a rstart calculated from the start of the month (which would
# replace day, hour and minute) because the months with a DST time change
# have a variable length
# Make sure this rule can only produce occurrences compliant with the
# requirements defined in organism_api.update_item_rules
# There's no need to check standard because it's imposed by the API
if isinstance(months, list) and len(months) > 0 and \
isinstance(day, int) and 0 < day < 32 and \
isinstance(hour, int) and -1 < hour < 24 and \
isinstance(minute, int) and -1 < minute < 60 and \
(rend is None or (isinstance(rend, int) and rend > 0)) and \
(ralarm is None or isinstance(ralarm, int)):
for m in months:
if not isinstance(m, int) or m < 1 or m > 12:
raise BadRuleError()
nmonths = []
for n in xrange(1, 13):
if n in months:
nmonths.extend([n, ] * (n - len(nmonths)))
# Note that it's ok that nmonths can be shorter than 12 items; do *not*
# do the following:
#else:
# nmonths.extend([nmonths[0], ] * (12 - len(nmonths)))
# Calculate the shortest time difference with the end of a month
# (consider 27 days to make up for possible DST or time zone changes:
# for this reason diff may be negative; the number 28 is due to the
# fact that 1 must be re-added because day starts from 1, not 0)
diff = (28 - day) * 86400 - hour * 3600 - minute * 60
diffs = max(diff, 0)
# Also take a possible negative (late) alarm time into account, in fact
# the occurrence wouldn't be found if the search range included the
# alarm time but not the actual occurrence time span; remember that
# it's normal that the occurrence is not added to the results if the
# search range is between (and doesn't include) the alarm time and the
# actual occurrence time span
if ralarm:
srend = max(rend, ralarm * -1, 0)
else:
srend = max(rend, 0)
# Don't just store the number of months to go back, because it would
# make the algorithm always go back also when it's not necessary
maxoverlap = max(srend - diffs, 0)
return {
'rule': _RULE_NAMES[standard],
'#': (
maxoverlap,
nmonths,
day,
hour,
minute,
rend,
ralarm,
guiconfig,
)
}
else:
raise BadRuleError()
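# --- Illustrative sketch (not part of the original module); the values below
# are hypothetical.  It builds a rule for the 15th of March, June and September
# at 09:30, lasting one hour (rend=3600), with an alarm 15 minutes before the
# start (ralarm=900).
def _example_make_rule():
    return make_rule(months=[3, 6, 9], day=15, hour=9, minute=30, rend=3600,
                     ralarm=900, standard='local', guiconfig=None)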
def get_occurrences_range_local(mint, utcmint, maxt, utcoffset, filename, id_,
rule, occs):
# Go back by span in order to keep into account any occurrence that still
# has to end
mintime = mint - rule['#'][0]
months = rule['#'][1]
startd = rule['#'][2]
startH = rule['#'][3]
startM = rule['#'][4]
rend = rule['#'][5]
ralarm = rule['#'][6]
date = _datetime.datetime.fromtimestamp(mintime)
try:
month = months[date.month - 1]
except IndexError:
month = months[0]
year = date.year + 1
else:
year = date.year
while True:
try:
sdate = _datetime.datetime(year, month, startd, startH, startM)
except ValueError:
# Prevent infinite loops
maxdate = _datetime.date.fromtimestamp(maxt)
testdate = _datetime.date(year, month, 1)
if maxdate < testdate:
break
else:
start = int(_time.mktime(sdate.timetuple()))
try:
end = start + rend
except TypeError:
end = None
try:
alarm = start - ralarm
except TypeError:
alarm = None
if start > maxt and (alarm is None or alarm > maxt):
break
# The rule is checked in make_rule, no need to use occs.add
occs.add_safe({'filename': filename,
'id_': id_,
'start': start,
'end': end,
'alarm': alarm})
try:
month = months[month]
except IndexError:
month = months[0]
year += 1
def get_occurrences_range_UTC(mint, utcmint, maxt, utcoffset, filename, id_,
rule, occs):
# Go back by span in order to keep into account any occurrence that still
# has to end
mintime = mint - rule['#'][0]
months = rule['#'][1]
startd = rule['#'][2]
startH = rule['#'][3]
startM = rule['#'][4]
rend = rule['#'][5]
ralarm = rule['#'][6]
# Using utcfromtimestamp gives correct behaviour in Eastern (positive) time
# zones (e.g. Australia/Sydney)
date = _datetime.datetime.utcfromtimestamp(mintime)
try:
month = months[date.month - 1]
except IndexError:
month = months[0]
year = date.year + 1
else:
year = date.year
while True:
try:
sdate = _datetime.datetime(year, month, startd, startH, startM)
except ValueError:
# Prevent infinite loops
maxdate = _datetime.date.fromtimestamp(maxt)
testdate = _datetime.date(year, month, 1)
if maxdate < testdate:
break
else:
start = int(_time.mktime(sdate.timetuple()))
            # Every timestamp can have a different UTC offset, depending on
            # whether it's in a DST period or not
offset = utcoffset.compute(start)
sstart = start - offset
try:
send = sstart + rend
except TypeError:
send = None
try:
salarm = sstart - ralarm
except TypeError:
salarm = None
# Do compare sstart and salarm with maxt, *not* start and alarm
if sstart > maxt and (salarm is None or salarm > maxt):
break
# The rule is checked in make_rule, no need to use occs.add
occs.add_safe({'filename': filename,
'id_': id_,
'start': sstart,
'end': send,
'alarm': salarm})
try:
month = months[month]
except IndexError:
month = months[0]
year += 1
def get_next_item_occurrences_local(base_time, utcbase, utcoffset, filename,
id_, rule, occs):
# Go back by span in order to keep into account any occurrence that still
# has to end
mintime = base_time - rule['#'][0]
months = rule['#'][1]
startd = rule['#'][2]
startH = rule['#'][3]
startM = rule['#'][4]
rend = rule['#'][5]
ralarm = rule['#'][6]
date = _datetime.datetime.fromtimestamp(mintime)
try:
month = months[date.month - 1]
except IndexError:
month = months[0]
year = date.year + 1
else:
year = date.year
while True:
try:
sdate = _datetime.datetime(year, month, startd, startH, startM)
except ValueError:
# Prevent infinite loops
testdate = _datetime.date(year, month, 1)
next_ = occs.get_next_occurrence_time()
if next_:
maxdate = _datetime.date.fromtimestamp(next_)
else:
# Note the 4-week limit, otherwise if this was the only
# existing rule, but it couldn't generate valid occurrences
# (e.g. 31 February only), it would trigger an infinite loop
maxdate = _datetime.date.fromtimestamp(base_time) + \
_datetime.timedelta(weeks=4)
if maxdate < testdate:
break
else:
start = int(_time.mktime(sdate.timetuple()))
try:
end = start + rend
except TypeError:
end = None
try:
alarm = start - ralarm
except TypeError:
alarm = None
occd = {'filename': filename,
'id_': id_,
'start': start,
'end': end,
'alarm': alarm}
next_occ = occs.get_next_occurrence_time()
# The rule is checked in make_rule, no need to use occs.add
if occs.add_safe(base_time, occd) or (next_occ and
start > next_occ and
(alarm is None or alarm > next_occ)):
break
try:
month = months[month]
except IndexError:
month = months[0]
year += 1
def get_next_item_occurrences_UTC(base_time, utcbase, utcoffset, filename,
id_, rule, occs):
# Go back by span in order to keep into account any occurrence that still
# has to end
mintime = base_time - rule['#'][0]
months = rule['#'][1]
startd = rule['#'][2]
startH = rule['#'][3]
startM = rule['#'][4]
rend = rule['#'][5]
ralarm = rule['#'][6]
# Using utcfromtimestamp gives correct behaviour in Eastern (positive) time
# zones (e.g. Australia/Sydney)
date = _datetime.datetime.utcfromtimestamp(mintime)
try:
month = months[date.month - 1]
except IndexError:
month = months[0]
year = date.year + 1
else:
year = date.year
while True:
try:
sdate = _datetime.datetime(year, month, startd, startH, startM)
except ValueError:
# Prevent infinite loops
testdate = _datetime.date(year, month, 1)
next_ = occs.get_next_occurrence_time()
if next_:
maxdate = _datetime.date.fromtimestamp(next_)
else:
# Note the 4-week limit, otherwise if this was the only
# existing rule, but it couldn't generate valid occurrences
# (e.g. 31 February only), it would trigger an infinite loop
maxdate = _datetime.date.fromtimestamp(base_time) + \
_datetime.timedelta(weeks=4)
if maxdate < testdate:
break
else:
start = int(_time.mktime(sdate.timetuple()))
            # Every timestamp can have a different UTC offset, depending on
            # whether it's in a DST period or not
offset = utcoffset.compute(start)
sstart = start - offset
try:
send = sstart + rend
except TypeError:
send = None
try:
salarm = sstart - ralarm
except TypeError:
salarm = None
occd = {'filename': filename,
'id_': id_,
'start': sstart,
'end': send,
'alarm': salarm}
next_occ = occs.get_next_occurrence_time()
# The rule is checked in make_rule, no need to use occs.add
# Do compare sstart and salarm with next_occ, *not* start and alarm
if occs.add_safe(base_time, occd) or (next_occ and
sstart > next_occ and
(salarm is None or salarm > next_occ)):
break
try:
month = months[month]
except IndexError:
month = months[0]
year += 1
| gpl-3.0 | -7,135,692,125,899,080,000 | 33.87218 | 79 | 0.52961 | false |
Erotemic/vtool | vtool_ibeis/symbolic.py | 1 | 6316 | # -*- coding: utf-8 -*-
"""
Sympy helpers
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import six
import utool as ut
import ubelt as ub
def custom_sympy_attrs(mat):
import sympy
def matmul(other, hold=True):
if hold:
new = sympy.MatMul(mat, other)
else:
new = mat.multiply(other)
custom_sympy_attrs(new)
return new
def inv_():
new = mat.inv()
custom_sympy_attrs(new)
return new
setattr(mat, 'matmul', matmul)
setattr(mat, 'inv_', inv_)
return mat
def sympy_mat(arr):
import sympy
mat = sympy.Matrix(arr)
mat = custom_sympy_attrs(mat)
return mat
def evalprint(str_, globals_=None, locals_=None, simplify=False):
import sympy
if globals_ is None:
globals_ = ut.get_parent_frame().f_globals
if locals_ is None:
locals_ = ut.get_parent_frame().f_locals
if isinstance(str_, six.string_types):
var = eval(str_, globals_, locals_)
else:
var = str_
str_ = ut.get_varname_from_stack(var, N=1)
if simplify is True:
var = sympy.simplify(var)
print(ub.hzcat(str_ + ' = ', repr(var)))
def check_expr_eq(expr1, expr2, verbose=True):
"""
Does not work in general. Problem is not decidable.
Thanks Richard.
Args:
expr1 (?):
expr2 (?):
CommandLine:
python -m vtool_ibeis.symbolic --test-check_expr_eq
SeeALso:
vt.symbolic_randcheck
Example:
>>> # DISABLE_DOCTEST
>>> from vtool_ibeis.symbolic import * # NOQA
>>> expr1 = sympy.Matrix([ [sx*x + 1.0*tx + w1*y], [sy*y + 1.0*ty + w2*x], [1.0]])
>>> expr2 = sympy.Matrix([ [sx*x + tx + w1*y], [sy*y + ty + w2*x], [1]])
>>> result = check_expr_eq(expr1, expr2)
>>> print(result)
"""
import sympy
if isinstance(expr1, six.string_types):
expr1 = sympy.simplify(expr1)
if isinstance(expr2, six.string_types):
expr2 = sympy.simplify(expr2)
print(ub.hzcat('Checking if ', repr(expr1), ' == ', repr(expr2)))
random_point_check = expr1.equals(expr2)
if random_point_check is None:
failexpr = expr1.equals(expr2, failing_expression=True)
print('failexpr = %r' % (failexpr,))
random_point_check = False
print('... seems %r' % (random_point_check,))
#return random_point_check
expr3 = expr1 - expr2
if not random_point_check and True:
common_symbols = expr1.free_symbols.intersection(expr2.free_symbols)
if len(common_symbols):
y = sympy.symbols('y') # Hack, should be a new symbol
symbol = common_symbols.pop()
soln1 = sympy.solve(sympy.Eq(sympy.simplify(expr1), y), symbol)
soln2 = sympy.solve(sympy.Eq(sympy.simplify(expr2), y), symbol)
print('Solving expr1 for common symbol: ' + str(soln1))
print('Solving expr2 for common symbol: ' + str(soln2))
if soln1 == soln2:
print('This seems True')
else:
print('This seems False')
sympy.solve(sympy.Eq(sympy.simplify(expr2), y), 'd')
print(ub.hzcat('... checking 0 ', repr(expr3)))
# Does not always work.
print('(not gaurenteed to work) expr3.is_zero = %r' % (expr3.is_zero,))
return expr3.is_zero
def symbolic_randcheck(expr1, expr2, domain={}, n=10):
def get_domain(key, domain={}, rng=np.random):
min_, max_ = domain.get(key, (-100, 100))
range_ = max_ - min_
return (rng.rand() * (range_)) + min_
num_checks = n
input_list = []
results_list = []
for num in range(num_checks):
expr1_subs = {key: get_domain(key, domain) for key in expr1.free_symbols}
expr2_subs = {key: expr1_subs[key] if key in expr1_subs else get_domain(key, domain)
for key in expr2.free_symbols}
expr1_value = expr1.evalf(subs=expr1_subs)
expr2_value = expr2.evalf(subs=expr2_subs)
input_list.append((expr1_subs, expr2_subs))
results_list.append((expr1_value, expr2_value))
results_list = np.array(results_list)
#truth_list = np.allclose(results_list.T[0], results_list.T[1])
truth_list = results_list.T[0] == results_list.T[1]
return truth_list, results_list, input_list
def sympy_latex_repr(expr1):
import sympy
expr1_repr = sympy.latex(expr1)
expr1_repr = expr1_repr.replace('\\\\', '\\\\\n')
expr1_repr = expr1_repr.replace(r'\left[\begin{smallmatrix}{}', '\\MAT{\n')
expr1_repr = expr1_repr.replace(r'\end{smallmatrix}\right]', '\n}')
expr1_repr = expr1_repr.replace(r'\left[\begin{matrix}', '\\BIGMAT{\n')
expr1_repr = expr1_repr.replace(r'\end{matrix}\right]', '\n}')
expr1_repr = expr1_repr.replace(r'\left (', '(')
expr1_repr = expr1_repr.replace(r'\right )', ')')
expr1_repr = expr1_repr.replace(r'\left(', '(')
expr1_repr = expr1_repr.replace(r'\right)', ')')
# hack of align
expr1_repr = ut.align(expr1_repr, '&', pos=None)
return expr1_repr
#print(expr1_repr)
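# --- Illustrative sketch (not part of the original module); the expression
# below is a made-up example showing how sympy matrix LaTeX is rewritten into
# the custom \MAT / \BIGMAT macros.
def _example_sympy_latex_repr():
    import sympy
    x, y = sympy.symbols('x y')
    return sympy_latex_repr(sympy.Matrix([[x, y]]))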
def sympy_numpy_repr(expr1):
import re
expr1_repr = repr(expr1)
expr1_repr = expr1_repr.replace('Matrix', 'np.array')
expr1_repr = re.sub('\\bsin\\b', 'np.sin', expr1_repr)
expr1_repr = re.sub('\\bcos\\b', 'np.cos', expr1_repr)
expr1_repr = ut.autoformat_pep8(expr1_repr)
print(expr1_repr)
#import autopep8
#autopep8.fix_code(expr1_repr)
"""
Symbolic Scrap Work:
The number of negative reviews needed is usually much larger than the number of
positive reviews.
import sympy
from sympy.abc import theta
import sympy.stats
from sympy.stats import E as mean
items = sympy.symbols('a, b, c, d')
from sympy.stats import FiniteRV, P, E
density = {0: .1, 1: .2, 2: .3, 3: .4}
X = FiniteRV('X', density)
cs = sympy.stats.FiniteRV(str('X'), {0: .5, 1: .5})
cs = [[None] * np.random.randint(10) for _ in range(1)]
print(sum(len(c) - 1 for c in cs))
print(np.mean([len(c) for c in cs]) * len(cs) - len(cs))
ori = theta
x, y, iv11, iv21, iv22, patch_size = sympy.symbols('x y iv11 iv21 iv22 S')
"""
if __name__ == '__main__':
"""
CommandLine:
xdoctest -m vtool_ibeis.symbolic
"""
import xdoctest
xdoctest.doctest_module(__file__)
| apache-2.0 | 5,342,888,130,600,779,000 | 31.060914 | 92 | 0.599113 | false |
RulersOfAsgard/ALAMO-worker | alamo_worker/plugins/__init__.py | 1 | 6930 | # -*- coding: utf-8 -*-
import asyncio
import logging
from datetime import datetime
from typing import Dict, List, Optional
import pkg_resources
from alamo_common import aiostats
from pytz import utc
from stevedore import NamedExtensionManager
from zmq.asyncio import ZMQEventLoop
from alamo_worker.conf import settings
from alamo_worker.plugins.evaluate import ResultEvaluator
logger = logging.getLogger(__name__)
EXCEPTION_MESSAGES = {
'TimeoutError': 'Timeout occurred during request.',
'EnvironmentFetchException': 'An error occurred in check {check_id}.',
'HttpProcessingError': 'Invalid response from {url}, status_code={code}, '
'message={message}.',
'ClientResponseError': 'Invalid response from {url}, status_code={code}, '
'message={message}, headers={headers}, '
'history={history}, request_info={request_info}.',
'NoSuchService': 'No such service in Consul for {url}.',
'ConnectionRefusedError': 'Connection to {host}:{port} refused.',
'ConnectionResetError': 'Connection with {host}:{port} was reset.',
'gaierror': 'Unknown hostname {host}',
}
class BasePlugin(object):
"""Base plugin implementation.
    ``_type`` is used to determine the type of the plugin.
"""
default_exception_pattern = 'Could not execute plugin for check {check_id}.' # noqa
_type = None
is_coroutine = False
def __init__(self, *args, **kwargs):
if self._type is None:
msg = ('Class ``{}`` does not provide '
'"_type" attribute.').format(self.__class__.__name__)
raise NotImplementedError(msg)
self.is_coroutine = asyncio.iscoroutinefunction(self.execute)
def supported_types(self) -> List:
return [] if self._type is None else [self._type]
def init(self, *args):
"""Additional plugin initialization."""
pass
def execute(
self, check: Dict, source: Dict, **context
):
raise NotImplementedError
async def _check_health(self):
"""Should raise an exception if health check failed"""
raise NotImplementedError
async def healthy(self):
status = True
try:
await self._check_health()
except Exception as e:
logger.error(
'Health check failed for %s plugin: %s', self._type, e
)
status = False
return self._type, status
def exception_repr(
self,
e: Exception,
check_id: int = None,
url: str = '',
method: str = '',
host: str = '',
port: int = None
):
pattern = EXCEPTION_MESSAGES.get(
e.__class__.__name__, self.default_exception_pattern)
code = getattr(e, 'code', '')
message = getattr(e, 'message', '')
headers = getattr(e, 'headers', '')
history = getattr(e, 'history', '')
request_info = getattr(e, 'request_info', '')
return pattern.format(
check_id=check_id, url=url, code=code, message=message,
headers=headers, history=history, request_info=request_info,
method=method, host=host, port=port,
)
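# --- Illustrative sketch (not part of the original module); _ExamplePlugin and
# the check id below are hypothetical, only to show how exception_repr picks a
# message template by exception class name.
class _ExamplePlugin(BasePlugin):
    _type = 'example'
    def execute(self, check, source, **context):
        return None
    async def _check_health(self):
        return None
def _example_exception_repr():
    plugin = _ExamplePlugin()
    return plugin.exception_repr(TimeoutError(), check_id=42)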
class PluginManager(object):
"""Plugin manager for alamo worker.
This class always return the same object (Singleton pattern).
``_plugins`` keeps plugin objects
``_classes`` keeps plugin (class) reference to loaded plugin
"""
runner = None
plugin_namespace = 'pl.allegro.tech.monitoring.alamo_worker.plugins'
def __init__(self):
self._evaluator = ResultEvaluator()
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_inst'):
cls._inst = super(PluginManager, cls).__new__(cls, *args, **kwargs)
return cls._inst
def load(self):
"""Load and instantiate plugins."""
for plug in pkg_resources.iter_entry_points(self.plugin_namespace):
logger.info("Loading %s", plug)
self.runner = NamedExtensionManager(
namespace=self.plugin_namespace,
names=settings.PLUGINS,
invoke_on_load=True
)
@aiostats.timer(metric_name='manager.dispatch')
async def dispatch(self,
loop: ZMQEventLoop,
payload: Dict) -> Optional[Dict]:
"""Dispatch which of available plugins should perform
data processing.
:param zmq.asyncio.ZMQEventLoop loop: asyncio loop
:param dict payload:
"""
check_id = payload.get('id')
project_id = payload.get('service_id')
payload['execution_time'] = datetime.now(tz=utc).isoformat()
payload['worker_fqdn'] = settings.WORKER_FQDN
plugins = {}
for plugin in self.runner:
for supported in plugin.obj.supported_types():
plugins[supported] = plugin.obj
context = {
'check_id': check_id,
'project_id': project_id
}
try:
self._evaluator.prepare_triggers(payload)
tasks = []
for source in payload['sources']:
source_type = source.get('type') or source.get('source_type')
context = {
'plugin': source_type,
**context
}
logger.info(
'Processing check: id="%s", source="%s"',
check_id,
source,
extra=context
)
try:
source_plugin = plugins[source_type]
except KeyError:
logger.error(
'Could not find plugin ``%s``.', source_type,
extra=context
)
continue
if source_plugin.is_coroutine:
tasks.append(
source_plugin.execute(payload, source, **context)
)
else:
tasks.append(loop.run_in_executor(
None, source_plugin.execute, payload, source
))
if not tasks:
return
results = await asyncio.gather(
*tasks, loop=loop
) # type: List[AbstractResult]
return self._evaluator.evaluate(payload, results)
except Exception as e:
aiostats.increment.incr(
'manager.errors.{}'.format(e.__class__.__name__)
)
logger.error(
'Critical error occur for check `%s`. `%s`', check_id, e,
extra=context
)
async def get_plugin_health(self):
tasks = [plugin.obj.healthy() for plugin in self.runner]
return dict(await asyncio.gather(*tasks))
| apache-2.0 | 5,531,675,341,325,306,000 | 32.478261 | 88 | 0.546898 | false |
miltonsarria/dsp-python | qt/ejemploQt2.py | 1 | 3671 | from __future__ import unicode_literals
import os
import random
import sys
from numpy import arange, sin, pi
import matplotlib
#matplotlib.use("Qt4Agg")
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas)
from matplotlib.backends.qt_compat import QtCore, QtGui
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
self.compute_initial_figure()
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [random.randint(0, 10) for i in range(4)]
self.axes.cla()
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
class ApplicationWindow(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowTitle("application main window")
self.file_menu = QtGui.QMenu('&File', self)
self.file_menu.addAction('&Quit', self.fileQuit,
QtCore.Qt.CTRL + QtCore.Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.help_menu = QtGui.QMenu('&Help', self)
self.menuBar().addSeparator()
self.menuBar().addMenu(self.help_menu)
self.help_menu.addAction('&About', self.about)
self.main_widget = QtGui.QWidget(self)
l = QtGui.QVBoxLayout(self.main_widget)
sc = MyStaticMplCanvas(self.main_widget, width=5, height=4, dpi=100)
dc = MyDynamicMplCanvas(self.main_widget, width=5, height=4, dpi=100)
l.addWidget(sc)
l.addWidget(dc)
self.main_widget.setFocus()
self.setCentralWidget(self.main_widget)
self.statusBar().showMessage("All hail matplotlib!", 2000)
def fileQuit(self):
self.close()
def closeEvent(self, ce):
self.fileQuit()
def about(self):
QtGui.QMessageBox.about(self, "About",
"""embedding_in_qt4.py example
Copyright 2005 Florent Rougon, 2006 Darren Dale
This program is a simple example of a Qt4 application embedding matplotlib
canvases.
It may be used and modified with no restriction; raw copies as well as
modified versions may be distributed without limitation."""
)
qApp = QtGui.QApplication(sys.argv)
aw = ApplicationWindow()
aw.setWindowTitle("%s" % progname)
aw.show()
sys.exit(qApp.exec_())
#qApp.exec_()
| mit | 8,202,107,306,772,943,000 | 28.604839 | 77 | 0.629256 | false |
enigmampc/catalyst | catalyst/gens/utils.py | 1 | 1990 | #
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytz
import numbers
from hashlib import md5
from datetime import datetime
from catalyst.protocol import DATASOURCE_TYPE
from six import iteritems, b
def hash_args(*args, **kwargs):
"""Define a unique string for any set of representable args."""
arg_string = '_'.join([str(arg) for arg in args])
kwarg_string = '_'.join([str(key) + '=' + str(value)
for key, value in iteritems(kwargs)])
combined = ':'.join([arg_string, kwarg_string])
hasher = md5()
hasher.update(b(combined))
return hasher.hexdigest()
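# --- Illustrative sketch (not part of the original module); the arguments are
# arbitrary examples.  Equal args and kwargs always yield the same digest, so
# the result can serve as a cache key.
def _example_hash_args():
    assert hash_args(1, 'a', x=2) == hash_args(1, 'a', x=2)
    return hash_args(1, 'a', x=2)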
def assert_datasource_protocol(event):
"""Assert that an event meets the protocol for datasource outputs."""
assert event.type in DATASOURCE_TYPE
# Done packets have no dt.
if not event.type == DATASOURCE_TYPE.DONE:
assert isinstance(event.dt, datetime)
assert event.dt.tzinfo == pytz.utc
def assert_trade_protocol(event):
"""Assert that an event meets the protocol for datasource TRADE outputs."""
assert_datasource_protocol(event)
assert event.type == DATASOURCE_TYPE.TRADE
assert isinstance(event.price, numbers.Real)
assert isinstance(event.volume, numbers.Integral)
assert isinstance(event.dt, datetime)
def assert_datasource_unframe_protocol(event):
"""Assert that an event is valid output of zp.DATASOURCE_UNFRAME."""
assert event.type in DATASOURCE_TYPE
| apache-2.0 | 8,942,561,567,197,044,000 | 31.096774 | 79 | 0.711558 | false |
bambooforest/segments | src/segments/tree.py | 1 | 2093 | from segments.errors import replace
class TreeNode(object):
"""
Private class that creates the tree data structure from the orthography profile for
parsing.
"""
def __init__(self, char, sentinel=False):
self.char = char
self.children = {}
self.sentinel = sentinel
class Tree(object):
def __init__(self, graphemes):
def _multigraph(node, line):
# Internal function to add a multigraph starting at node.
for char in line:
node = node.children.setdefault(char, TreeNode(char))
node.sentinel = True
self.root = TreeNode('', sentinel=True)
for grapheme in graphemes:
_multigraph(self.root, grapheme)
def parse(self, line, error=replace):
res, idx = self._parse(self.root, line, 0)
rem = line[idx:]
while rem:
# Chop off one character and try parsing the remainder:
res.append(error(rem[0]))
rem = rem[1:]
r, i = self._parse(self.root, rem, 0)
res.extend(r)
rem = rem[i:]
return res
def _parse(self, root, line, idx):
"""
:param root: Tree node.
:param line: String to parse.
:param idx: Global counter of characters parsed.
:return: (list of parsed graphemes, incremented character count)
"""
# Base (or degenerate..) case.
if len(line) == 0:
return [], idx
parse = []
curr = 0
node = root
cidx = idx
while curr < len(line):
node = node.children.get(line[curr])
curr += 1
if not node:
break
if node.sentinel:
subparse, cidx = self._parse(root, line[curr:], idx + curr)
# Always keep the latest valid parse, which will be
# the longest-matched (greedy match) graphemes.
parse = [line[:curr]]
parse.extend(subparse)
if parse:
idx = cidx
return parse, idx
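# --- Illustrative sketch (not part of the original module); the grapheme
# inventory below is hypothetical.  The tree splits a string greedily into the
# longest matching graphemes.
def _example_tree_parse():
    tree = Tree(['a', 'b', 'ch'])
    return tree.parse('chab')  # -> ['ch', 'a', 'b']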
| apache-2.0 | -2,984,269,760,610,760,700 | 29.779412 | 87 | 0.530817 | false |
gam17/QAD | qad_mbuffer_fun.py | 1 | 1950 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
ok
QAD Quantum Aided Design plugin
funzioni per stirare oggetti grafici
-------------------
begin : 2013-11-11
copyright : iiiii
email : hhhhh
developers : bbbbb aaaaa ggggg
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# Import the PyQt and QGIS libraries
from qgis.PyQt.QtCore import *
from qgis.PyQt.QtGui import *
from qgis.core import *
from . import qad_utils
from .qad_msg import QadMsg
from .qad_variables import QadVariables
from .qad_multi_geom import *
#===============================================================================
# buffer
#===============================================================================
def buffer(qadGeom, distance):
"""
Returns a buffer region around this geometry having the given distance.
"""
g = qadGeom.asGeom()
nSegments = QadVariables.get(QadMsg.translate("Environment variables", "ARCMINSEGMENTQTY"), 12)
bufferedGeom = g.buffer(distance, nSegments)
if bufferedGeom.isEmpty(): return None
return fromQgsGeomToQadGeom(bufferedGeom)
| gpl-3.0 | -2,696,228,024,097,002,000 | 36.27451 | 98 | 0.410769 | false |
BD2KGenomics/slugflow | src/toil/jobStores/aws/utils.py | 1 | 17390 | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import bz2
import errno
import logging
import os
import socket
import types
from ssl import SSLError
from typing import Optional
from boto.exception import (
BotoServerError,
SDBResponseError,
S3ResponseError
)
from boto3.s3.transfer import TransferConfig
from botocore.exceptions import ClientError
from toil.lib.compatibility import compat_bytes
from toil.lib.retry import (
old_retry,
retry,
ErrorCondition
)
logger = logging.getLogger(__name__)
class SDBHelper(object):
"""
A mixin with methods for storing limited amounts of binary data in an SDB item
>>> import os
>>> H=SDBHelper
>>> H.presenceIndicator() # doctest: +ALLOW_UNICODE
u'numChunks'
>>> H.binaryToAttributes(None)['numChunks']
0
>>> H.attributesToBinary({u'numChunks': 0})
(None, 0)
>>> H.binaryToAttributes(b'') # doctest: +ALLOW_UNICODE +ALLOW_BYTES
{u'000': b'VQ==', u'numChunks': 1}
>>> H.attributesToBinary({u'numChunks': 1, u'000': b'VQ=='}) # doctest: +ALLOW_BYTES
(b'', 1)
Good pseudo-random data is very likely smaller than its bzip2ed form. Subtract 1 for the type
character, i.e 'C' or 'U', with which the string is prefixed. We should get one full chunk:
>>> s = os.urandom(H.maxRawValueSize-1)
>>> d = H.binaryToAttributes(s)
>>> len(d), len(d['000'])
(2, 1024)
>>> H.attributesToBinary(d) == (s, 1)
True
One byte more and we should overflow four bytes into the second chunk, two bytes for
base64-encoding the additional character and two bytes for base64-padding to the next quartet.
>>> s += s[0:1]
>>> d = H.binaryToAttributes(s)
>>> len(d), len(d['000']), len(d['001'])
(3, 1024, 4)
>>> H.attributesToBinary(d) == (s, 2)
True
"""
# The SDB documentation is not clear as to whether the attribute value size limit of 1024
# applies to the base64-encoded value or the raw value. It suggests that responses are
# automatically encoded from which I conclude that the limit should apply to the raw,
# unencoded value. However, there seems to be a discrepancy between how Boto computes the
# request signature if a value contains a binary data, and how SDB does it. This causes
# requests to fail signature verification, resulting in a 403. We therefore have to
    # base64-encode values ourselves even if that means we lose a quarter of capacity.
maxAttributesPerItem = 256
maxValueSize = 1024
maxRawValueSize = maxValueSize * 3 // 4
# Just make sure we don't have a problem with padding or integer truncation:
assert len(base64.b64encode(b' ' * maxRawValueSize)) == 1024
assert len(base64.b64encode(b' ' * (1 + maxRawValueSize))) > 1024
@classmethod
def _reservedAttributes(cls):
"""
Override in subclass to reserve a certain number of attributes that can't be used for
chunks.
"""
return 1
@classmethod
def _maxChunks(cls):
return cls.maxAttributesPerItem - cls._reservedAttributes()
@classmethod
def maxBinarySize(cls, extraReservedChunks=0):
return (cls._maxChunks() - extraReservedChunks) * cls.maxRawValueSize - 1 # for the 'C' or 'U' prefix
@classmethod
def _maxEncodedSize(cls):
return cls._maxChunks() * cls.maxValueSize
@classmethod
def binaryToAttributes(cls, binary):
"""
Turn a bytestring, or None, into SimpleDB attributes.
"""
if binary is None: return {u'numChunks': 0}
assert isinstance(binary, bytes)
assert len(binary) <= cls.maxBinarySize()
# The use of compression is just an optimization. We can't include it in the maxValueSize
# computation because the compression ratio depends on the input.
compressed = bz2.compress(binary)
if len(compressed) > len(binary):
compressed = b'U' + binary
else:
compressed = b'C' + compressed
encoded = base64.b64encode(compressed)
assert len(encoded) <= cls._maxEncodedSize()
n = cls.maxValueSize
chunks = (encoded[i:i + n] for i in range(0, len(encoded), n))
attributes = {cls._chunkName(i): chunk for i, chunk in enumerate(chunks)}
attributes.update({u'numChunks': len(attributes)})
return attributes
@classmethod
def _chunkName(cls, i):
return str(i).zfill(3)
@classmethod
def _isValidChunkName(cls, s):
return len(s) == 3 and s.isdigit()
@classmethod
def presenceIndicator(cls):
"""
The key that is guaranteed to be present in the return value of binaryToAttributes().
Assuming that binaryToAttributes() is used with SDB's PutAttributes, the return value of
this method could be used to detect the presence/absence of an item in SDB.
"""
return u'numChunks'
@classmethod
def attributesToBinary(cls, attributes):
"""
        :rtype: (bytes|None, int)
:return: the binary data and the number of chunks it was composed from
"""
chunks = [(int(k), v) for k, v in attributes.items() if cls._isValidChunkName(k)]
chunks.sort()
numChunks = int(attributes[u'numChunks'])
if numChunks:
serializedJob = b''.join(v.encode() for k, v in chunks)
compressed = base64.b64decode(serializedJob)
if compressed[0] == b'C'[0]:
binary = bz2.decompress(compressed[1:])
elif compressed[0] == b'U'[0]:
binary = compressed[1:]
else:
raise RuntimeError('Unexpected prefix {}'.format(compressed[0]))
else:
binary = None
return binary, numChunks
def fileSizeAndTime(localFilePath):
file_stat = os.stat(localFilePath)
return file_stat.st_size, file_stat.st_mtime
@retry(errors=[ErrorCondition(
error=ClientError,
error_codes=[404, 500, 502, 503, 504]
)])
def uploadFromPath(localFilePath: str,
resource,
bucketName: str,
fileID: str,
headerArgs: Optional[dict] = None,
partSize: int = 50 << 20):
"""
Uploads a file to s3, using multipart uploading if applicable
:param str localFilePath: Path of the file to upload to s3
:param S3.Resource resource: boto3 resource
:param str bucketName: name of the bucket to upload to
:param str fileID: the name of the file to upload to
:param dict headerArgs: http headers to use when uploading - generally used for encryption purposes
:param int partSize: max size of each part in the multipart upload, in bytes
:return: version of the newly uploaded file
"""
if headerArgs is None:
headerArgs = {}
client = resource.meta.client
file_size, file_time = fileSizeAndTime(localFilePath)
version = uploadFile(localFilePath, resource, bucketName, fileID, headerArgs, partSize)
info = client.head_object(Bucket=bucketName, Key=compat_bytes(fileID), VersionId=version, **headerArgs)
size = info.get('ContentLength')
assert size == file_size
# Make reasonably sure that the file wasn't touched during the upload
assert fileSizeAndTime(localFilePath) == (file_size, file_time)
return version
@retry(errors=[ErrorCondition(
error=ClientError,
error_codes=[404, 500, 502, 503, 504]
)])
def uploadFile(readable,
resource,
bucketName: str,
fileID: str,
headerArgs: Optional[dict] = None,
partSize: int = 50 << 20):
"""
Upload a readable object to s3, using multipart uploading if applicable.
:param readable: a readable stream or a file path to upload to s3
:param S3.Resource resource: boto3 resource
:param str bucketName: name of the bucket to upload to
:param str fileID: the name of the file to upload to
:param dict headerArgs: http headers to use when uploading - generally used for encryption purposes
:param int partSize: max size of each part in the multipart upload, in bytes
:return: version of the newly uploaded file
"""
if headerArgs is None:
headerArgs = {}
client = resource.meta.client
config = TransferConfig(
multipart_threshold=partSize,
multipart_chunksize=partSize,
use_threads=True
)
if isinstance(readable, str):
client.upload_file(Filename=readable,
Bucket=bucketName,
Key=compat_bytes(fileID),
ExtraArgs=headerArgs,
Config=config)
else:
client.upload_fileobj(Fileobj=readable,
Bucket=bucketName,
Key=compat_bytes(fileID),
ExtraArgs=headerArgs,
Config=config)
# Wait until the object exists before calling head_object
object_summary = resource.ObjectSummary(bucketName, compat_bytes(fileID))
object_summary.wait_until_exists(**headerArgs)
info = client.head_object(Bucket=bucketName, Key=compat_bytes(fileID), **headerArgs)
return info.get('VersionId', None)
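# Illustrative usage sketch (not part of the original module); the bucket and
# key names below are placeholders:
#
#   s3 = boto3.resource('s3')
#   with open('/tmp/data.bin', 'rb') as f:
#       version = uploadFile(f, s3, 'my-bucket', 'files/data.bin')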
@retry(errors=[ErrorCondition(
error=ClientError,
error_codes=[404, 500, 502, 503, 504]
)])
def copyKeyMultipart(resource,
srcBucketName: str,
srcKeyName: str,
srcKeyVersion: str,
dstBucketName: str,
dstKeyName: str,
sseAlgorithm: Optional[str] = None,
sseKey: Optional[str] = None,
copySourceSseAlgorithm: Optional[str] = None,
copySourceSseKey: Optional[str] = None):
"""
Copies a key from a source key to a destination key in multiple parts. Note that if the
destination key exists it will be overwritten implicitly, and if it does not exist a new
key will be created. If the destination bucket does not exist an error will be raised.
:param S3.Resource resource: boto3 resource
:param str srcBucketName: The name of the bucket to be copied from.
:param str srcKeyName: The name of the key to be copied from.
:param str srcKeyVersion: The version of the key to be copied from.
:param str dstBucketName: The name of the destination bucket for the copy.
:param str dstKeyName: The name of the destination key that will be created or overwritten.
:param str sseAlgorithm: Server-side encryption algorithm for the destination.
:param str sseKey: Server-side encryption key for the destination.
:param str copySourceSseAlgorithm: Server-side encryption algorithm for the source.
:param str copySourceSseKey: Server-side encryption key for the source.
:rtype: str
:return: The version of the copied file (or None if versioning is not enabled for dstBucket).
"""
dstBucket = resource.Bucket(compat_bytes(dstBucketName))
dstObject = dstBucket.Object(compat_bytes(dstKeyName))
copySource = {'Bucket': compat_bytes(srcBucketName), 'Key': compat_bytes(srcKeyName)}
if srcKeyVersion is not None:
copySource['VersionId'] = compat_bytes(srcKeyVersion)
# The boto3 functions don't allow passing parameters as None to
# indicate they weren't provided. So we have to do a bit of work
# to ensure we only provide the parameters when they are actually
# required.
destEncryptionArgs = {}
if sseKey is not None:
destEncryptionArgs.update({'SSECustomerAlgorithm': sseAlgorithm,
'SSECustomerKey': sseKey})
copyEncryptionArgs = {}
if copySourceSseKey is not None:
copyEncryptionArgs.update({'CopySourceSSECustomerAlgorithm': copySourceSseAlgorithm,
'CopySourceSSECustomerKey': copySourceSseKey})
copyEncryptionArgs.update(destEncryptionArgs)
dstObject.copy(copySource, ExtraArgs=copyEncryptionArgs)
# Wait until the object exists before calling head_object
object_summary = resource.ObjectSummary(dstObject.bucket_name, dstObject.key)
object_summary.wait_until_exists(**destEncryptionArgs)
# Unfortunately, boto3's managed copy doesn't return the version
# that it actually copied to. So we have to check immediately
# after, leaving open the possibility that it may have been
# modified again in the few seconds since the copy finished. There
# isn't much we can do about it.
info = resource.meta.client.head_object(Bucket=dstObject.bucket_name,
Key=dstObject.key,
**destEncryptionArgs)
return info.get('VersionId', None)
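# Illustrative usage sketch (not part of the original module); bucket and key
# names are placeholders:
#
#   version = copyKeyMultipart(boto3.resource('s3'),
#                              srcBucketName='bucket-a', srcKeyName='k', srcKeyVersion=None,
#                              dstBucketName='bucket-b', dstKeyName='k-copy')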
def _put_attributes_using_post(self, domain_or_name, item_name, attributes,
replace=True, expected_value=None):
"""
Monkey-patched version of SDBConnection.put_attributes that uses POST instead of GET
The GET version is subject to the URL length limit which kicks in before the 256 x 1024 limit
for attribute values. Using POST prevents that.
https://github.com/BD2KGenomics/toil/issues/502
"""
domain, domain_name = self.get_domain_and_name(domain_or_name)
params = {'DomainName': domain_name,
'ItemName': item_name}
self._build_name_value_list(params, attributes, replace)
if expected_value:
self._build_expected_value(params, expected_value)
# The addition of the verb keyword argument is the only difference to put_attributes (Hannes)
return self.get_status('PutAttributes', params, verb='POST')
def monkeyPatchSdbConnection(sdb):
"""
:type sdb: SDBConnection
"""
sdb.put_attributes = types.MethodType(_put_attributes_using_post, sdb)
default_delays = (0, 1, 1, 4, 16, 64)
default_timeout = 300
def connection_reset(e):
# For some reason we get 'error: [Errno 104] Connection reset by peer' where the
# English description suggests that errno is 54 (ECONNRESET) while the actual
# errno is listed as 104. To be safe, we check for both:
return isinstance(e, socket.error) and e.errno in (errno.ECONNRESET, 104)
def sdb_unavailable(e):
return isinstance(e, BotoServerError) and e.status in (500, 503)
def no_such_sdb_domain(e):
return (isinstance(e, SDBResponseError)
and e.error_code
and e.error_code.endswith('NoSuchDomain'))
def retryable_ssl_error(e):
# https://github.com/BD2KGenomics/toil/issues/978
return isinstance(e, SSLError) and e.reason == 'DECRYPTION_FAILED_OR_BAD_RECORD_MAC'
def retryable_sdb_errors(e):
return (sdb_unavailable(e)
or no_such_sdb_domain(e)
or connection_reset(e)
or retryable_ssl_error(e))
def retry_sdb(delays=default_delays, timeout=default_timeout, predicate=retryable_sdb_errors):
return old_retry(delays=delays, timeout=timeout, predicate=predicate)
# https://github.com/boto/botocore/blob/49f87350d54f55b687969ec8bf204df785975077/botocore/retries/standard.py#L316
THROTTLED_ERROR_CODES = [
'Throttling',
'ThrottlingException',
'ThrottledException',
'RequestThrottledException',
'TooManyRequestsException',
'ProvisionedThroughputExceededException',
'TransactionInProgressException',
'RequestLimitExceeded',
'BandwidthLimitExceeded',
'LimitExceededException',
'RequestThrottled',
'SlowDown',
'PriorRequestNotComplete',
'EC2ThrottledException',
]
# TODO: Replace with: @retry and ErrorCondition
def retryable_s3_errors(e):
return (connection_reset(e)
or (isinstance(e, BotoServerError) and e.status in (429, 500))
or (isinstance(e, BotoServerError) and e.code in THROTTLED_ERROR_CODES)
# boto3 errors
or (isinstance(e, S3ResponseError) and e.error_code in THROTTLED_ERROR_CODES)
or (isinstance(e, ClientError) and 'BucketNotEmpty' in str(e))
or (isinstance(e, ClientError) and e.response.get('ResponseMetadata', {}).get('HTTPStatusCode') == 409 and 'try again' in str(e))
or (isinstance(e, ClientError) and e.response.get('ResponseMetadata', {}).get('HTTPStatusCode') in (404, 429, 500, 502, 503, 504)))
def retry_s3(delays=default_delays, timeout=default_timeout, predicate=retryable_s3_errors):
return old_retry(delays=delays, timeout=timeout, predicate=predicate)
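# Illustrative usage sketch (not part of the original module): old_retry-based
# helpers such as retry_sdb()/retry_s3() are typically consumed as a generator
# of retry contexts, e.g.
#
#   for attempt in retry_s3():
#       with attempt:
#           client.head_bucket(Bucket='my-bucket')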
def region_to_bucket_location(region):
return '' if region == 'us-east-1' else region
def bucket_location_to_region(location):
return 'us-east-1' if location == '' else location
| apache-2.0 | -7,117,220,125,035,085,000 | 38.166667 | 143 | 0.660897 | false |
qedsoftware/commcare-hq | corehq/tests/noseplugins/timing.py | 1 | 3440 | """A test timing plugin for nose
Usage: ./manage.py test --with-timing --timing-file=/path/to/timing.csv
"""
import csv
import sys
import time
from nose.plugins import Plugin
from corehq.tests.noseplugins.uniformresult import uniform_description
class TimingPlugin(Plugin):
"""A plugin to measure times of testing events
Measure elapsed time before setup, during setup, during test, and
during teardown events. Outputs the results as CSV.
"""
name = "timing"
def options(self, parser, env):
"""Register commandline options.
"""
super(TimingPlugin, self).options(parser, env)
parser.add_option('--timing-file', action='store',
dest='timing_file',
metavar="FILE",
default=env.get('NOSE_TIMING_FILE'),
help='Timing output file (CSV); default is STDOUT')
parser.add_option('--pretty-timing', action='store_true',
dest='pretty_output',
default=env.get('NOSE_PRETTY_TIMING'),
help='Print timing info in a format that is better '
'for reviewing in text mode (not CSV).')
def configure(self, options, conf):
"""Configure plugin.
"""
super(TimingPlugin, self).configure(options, conf)
self.conf = conf
self.timing_file = options.timing_file
self.pretty_output = options.pretty_output
def begin(self):
self.output = (open(self.timing_file, "w")
if self.timing_file else sys.__stdout__)
if not self.pretty_output:
self.csv = csv.writer(self.output)
self.csv.writerow(["event", "name", "elapsed time", "start time"])
self.event_start = time.time()
global PLUGIN_INSTANCE
PLUGIN_INSTANCE = self
def finalize(self, result):
if self.output is not None:
self.output.close()
def end_event(self, event, context):
now = time.time()
name = uniform_description(context)
if self.pretty_output:
self.output.write("{time:>-6,.2f} {event} {name}\n".format(
event=event,
name=name,
time=now - self.event_start,
))
else:
self.csv.writerow([
event,
name,
now - self.event_start,
self.event_start,
])
self.event_start = now
def startContext(self, context):
# called before context setup
self.end_event("before", context)
def startTest(self, case):
# called before test is started
self.end_event("setup", case.test)
def stopTest(self, case):
# called on test completion
self.end_event("run", case.test)
def stopContext(self, context):
# called after context teardown
self.end_event("teardown", context)
PLUGIN_INSTANCE = None
def end_event(name, context):
"""Signal the end of a custom timing event
Use to add arbitrary "events" anywhere in the code to isolate
sources of slowness during profiling. This function terminates the
given event name and immediately begins the next (as yet unnamed)
event. Requires the `TimingPlugin` must to be enabled.
"""
PLUGIN_INSTANCE.end_event(name, context)
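# Illustrative usage sketch (not part of the original module): with the plugin
# enabled, arbitrary code under test can close a custom timing event, e.g.
#
#   end_event("fixture-load", test_context)
#
# where `test_context` is whatever test or context object should label the row.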
| bsd-3-clause | -7,585,139,054,977,179,000 | 32.076923 | 78 | 0.580233 | false |
cudevmaxwell/SwiftBulkUploader | swiftbulkuploader/prepareupload.py | 1 | 3048 | import sys
import olrcdb
import os
import datetime
from bulkupload import env_vars_set
# Globals
COUNT = 0
FAILED = 0
REQUIRED_VARIABLES = [
"MYSQL_HOST",
"MYSQL_USER",
"MYSQL_PASSWD",
"MYSQL_DB",
]
def prepare_upload(connect, directory, table_name):
    '''Given a database connection, a directory and a table name,
    populate the table with (path, uploaded=false) rows, one for each
    file found recursively under the directory. The table itself is
    created by the caller (see __main__ below).'''
global COUNT, FAILED
# Loop through all items in the directory.
for filename in os.listdir(directory):
file_path = os.path.join(directory, filename)
# Add file name to the list.
if os.path.isfile(file_path):
try:
connect.insert_path(file_path, table_name)
COUNT += 1
except:
# Try again with the alternative query.
try:
connect.insert_path(file_path, table_name, True)
COUNT += 1
except:
FAILED += 1
error_log = open(table_name + '.prepare.error.log', 'a')
error_log.write("\rFailed: {0}\n".format(file_path))
error_log.close()
sys.stdout.flush()
sys.stdout.write("\r{0} parsed. ".format(COUNT))
#Output status to a file.
final_count = open(table_name + ".prepare.out", 'w+')
final_count.write("\r{0} parsed. ".format(COUNT))
final_count.close()
# Recursive call for sub directories.
else:
prepare_upload(connect, file_path, table_name)
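# Illustrative invocation sketch (not part of the original module); the
# directory and table name are placeholders, mirroring the __main__ block below:
#
#   connect = olrcdb.DatabaseConnection()
#   connect.create_table('photos_batch1')
#   prepare_upload(connect, '/data/photos', 'photos_batch1')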
if __name__ == "__main__":
# Check for proper parameters
if len(sys.argv) != 3:
sys.stderr.write(
'Usage: python prepareupload.py path-to-directory table-name\n'
)
sys.exit(1)
else:
table_name = sys.argv[2]
directory = sys.argv[1]
# Check required environment variables have been set
if not env_vars_set():
set_env_message = "The following environment variables need to be " \
"set:\n"
set_env_message += " \n".join(REQUIRED_VARIABLES)
set_env_message += "\nPlease set these environment variables to " \
"connect to the OLRC."
print(set_env_message)
exit(0)
#Open error log:
error_log = open(table_name + '.prepare.error.log', 'w+')
error_log.write("From execution {0}:\n".format(
str(datetime.datetime.now())
))
error_log.close()
connect = olrcdb.DatabaseConnection()
connect.create_table(table_name)
prepare_upload(connect, directory, table_name)
sys.stdout.flush()
sys.stdout.write("\r{0} parsed. ".format(COUNT))
if FAILED != 0:
sys.stdout.write("\n{0} FAILED. See error.log.".format(FAILED))
#Log the final count
final_count = open(table_name + ".prepare.out", 'w+')
final_count.write("\r{0} parsed. ".format(COUNT))
final_count.close() | bsd-3-clause | 6,526,452,972,449,950,000 | 28.038095 | 77 | 0.574475 | false |
dcelisgarza/applied_math | solar_system/animatep2.py | 1 | 1987 | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as anm
#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
plt.close('all')
data = np.loadtxt('solar_system.dat')
data2 = data[:,0:15]
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlim3d([np.min(data2[:,0::3]), np.max(data2[:,0::3])])
ax.set_xlabel('X')
ax.set_ylim3d([np.min(data2[:,1::3]), np.max(data2[:,1::3])])
ax.set_ylabel('Y')
ax.set_zlim3d([np.min(data2[:,2::3]), np.max(data2[:,2::3])])
ax.set_zlabel('Z')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, np.size(data2[0,:])/3))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c)
for c in colors], [])
ax.view_init(30, 0)
data3 = np.reshape(data2,(np.size(data2[0,:])/3,np.size(data2[:,0]),3))
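# Descriptive note (added): the loop below repacks the flat (time, 3*n_bodies)
# array into data3 with shape (n_bodies, n_timesteps, 3); body i takes columns
# 3i..3i+2 (x, y, z), which is what the running offset n achieves.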
n = 0
for i in np.arange(0,int(np.size(data2[0,:])/3),1):
data3[i,:,0:3] = data2[:,i+n:i+n+3]
n = n + 2
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return pts + lines,
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
#i = (2 * i) % data3.shape[1]
for line, pt, xi in zip(lines, pts, data3):
x, y, z = xi[:i,0:3].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return pts + lines
anim = anm.FuncAnimation(fig, animate, init_func=init,
frames=int(np.size(data2[:,0])), interval=1, blit=True)
writer = anm.writers['ffmpeg'](fps=30)
anim.save('inner_sol_sys.mp4', writer = writer)#, 'ffmpeg_file', fps=15, extra_args=['-vcodec', 'libx264']
| mit | -6,319,089,473,703,924,000 | 26.985915 | 106 | 0.574233 | false |
City-of-Helsinki/smbackend | observations/models.py | 1 | 6698 | import binascii
import os
import rest_framework.authentication
import rest_framework.authtoken.models
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from polymorphic.models import PolymorphicModel
from rest_framework import exceptions
from services import models as services_models
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
class ObservableProperty(models.Model):
"""Specifies the detailed interpretation of observations.
Includes the unit of measurement.
Observations can only be made on units which have a service that
is linked to an ObservableProperty. For example, only units which
are ice-skating fields can have observations with the property
"ice condition" or something similar.
"""
# TODO move back to sequential id field
id = models.CharField(max_length=50, primary_key=True)
name = models.CharField(max_length=100, null=False, blank=False, db_index=True)
measurement_unit = models.CharField(max_length=20, null=True, blank=False)
expiration = models.DurationField(blank=True, null=True)
# todo: change to services
services = models.ManyToManyField(
services_models.Service, related_name="observable_properties"
)
observation_type = models.CharField(max_length=80, null=False, blank=False)
def __str__(self):
return "%s (%s)" % (self.name, self.id)
def get_observation_model(self):
return apps.get_model(self.observation_type)
def get_observation_type(self):
return self.get_observation_model().get_type()
def create_observation(self, **validated_data):
return self.get_observation_model().objects.create(**validated_data)
def get_internal_value(self, value):
return self.get_observation_model().get_internal_value(self, value)
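# Illustrative usage sketch (not part of the original module); identifiers and
# field values below are placeholders:
#
#   prop = ObservableProperty.objects.get(id='ice_skating_field_condition')
#   value = prop.get_internal_value('good')   # AllowedValue lookup (categorical case)
#   obs = prop.create_observation(unit=unit, time=now, value=value, property=prop)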
class AllowedValue(models.Model):
# Currently only works for categorical observations
identifier = models.CharField(max_length=50, null=True, blank=False, db_index=True)
quality = models.CharField(
max_length=50, null=True, blank=False, db_index=True, default="unknown"
)
name = models.CharField(max_length=100, null=True, blank=False, db_index=True)
description = models.TextField(null=False, blank=False)
property = models.ForeignKey(
ObservableProperty,
on_delete=models.CASCADE,
blank=False,
null=False,
related_name="allowed_values",
)
class Meta:
unique_together = (("identifier", "property"),)
class Observation(PolymorphicModel):
"""An observation is a measured/observed value of
a property of a unit at a certain time.
"""
value = models.ForeignKey(
AllowedValue,
blank=False,
null=True,
on_delete=models.PROTECT,
related_name="instances",
)
time = models.DateTimeField(
db_index=True, help_text="Exact time the observation was made"
)
unit = models.ForeignKey(
services_models.Unit,
blank=False,
null=False,
on_delete=models.PROTECT,
help_text="The unit the observation is about",
related_name="observation_history",
)
units = models.ManyToManyField(
services_models.Unit, through="UnitLatestObservation"
)
auth = models.ForeignKey("PluralityAuthToken", null=True, on_delete=models.PROTECT)
property = models.ForeignKey(
ObservableProperty,
blank=False,
null=False,
on_delete=models.PROTECT,
help_text="The property observed",
)
class Meta:
ordering = ["-time"]
class CategoricalObservation(Observation):
def get_external_value(self):
return self.value.identifier
@staticmethod
def get_type():
return "categorical"
@staticmethod
def get_internal_value(oproperty, value):
if value is None:
return None
return oproperty.allowed_values.get(identifier=value)
class DescriptiveObservation(Observation):
def get_external_value(self):
return self.value
@staticmethod
def get_type():
return "descriptive"
@staticmethod
def get_internal_value(oproperty, value):
return AllowedValue.objects.create(property=oproperty, **value)
class UnitLatestObservation(models.Model):
unit = models.ForeignKey(
services_models.Unit,
null=False,
blank=False,
related_name="latest_observations",
on_delete=models.CASCADE,
)
property = models.ForeignKey(
ObservableProperty, null=False, blank=False, on_delete=models.CASCADE
)
observation = models.ForeignKey(
Observation, null=False, blank=False, on_delete=models.CASCADE
)
class Meta:
unique_together = (("unit", "property"),)
class PluralityAuthToken(models.Model):
"""
A token class which can have multiple active tokens per user.
"""
key = models.CharField(max_length=40, primary_key=False, db_index=True)
user = models.ForeignKey(
AUTH_USER_MODEL,
related_name="auth_tokens",
null=False,
on_delete=models.PROTECT,
)
created = models.DateTimeField(auto_now_add=True)
active = models.BooleanField(default=True)
class Meta:
# Work around for a bug in Django:
# https://code.djangoproject.com/ticket/19422
#
# Also see corresponding ticket:
# https://github.com/tomchristie/django-rest-framework/issues/705
abstract = "rest_framework.authtoken" not in settings.INSTALLED_APPS
def save(self, *args, **kwargs):
if not self.key:
self.key = self.generate_key()
return super(PluralityAuthToken, self).save(*args, **kwargs)
def generate_key(self):
return binascii.hexlify(os.urandom(20)).decode()
def __str__(self):
return self.key
class PluralityTokenAuthentication(rest_framework.authentication.TokenAuthentication):
model = PluralityAuthToken
def authenticate_credentials(self, key):
user, token = super(
PluralityTokenAuthentication, self
).authenticate_credentials(key)
if not token.active:
raise exceptions.AuthenticationFailed(_("Token inactive or deleted."))
return token.user, token
class UserOrganization(models.Model):
organization = models.ForeignKey(
services_models.Department, on_delete=models.CASCADE
)
user = models.OneToOneField(
AUTH_USER_MODEL,
related_name="organization",
null=False,
on_delete=models.CASCADE,
)
| agpl-3.0 | -9,209,405,566,981,158,000 | 29.724771 | 87 | 0.674231 | false |
mbakke/ganeti | test/ganeti.ht_unittest.py | 1 | 8965 | #!/usr/bin/python
#
# Copyright (C) 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.ht"""
import unittest
from ganeti import ht
import testutils
class TestTypeChecks(unittest.TestCase):
def testNone(self):
self.assertFalse(ht.TNotNone(None))
self.assertTrue(ht.TNone(None))
for val in [0, True, "", "Hello World", [], range(5)]:
self.assertTrue(ht.TNotNone(val))
self.assertFalse(ht.TNone(val))
def testBool(self):
self.assertTrue(ht.TBool(True))
self.assertTrue(ht.TBool(False))
for val in [0, None, "", [], "Hello"]:
self.assertFalse(ht.TBool(val))
for val in [True, -449, 1, 3, "x", "abc", [1, 2]]:
self.assertTrue(ht.TTrue(val))
for val in [False, 0, None, []]:
self.assertFalse(ht.TTrue(val))
def testInt(self):
for val in [-100, -3, 0, 16, 128, 923874]:
self.assertTrue(ht.TInt(val))
self.assertTrue(ht.TNumber(val))
for val in [False, True, None, "", [], "Hello", 0.0, 0.23, -3818.163]:
self.assertFalse(ht.TInt(val))
for val in range(0, 100, 4):
self.assertTrue(ht.TPositiveInt(val))
neg = -(val + 1)
self.assertFalse(ht.TPositiveInt(neg))
self.assertFalse(ht.TStrictPositiveInt(neg))
self.assertFalse(ht.TPositiveInt(0.1 + val))
self.assertFalse(ht.TStrictPositiveInt(0.1 + val))
for val in [0, 0.1, 0.9, -0.3]:
self.assertFalse(ht.TStrictPositiveInt(val))
for val in range(1, 100, 4):
self.assertTrue(ht.TStrictPositiveInt(val))
self.assertFalse(ht.TStrictPositiveInt(0.1 + val))
def testFloat(self):
for val in [-100.21, -3.0, 0.0, 16.12, 128.3433, 923874.928]:
self.assertTrue(ht.TFloat(val))
self.assertTrue(ht.TNumber(val))
for val in [False, True, None, "", [], "Hello", 0, 28, -1, -3281]:
self.assertFalse(ht.TFloat(val))
def testNumber(self):
for val in [-100, -3, 0, 16, 128, 923874,
-100.21, -3.0, 0.0, 16.12, 128.3433, 923874.928]:
self.assertTrue(ht.TNumber(val))
for val in [False, True, None, "", [], "Hello", "1"]:
self.assertFalse(ht.TNumber(val))
def testString(self):
for val in ["", "abc", "Hello World", "123",
u"", u"\u272C", u"abc"]:
self.assertTrue(ht.TString(val))
for val in [False, True, None, [], 0, 1, 5, -193, 93.8582]:
self.assertFalse(ht.TString(val))
def testElemOf(self):
fn = ht.TElemOf(range(10))
self.assertTrue(fn(0))
self.assertTrue(fn(3))
self.assertTrue(fn(9))
self.assertFalse(fn(-1))
self.assertFalse(fn(100))
fn = ht.TElemOf([])
self.assertFalse(fn(0))
self.assertFalse(fn(100))
self.assertFalse(fn(True))
fn = ht.TElemOf(["Hello", "World"])
self.assertTrue(fn("Hello"))
self.assertTrue(fn("World"))
self.assertFalse(fn("e"))
def testList(self):
for val in [[], range(10), ["Hello", "World", "!"]]:
self.assertTrue(ht.TList(val))
for val in [False, True, None, {}, 0, 1, 5, -193, 93.8582]:
self.assertFalse(ht.TList(val))
def testDict(self):
for val in [{}, dict.fromkeys(range(10)), {"Hello": [], "World": "!"}]:
self.assertTrue(ht.TDict(val))
for val in [False, True, None, [], 0, 1, 5, -193, 93.8582]:
self.assertFalse(ht.TDict(val))
def testIsLength(self):
fn = ht.TIsLength(10)
self.assertTrue(fn(range(10)))
self.assertFalse(fn(range(1)))
self.assertFalse(fn(range(100)))
def testAnd(self):
fn = ht.TAnd(ht.TNotNone, ht.TString)
self.assertTrue(fn(""))
self.assertFalse(fn(1))
self.assertFalse(fn(None))
def testOr(self):
fn = ht.TOr(ht.TNone, ht.TAnd(ht.TString, ht.TIsLength(5)))
self.assertTrue(fn("12345"))
self.assertTrue(fn(None))
self.assertFalse(fn(1))
self.assertFalse(fn(""))
self.assertFalse(fn("abc"))
def testMap(self):
self.assertTrue(ht.TMap(str, ht.TString)(123))
self.assertTrue(ht.TMap(int, ht.TInt)("9999"))
self.assertFalse(ht.TMap(lambda x: x + 100, ht.TString)(123))
def testNonEmptyString(self):
self.assertTrue(ht.TNonEmptyString("xyz"))
self.assertTrue(ht.TNonEmptyString("Hello World"))
self.assertFalse(ht.TNonEmptyString(""))
self.assertFalse(ht.TNonEmptyString(None))
self.assertFalse(ht.TNonEmptyString([]))
def testMaybeString(self):
self.assertTrue(ht.TMaybeString("xyz"))
self.assertTrue(ht.TMaybeString("Hello World"))
self.assertTrue(ht.TMaybeString(None))
self.assertFalse(ht.TMaybeString(""))
self.assertFalse(ht.TMaybeString([]))
def testMaybeBool(self):
self.assertTrue(ht.TMaybeBool(False))
self.assertTrue(ht.TMaybeBool(True))
self.assertTrue(ht.TMaybeBool(None))
self.assertFalse(ht.TMaybeBool([]))
self.assertFalse(ht.TMaybeBool("0"))
self.assertFalse(ht.TMaybeBool("False"))
def testListOf(self):
fn = ht.TListOf(ht.TNonEmptyString)
self.assertTrue(fn([]))
self.assertTrue(fn(["x"]))
self.assertTrue(fn(["Hello", "World"]))
self.assertFalse(fn(None))
self.assertFalse(fn(False))
self.assertFalse(fn(range(3)))
self.assertFalse(fn(["x", None]))
def testDictOf(self):
fn = ht.TDictOf(ht.TNonEmptyString, ht.TInt)
self.assertTrue(fn({}))
self.assertTrue(fn({"x": 123, "y": 999}))
self.assertFalse(fn(None))
self.assertFalse(fn({1: "x"}))
self.assertFalse(fn({"x": ""}))
self.assertFalse(fn({"x": None}))
self.assertFalse(fn({"": 8234}))
def testStrictDictRequireAllExclusive(self):
fn = ht.TStrictDict(True, True, { "a": ht.TInt, })
self.assertFalse(fn(1))
self.assertFalse(fn(None))
self.assertFalse(fn({}))
self.assertFalse(fn({"a": "Hello", }))
self.assertFalse(fn({"unknown": 999,}))
self.assertFalse(fn({"unknown": None,}))
self.assertTrue(fn({"a": 123, }))
self.assertTrue(fn({"a": -5, }))
fn = ht.TStrictDict(True, True, { "a": ht.TInt, "x": ht.TString, })
self.assertFalse(fn({}))
self.assertFalse(fn({"a": -5, }))
self.assertTrue(fn({"a": 123, "x": "", }))
self.assertFalse(fn({"a": 123, "x": None, }))
def testStrictDictExclusive(self):
fn = ht.TStrictDict(False, True, { "a": ht.TInt, "b": ht.TList, })
self.assertTrue(fn({}))
self.assertTrue(fn({"a": 123, }))
self.assertTrue(fn({"b": range(4), }))
self.assertFalse(fn({"b": 123, }))
self.assertFalse(fn({"foo": {}, }))
self.assertFalse(fn({"bar": object(), }))
def testStrictDictRequireAll(self):
fn = ht.TStrictDict(True, False, { "a": ht.TInt, "m": ht.TInt, })
self.assertTrue(fn({"a": 1, "m": 2, "bar": object(), }))
self.assertFalse(fn({}))
self.assertFalse(fn({"a": 1, "bar": object(), }))
self.assertFalse(fn({"a": 1, "m": [], "bar": object(), }))
def testStrictDict(self):
fn = ht.TStrictDict(False, False, { "a": ht.TInt, })
self.assertTrue(fn({}))
self.assertFalse(fn({"a": ""}))
self.assertTrue(fn({"a": 11}))
self.assertTrue(fn({"other": 11}))
self.assertTrue(fn({"other": object()}))
def testJobId(self):
for i in [0, 1, 4395, 2347625220]:
self.assertTrue(ht.TJobId(i))
self.assertTrue(ht.TJobId(str(i)))
self.assertFalse(ht.TJobId(-(i + 1)))
for i in ["", "-", ".", ",", "a", "99j", "job-123", "\t", " 83 ",
None, [], {}, object()]:
self.assertFalse(ht.TJobId(i))
def testRelativeJobId(self):
for i in [-1, -93, -4395]:
self.assertTrue(ht.TRelativeJobId(i))
self.assertFalse(ht.TRelativeJobId(str(i)))
for i in [0, 1, 2, 10, 9289, "", "0", "-1", "-999"]:
self.assertFalse(ht.TRelativeJobId(i))
self.assertFalse(ht.TRelativeJobId(str(i)))
def testItems(self):
self.assertRaises(AssertionError, ht.TItems, [])
fn = ht.TItems([ht.TString])
self.assertFalse(fn([0]))
self.assertFalse(fn([None]))
self.assertTrue(fn(["Hello"]))
self.assertTrue(fn(["Hello", "World"]))
self.assertTrue(fn(["Hello", 0, 1, 2, "anything"]))
fn = ht.TItems([ht.TAny, ht.TInt, ht.TAny])
self.assertTrue(fn(["Hello", 0, []]))
self.assertTrue(fn(["Hello", 893782]))
self.assertTrue(fn([{}, -938210858947, None]))
self.assertFalse(fn(["Hello", []]))
if __name__ == "__main__":
testutils.GanetiTestProgram()
| gpl-2.0 | -5,955,711,607,159,148,000 | 31.017857 | 75 | 0.61952 | false |
analyst-collective/dbt | test/unit/test_linker.py | 1 | 5807 | import os
import tempfile
import unittest
from unittest import mock
from dbt import compilation
try:
from queue import Empty
except ImportError:
from Queue import Empty
from dbt.graph.selector import NodeSelector
from dbt.graph.cli import parse_difference
def _mock_manifest(nodes):
config = mock.MagicMock(enabled=True)
manifest = mock.MagicMock(nodes={
n: mock.MagicMock(
unique_id=n,
package_name='pkg',
name=n,
empty=False,
config=config,
fqn=['pkg', n],
) for n in nodes
})
manifest.expect.side_effect = lambda n: mock.MagicMock(unique_id=n)
return manifest
class LinkerTest(unittest.TestCase):
def setUp(self):
self.linker = compilation.Linker()
def test_linker_add_node(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
actual_nodes = self.linker.nodes()
for node in expected_nodes:
self.assertIn(node, actual_nodes)
self.assertEqual(len(actual_nodes), len(expected_nodes))
def test_linker_write_graph(self):
expected_nodes = ['A', 'B', 'C']
for node in expected_nodes:
self.linker.add_node(node)
manifest = _mock_manifest('ABC')
(fd, fname) = tempfile.mkstemp()
os.close(fd)
try:
self.linker.write_graph(fname, manifest)
assert os.path.exists(fname)
finally:
os.unlink(fname)
def assert_would_join(self, queue):
"""test join() without timeout risk"""
self.assertEqual(queue.inner.unfinished_tasks, 0)
def _get_graph_queue(self, manifest, include=None, exclude=None):
graph = compilation.Graph(self.linker.graph)
selector = NodeSelector(graph, manifest)
spec = parse_difference(include, exclude)
return selector.get_graph_queue(spec)
def test_linker_add_dependency(self):
actual_deps = [('A', 'B'), ('A', 'C'), ('B', 'C')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
queue = self._get_graph_queue(_mock_manifest('ABC'))
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'C')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('C')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'A')
with self.assertRaises(Empty):
queue.get(block=False)
self.assertTrue(queue.empty())
queue.mark_done('A')
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_add_disjoint_dependencies(self):
actual_deps = [('A', 'B')]
additional_node = 'Z'
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.linker.add_node(additional_node)
queue = self._get_graph_queue(_mock_manifest('ABCZ'))
# the first one we get must be B, it has the longest dep chain
first = queue.get(block=False)
self.assertEqual(first.unique_id, 'B')
self.assertFalse(queue.empty())
queue.mark_done('B')
self.assertFalse(queue.empty())
second = queue.get(block=False)
self.assertIn(second.unique_id, {'A', 'Z'})
self.assertFalse(queue.empty())
queue.mark_done(second.unique_id)
self.assertFalse(queue.empty())
third = queue.get(block=False)
self.assertIn(third.unique_id, {'A', 'Z'})
with self.assertRaises(Empty):
queue.get(block=False)
self.assertNotEqual(second.unique_id, third.unique_id)
self.assertTrue(queue.empty())
queue.mark_done(third.unique_id)
self.assert_would_join(queue)
self.assertTrue(queue.empty())
def test_linker_dependencies_limited_to_some_nodes(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
queue = self._get_graph_queue(_mock_manifest('ABCD'), ['B'])
got = queue.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertTrue(queue.empty())
queue.mark_done('B')
self.assert_would_join(queue)
queue_2 = queue = self._get_graph_queue(_mock_manifest('ABCD'), ['A', 'B'])
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'B')
self.assertFalse(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
queue_2.mark_done('B')
self.assertFalse(queue_2.empty())
got = queue_2.get(block=False)
self.assertEqual(got.unique_id, 'A')
self.assertTrue(queue_2.empty())
with self.assertRaises(Empty):
queue_2.get(block=False)
self.assertTrue(queue_2.empty())
queue_2.mark_done('A')
self.assert_would_join(queue_2)
def test__find_cycles__cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'A')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNotNone(self.linker.find_cycles())
def test__find_cycles__no_cycles(self):
actual_deps = [('A', 'B'), ('B', 'C'), ('C', 'D')]
for (l, r) in actual_deps:
self.linker.dependency(l, r)
self.assertIsNone(self.linker.find_cycles())
| apache-2.0 | 4,797,753,715,547,958,000 | 30.906593 | 83 | 0.585845 | false |
dgdell/enigma2 | lib/python/Plugins/SystemPlugins/SoftwareManager/SoftwareTools.py | 1 | 9326 | # -*- coding: iso-8859-1 -*-
from enigma import eConsoleAppContainer
from Components.Console import Console
from Components.About import about
from Components.PackageInfo import PackageInfoHandler
from Components.Language import language
from Components.Sources.List import List
from Components.Ipkg import IpkgComponent
from Components.Network import iNetwork
from Tools.Directories import pathExists, fileExists, resolveFilename, SCOPE_METADIR
from Tools.HardwareInfo import HardwareInfo
from time import time
class SoftwareTools(PackageInfoHandler):
lastDownloadDate = None
NetworkConnectionAvailable = None
list_updating = False
available_updates = 0
available_updatelist = []
available_packetlist = []
installed_packetlist = {}
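	# Descriptive note (added): overview of the update pipeline driven below:
	#   startSoftwareTools/getUpdates -> ipkg "update" (CMD_UPDATE)
	#   -> startIpkgListAvailable ("ipkg list")
	#   -> startInstallMetaPackage (installs the *-meta packages)
	#   -> startIpkgListInstalled ("ipkg list_installed")
	#   -> countUpdates, which fills available_updatelist and notifies the caller.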
def __init__(self):
aboutInfo = about.getImageVersionString()
if aboutInfo.startswith("dev-"):
self.ImageVersion = 'Experimental'
else:
self.ImageVersion = 'Stable'
self.language = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
PackageInfoHandler.__init__(self, self.statusCallback, neededTag = 'ALL_TAGS', neededFlag = self.ImageVersion)
self.directory = resolveFilename(SCOPE_METADIR)
self.list = List([])
self.NotifierCallback = None
self.Console = Console()
self.UpdateConsole = Console()
self.cmdList = []
self.unwanted_extensions = ('-dbg', '-dev', '-doc', '-staticdev', '-src')
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
def statusCallback(self, status, progress):
pass
def startSoftwareTools(self, callback = None):
if callback is not None:
self.NotifierCallback = callback
iNetwork.checkNetworkState(self.checkNetworkCB)
def checkNetworkCB(self,data):
if data is not None:
if data <= 2:
self.NetworkConnectionAvailable = True
self.getUpdates()
else:
self.NetworkConnectionAvailable = False
self.getUpdates()
def getUpdates(self, callback = None):
if self.lastDownloadDate is None:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
else:
if self.NetworkConnectionAvailable == True:
self.lastDownloadDate = time()
if self.list_updating is False and callback is None:
self.list_updating = True
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is False and callback is not None:
self.list_updating = True
self.NotifierCallback = callback
self.ipkg.startCmd(IpkgComponent.CMD_UPDATE)
elif self.list_updating is True and callback is not None:
self.NotifierCallback = callback
else:
if self.list_updating and callback is not None:
self.NotifierCallback = callback
self.startIpkgListAvailable()
else:
self.list_updating = False
if callback is not None:
callback(False)
elif self.NotifierCallback is not None:
self.NotifierCallback(False)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_ERROR:
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback(False)
elif event == IpkgComponent.EVENT_DONE:
if self.list_updating:
self.startIpkgListAvailable()
pass
def startIpkgListAvailable(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list"
self.UpdateConsole.ePopen(cmd, self.IpkgListAvailableCB, callback)
def IpkgListAvailableCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.list_updating:
self.available_packetlist = []
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
descr = l > 2 and tokens[2].strip() or ""
self.available_packetlist.append([name, version, descr])
if callback is None:
self.startInstallMetaPackage()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startInstallMetaPackage(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if self.NetworkConnectionAvailable == True:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " install enigma2-meta enigma2-plugins-meta enigma2-skins-meta"
self.UpdateConsole.ePopen(cmd, self.InstallMetaPackageCB, callback)
else:
self.InstallMetaPackageCB(True)
def InstallMetaPackageCB(self, result, retval = None, extra_args = None):
(callback) = extra_args
if result:
self.fillPackagesIndexList()
if callback is None:
self.startIpkgListInstalled()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def startIpkgListInstalled(self, callback = None):
if callback is not None:
self.list_updating = True
if self.list_updating:
if not self.UpdateConsole:
self.UpdateConsole = Console()
cmd = self.ipkg.ipkg + " list_installed"
self.UpdateConsole.ePopen(cmd, self.IpkgListInstalledCB, callback)
def IpkgListInstalledCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
self.installed_packetlist = {}
for x in result.splitlines():
tokens = x.split(' - ')
name = tokens[0].strip()
if not any(name.endswith(x) for x in self.unwanted_extensions):
l = len(tokens)
version = l > 1 and tokens[1].strip() or ""
self.installed_packetlist[name] = version
for package in self.packagesIndexlist[:]:
if not self.verifyPrerequisites(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
if attributes.has_key("packagetype"):
if attributes["packagetype"] == "internal":
self.packagesIndexlist.remove(package)
if callback is None:
self.countUpdates()
else:
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
callback(True)
else:
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(False)
def countUpdates(self, callback = None):
self.available_updates = 0
self.available_updatelist = []
for package in self.packagesIndexlist[:]:
attributes = package[0]["attributes"]
packagename = attributes["packagename"]
for x in self.available_packetlist:
if x[0] == packagename:
if self.installed_packetlist.has_key(packagename):
if self.installed_packetlist[packagename] != x[1]:
self.available_updates +=1
self.available_updatelist.append([packagename])
self.list_updating = False
if self.UpdateConsole:
if len(self.UpdateConsole.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
elif self.NotifierCallback is not None:
self.NotifierCallback(True)
self.NotifierCallback = None
def startIpkgUpdate(self, callback = None):
if not self.Console:
self.Console = Console()
cmd = self.ipkg.ipkg + " update"
self.Console.ePopen(cmd, self.IpkgUpdateCB, callback)
def IpkgUpdateCB(self, result, retval, extra_args = None):
(callback) = extra_args
if result:
if self.Console:
if len(self.Console.appContainers) == 0:
if callback is not None:
callback(True)
callback = None
def cleanupSoftwareTools(self):
self.list_updating = False
if self.NotifierCallback is not None:
self.NotifierCallback = None
self.ipkg.stop()
if self.Console is not None:
if len(self.Console.appContainers):
for name in self.Console.appContainers.keys():
self.Console.kill(name)
if self.UpdateConsole is not None:
if len(self.UpdateConsole.appContainers):
for name in self.UpdateConsole.appContainers.keys():
self.UpdateConsole.kill(name)
def verifyPrerequisites(self, prerequisites):
if prerequisites.has_key("hardware"):
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == HardwareInfo().device_name:
hardware_found = True
if not hardware_found:
return False
return True
iSoftwareTools = SoftwareTools()
| gpl-2.0 | 2,123,058,099,230,987,800 | 32.426523 | 112 | 0.710165 | false |
questrail/pycan | tests/test_kvaser.py | 1 | 3371 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 The pycan developers. All rights reserved.
# Project site: https://github.com/questrail/pycan
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
import os
import time
import threading
import unittest
import ConfigParser
import pycan.drivers.kvaser as driver
from pycan.common import CANMessage
class KvaserTests(unittest.TestCase):
def tearDown(self):
try:
self.driver.bus_off()
self.driver.shutdown()
time.sleep(2)
except:
pass
def __load_test_config(self):
test_path = os.path.dirname(os.path.abspath(__file__))
config = ConfigParser.ConfigParser()
config.read(os.path.join(test_path, 'test.cfg'))
self.known_can_id = int(config.get('COMMON', 'Known_ID_On_Bus'), 16)
def testPEP8Compliance(self):
# Ensure PEP8 is installed
try:
import pep8
except ImportError:
self.fail(msg="PEP8 not installed.")
# Check the CAN driver
driver_path = os.path.dirname(driver.__file__)
driver_file = os.path.abspath(os.path.join(driver_path, 'kvaser.py'))
pep8_checker = pep8.Checker(driver_file)
violation_count = pep8_checker.check_all()
error_message = "PEP8 violations found: %d" % (violation_count)
self.assertTrue(violation_count == 0, msg = error_message)
def testDriver(self):
# Load the real time test configuration
self.__load_test_config()
# Setup the driver
self.driver = driver.Kvaser()
# Run the driver specific tests if and only if the driver was setup
self.Transmit()
self.Receive()
self.SpecificReceive()
def Transmit(self):
# Note you must also check that the CAN message is being placed
# on the wire at 100ms intervals
messages_to_send = 50
msg1 = CANMessage(0x123456, [1,2,3])
for x in range(messages_to_send):
time.sleep(0.1)
msg = "Failed to send message {x}".format(x=x)
self.assertTrue(self.driver.send(msg1), msg)
self.assertEqual(self.driver.life_time_sent(), messages_to_send)
def Receive(self):
messages_to_receive = 25
# Check that the life time received hasn't been updated yet
self.assertEqual(self.driver.life_time_received(), 0)
# Read back a fixed number of messages and check that the lifetime
# values track the next_message call
read_messages = 0
for x in range(messages_to_receive):
if self.driver.next_message():
self.assertEqual((x+1), self.driver.life_time_received())
def SpecificReceive(self):
messages_to_receive = 10
actual_messaged_received = 0
max_specific_attempts = 1000
# Keep reading from the bus until we find the required messages
read_messages = 0
for x in range(max_specific_attempts):
msg = self.driver.next_message()
if msg.id == self.known_can_id:
actual_messaged_received += 1
if actual_messaged_received == messages_to_receive:
                    break
self.assertEqual(actual_messaged_received, messages_to_receive)
| mit | -3,411,936,796,912,395,300 | 33.050505 | 77 | 0.621181 | false |
awni/tensorflow | tensorflow/python/training/training_ops.py | 1 | 7523 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.training import gen_training_ops
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
# Shape functions for fused training ops
# --------------------------------------
#
# The fused training ops all have the same basic structure: they take
# one or more variables with the same shape, and emit a reference to
# the original variable (which has the same shape as the first
# input). In addition, they take one or more scalar tensors containing
# hyperparameters.
#
# The sparse ops take the gradients as a Python IndexedSlices, which
# means that the indices are a vector of length N, and the gradient
# values are a tensor whose size is the same as the original variable,
# except for the 0th dimension, which has size N.
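# For example (illustrative): for SparseApplyAdagrad on a variable of shape
# [10, 4] with N = 3 updated rows, the gradient input has shape [3, 4], the
# indices input is a vector of length 3, and the op's output keeps the
# variable's shape [10, 4].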
def _AssertInputIsScalar(op, index):
"""Raises ValueError if `op.inputs[index]` is not scalar."""
op.inputs[index].get_shape().assert_is_compatible_with(tensor_shape.scalar())
@ops.RegisterShape("ApplyAdadelta")
def _ApplyAdadeltaShape(op):
"""Shape function for the ApplyAdadelta op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
accum_update_shape = op.inputs[2].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # epsilon
grad_shape = op.inputs[6].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyAdagrad")
def _ApplyAdagradShape(op):
"""Shape function for the ApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyFtrl")
def _ApplyFtrlShape(op):
"""Shape function for the ApplyFtrlOp op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
linear_shape = op.inputs[2].get_shape().merge_with(accum_shape)
grad_shape = op.inputs[3].get_shape().merge_with(linear_shape)
_AssertInputIsScalar(op, 4) # lr
_AssertInputIsScalar(op, 5) # l1
_AssertInputIsScalar(op, 6) # l2
_AssertInputIsScalar(op, 7) # lr_power
return [grad_shape]
@ops.RegisterShape("ApplyAdam")
def _ApplyAdamShape(op):
"""Shape function for the ApplyAdam op."""
var_shape = op.inputs[0].get_shape()
m_shape = op.inputs[1].get_shape().merge_with(var_shape)
v_shape = op.inputs[2].get_shape().merge_with(m_shape)
_AssertInputIsScalar(op, 3) # beta1_power
_AssertInputIsScalar(op, 4) # beta2_power
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # beta1
_AssertInputIsScalar(op, 7) # beta2
_AssertInputIsScalar(op, 8) # epsilon
grad_shape = op.inputs[9].get_shape().merge_with(v_shape)
return [grad_shape]
@ops.RegisterShape("ApplyMomentum")
def _ApplyMomentumShape(op):
"""Shape function for the ApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
_AssertInputIsScalar(op, 4) # momentum
return [grad_shape]
@ops.RegisterShape("ApplyRMSProp")
def _ApplyRMSPropShape(op):
"""Shape function for the ApplyRMSProp op."""
var_shape = op.inputs[0].get_shape()
ms_shape = op.inputs[1].get_shape().merge_with(var_shape)
mom_shape = op.inputs[2].get_shape().merge_with(ms_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # momentum
_AssertInputIsScalar(op, 6) # epsilon
grad_shape = op.inputs[7].get_shape().merge_with(mom_shape)
return [grad_shape]
@ops.RegisterShape("ApplyGradientDescent")
def _ApplyGradientDescentShape(op):
"""Shape function for the ApplyGradientDescent op."""
var_shape = op.inputs[0].get_shape()
_AssertInputIsScalar(op, 1) # alpha
delta_shape = op.inputs[2].get_shape().merge_with(var_shape)
return [delta_shape]
@ops.RegisterShape("SparseApplyAdadelta")
def _SparseApplyAdadeltaShape(op):
"""Shape function for the SparseApplyAdadelta op."""
var_shape = op.inputs[0].get_shape()
accum_grad_shape = op.inputs[1].get_shape().merge_with(var_shape)
accum_update_shape = op.inputs[2].get_shape().merge_with(accum_grad_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # decay_rate
_AssertInputIsScalar(op, 5) # epsilon
grad_shape = op.inputs[6].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_update_shape[1:]))
unused_indices_shape = op.inputs[7].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_update_shape]
@ops.RegisterShape("SparseApplyAdagrad")
def _SparseApplyAdagradShape(op):
"""Shape function for the SparseApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_shape]
@ops.RegisterShape("SparseApplyFtrl")
def _SparseApplyFtrlShape(op):
"""Shape function for the SparseApplyFtrl op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
linear_shape = op.inputs[2].get_shape().merge_with(accum_shape)
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(linear_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # l1
_AssertInputIsScalar(op, 7) # l2
_AssertInputIsScalar(op, 8) # lr_power
return [linear_shape]
@ops.RegisterShape("SparseApplyMomentum")
def _SparseApplyMomentumShape(op):
"""Shape function for the SparseApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # momentum
return [accum_shape]
| apache-2.0 | 2,950,394,919,528,449,000 | 38.387435 | 80 | 0.705968 | false |
tawiesn/dialog_exercise | ex_unittest.py | 1 | 5130 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
__author__ = "Tobias Wiesner"
__license__ = "GPL 3.0"
__maintainer__ = "Tobias Wiesner"
__email__ = "[email protected]"
import sys
import unittest
import itertools
from bitstring import BitArray
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtTest import QTest
from ex_gui import ExerciseWindow,MyRegisterModel
app = QApplication(sys.argv)
class EightBitDemoDevice:
""" Hardware interface layer for 8bit demo device """
    def subset_sum(self, numbers, target, partial=None, result=None):
        """ Recursive helper function to collect all possible summands """
        # None defaults avoid the shared mutable-default-argument pitfall;
        # callers still pass explicit lists, exactly as before.
        if partial is None:
            partial = []
        if result is None:
            result = []
        s = sum(partial)
        # check if the partial sum equals the target
if s == target:
result.append(partial)
if s >= target:
return # if we reach the number why bother to continue
for i in range(len(numbers)):
n = numbers[i]
remaining = numbers[i+1:]
self.subset_sum(remaining, target, partial + [n],result)
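    # Illustrative usage of subset_sum (an assumed example, not part of the
    # original code): collecting every combination of [1, 1, 2] that sums to 2
    #
    #   out = []
    #   self.subset_sum([1, 1, 2], 2, [], out)
    #   # out == [[1, 1], [2]]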
def generate_bitfields(self):
""" helper function which generates all possible lists of sums of 8
with summands in increasing ordering. For each list of summands
we generate all permutations and store them in one big list of
lists
"""
res = []
# build all subset sums (with summands of increasing size)
self.subset_sum([1,1,1,1,1,1,1,1,2,2,2,2,3,3,4,4,5,6,7,8],8,[],res)
# permute all subset sums (to have all cases with random ordering of summands)
final = []
for r in res:
perm = itertools.permutations(r)
uniq_res = [list(t) for t in set(map(tuple, list(perm)))]
for s in uniq_res:
final.append(s)
uniq_final = [list(t) for t in set(map(tuple, final))]
return uniq_final
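    # For illustration (assumed, not exhaustive): the list returned above
    # contains entries such as [8], [4, 4], [1, 7], [7, 1] and
    # [1, 1, 1, 1, 1, 1, 1, 1] -- i.e. every ordered way of splitting the
    # 8 bits of a register into bitfield widths.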
def generate_register(self, regAddress, bitfieldWidths):
""" helper function generates a register entry for a given bitfield array """
register = []
register.append("reg {0}".format(regAddress))
register.append(BitArray(int=regAddress, length=16))
bitfields = []
b = 0
for i in bitfieldWidths:
bf = []
bf.append("bit {0}-{1}".format(b,b+i-1))
bf.append(b)
bf.append(i)
b = b + i
bitfields.append(bf)
register.append(bitfields)
register.append(BitArray(int=0, length=8))
return register
def build_8bit_demo_device(self):
""" build demo device containing all combination of GUI elements for 8 bit data """
# generate all bitfield combinations
bitfieldcombinations = self.generate_bitfields()
# determine number of registers (1 per bitfield variation)
numRegisters = len(bitfieldcombinations)
# collect all registers
deviceregisters = []
for i in range(0,numRegisters):
reg = self.generate_register(i,bitfieldcombinations[i])
deviceregisters.append(reg)
self.my_data = deviceregisters
def loadData(self):
# generate demo device and return it
self.build_8bit_demo_device()
return self.my_data
def storeData(self, data):
self.my_data = data
for i in range(0,len(self.my_data)):
print("Register 0x{0: <4}: {1: <7} = 0b{2}".format(BitArray(int=i, length=16).hex,self.my_data[i][0],self.my_data[i][3].bin))
class DefectDeviceA:
def loadData(self):
data = [[42, BitArray(int = 1, length=16), # integer as a name
[
["bit 0", 0, 1],
["bits 1-7", 1, 7]
],
BitArray('0b00000000')
]]
return data
def storeData(self, data):
print ("")
class DefectDeviceB:
def loadData(self):
data = [["defect", 66666, # wrong address
[
["bit 0", 0, 1],
["bits 1-7", 1, 7]
],
BitArray('0b00000000')
]]
return data
def storeData(self, data):
print ("")
class DefectDeviceC:
def loadData(self):
data = [["defect", BitArray(int = 1, length=16),
[
["bit 0", 0, 1],
["bits 1-7", 1, 22] # wrong bitfield width
],
BitArray('0b00000000')
]]
return data
def storeData(self, data):
print ("")
class DefectDeviceD:
def loadData(self):
data = [["defect", BitArray(int = 1, length=16),
[
["bit 0", 0, 1],
["bits 1-7", 1, 7]
],
"DEFECT" # not a BitArray
]]
return data
def storeData(self, data):
print ("")
class ExerciseTest(unittest.TestCase):
""" Unit test for Exercise GUI """
def setUp(self):
""" Create the GUI """
self.form = ExerciseWindow()
def test_defaults(self):
""" Test GUI """
demodevice = EightBitDemoDevice()
self.model = MyRegisterModel(demodevice)
self.form.setModel(self.model)
self.assertEqual(self.form.testMe(), True)
def test_defectA(self):
""" Test Defect devices """
for demodevice in {DefectDeviceA(),DefectDeviceB(),DefectDeviceC(),DefectDeviceD()}:
self.model = MyRegisterModel(demodevice)
self.assertRaises(TypeError, self.form.testMe, self.model)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 6,534,881,525,604,608,000 | 27.032787 | 131 | 0.609552 | false |
giorgiop/scikit-learn | sklearn/utils/fixes.py | 2 | 13212 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
try:
from inspect import signature
except ImportError:
from ..externals.funcsigs import signature
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
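# A quick sanity check of the fallback above (illustrative only): expit(0.0)
# evaluates to 0.5, and expit(np.array([-1000., 1000.])) stays finite at
# [0., 1.], which is exactly the stability property the SciPy version probe
# above is checking for.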
# little dance to see if np.copy has an 'order' keyword argument
# Supported since numpy 1.7.0
if 'order' in signature(np.copy).parameters:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float64))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument (numpy < 1.7.0)
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
    # in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
            # Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from numpy import partition
except ImportError:
    warnings.warn('Using `sort` instead of partition. '
                  'Upgrade numpy to 1.8 for better performance on large number '
                  'of clusters')
def partition(a, kth, axis=-1, kind='introselect', order=None):
return np.sort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
def parallel_helper(obj, methodname, *args, **kwargs):
"""Helper to workaround Python 2 limitations of pickling instance methods"""
return getattr(obj, methodname)(*args, **kwargs)
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in signature(os.makedirs).parameters:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
if np_version < (1, 8, 1):
def array_equal(a1, a2):
# copy-paste from numpy 1.8.1
try:
a1, a2 = np.asarray(a1), np.asarray(a2)
except:
return False
if a1.shape != a2.shape:
return False
return bool(np.asarray(a1 == a2).all())
else:
from numpy import array_equal
if sp_version < (0, 13, 0):
def rankdata(a, method='average'):
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
else:
from scipy.stats import rankdata
| bsd-3-clause | 6,369,921,192,213,193,000 | 31.784119 | 84 | 0.577505 | false |
kakaroto/amsn2 | amsn2/ui/front_ends/qt4/contact_list.py | 1 | 15634 | # -*- coding: utf-8 -*-
#
# amsn - a python client for the WLM Network
#
# Copyright (C) 2008 Dario Freddi <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from amsn2.ui import base
from PyQt4 import Qt
from PyQt4 import QtCore
from PyQt4 import QtGui
from ui_contactlist import Ui_ContactList
from styledwidget import StyledWidget
from image import *
from amsn2.views import StringView, ContactView, GroupView, ImageView, PersonalInfoView
import common
class aMSNContactListWindow(base.aMSNContactListWindow):
def __init__(self, amsn_core, parent):
self._amsn_core = amsn_core
self._parent = parent
self._skin = amsn_core._skin_manager.skin
self._theme_manager = self._amsn_core._theme_manager
self._myview = amsn_core._personalinfo_manager._personalinfoview
self._clwidget = aMSNContactListWidget(amsn_core, self)
self._clwidget.show()
self.__create_controls()
self._clwidget.ui.pixUser.setIconSize(QtCore.QSize(96,96))
self._clwidget.ui.pixUser.setIcon(QtGui.QIcon("amsn2/ui/front_ends/qt4/msn-userimage2.png"))
QtCore.QObject.connect(self._clwidget.ui.pixUser, QtCore.SIGNAL("clicked()"),self._myview.changeDP)
def __create_controls(self):
#status list
for key in self._amsn_core.p2s:
name = self._amsn_core.p2s[key]
_, path = self._theme_manager.get_statusicon("buddy_%s" % name)
if (name == self._amsn_core.p2s['FLN']): continue
self._clwidget.ui.status.addItem(QtGui.QIcon(path), str.capitalize(name), key)
def show(self):
self._clwidget.show()
def hide(self):
self._clwidget.hide()
def set_title(self, text):
self._parent.setTitle(text)
def set_menu(self, menu):
self._parent.setMenu(menu)
def my_info_updated(self, view):
# TODO image, ...
imview = view.dp
if len(imview.imgs) > 0:
pixbuf = QtGui.QPixmap(imview.imgs[0][1])
pixbuf = pixbuf.scaled(96,96,0,1)
self._clwidget.ui.pixUser.setIcon(QtGui.QIcon(pixbuf))
nk = view.nick
self._clwidget.ui.nickName.setHtml(nk.to_HTML_string())
message = view.psm.to_HTML_string()
if len(view.current_media.to_HTML_string()) > 0:
message += ' ' + view.current_media.to_HTML_string()
self._clwidget.ui.statusMessage.setHtml('<i>'+message+'</i>')
for key in self._amsn_core.p2s:
if self._amsn_core.p2s[key] == view.presence:
self._clwidget.ui.status.setCurrentIndex(self._clwidget.ui.status.findData(key))
def get_contactlist_widget(self):
return self._clwidget
class itemDelegate(QtGui.QStyledItemDelegate):
#Dooooon't touch anything here!!! Or it will break into a million pieces and you'll be really sorry!!!
def paint(self, painter, option, index):
if not index.isValid():
return
painter.translate(0, 0)
options = QtGui.QStyleOptionViewItemV4(option)
self.initStyleOption(options, index)
painter.save()
painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
doc = QtGui.QTextDocument()
doc.setHtml(options.text)
options.text = ""
QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_ItemViewItem, options, painter, options.widget)
        painter.translate(options.rect.left() + self.sizeDp(index) + 3, options.rect.top()) #paint text right after the dp + 3 pixels
rect = QtCore.QRectF(0, 0, options.rect.width(), options.rect.height())
doc.drawContents(painter, rect)
painter.restore()
def sizeHint(self, option, index):
options = QtGui.QStyleOptionViewItemV4(option)
self.initStyleOption(options, index)
doc = QtGui.QTextDocument()
doc.setHtml(options.text)
doc.setTextWidth(options.rect.width())
        #if group, leave as is; if contact item, use dp height for calculating sizeHint.
model = index.model()
qv = QtGui.QPixmap(model.data(model.index(index.row(), 0,
index.parent()), QtCore.Qt.DecorationRole))
if qv.isNull():
size = QtCore.QSize(doc.idealWidth(), doc.size().height())
else:
size = QtCore.QSize(doc.idealWidth(), qv.height() + 6)
return size
def sizeDp(self, index):
model = index.model()
qv = QtGui.QPixmap(model.data(model.index(index.row(), 0,
index.parent()), QtCore.Qt.DecorationRole))
return qv.width()
class GlobalFilter(QtCore.QObject):
def __init__(self,parent =None):
QtCore.QObject.__init__(self,parent)
def eventFilter(self, obj, e):
if obj.objectName() == "nickName":
if e.type() == QtCore.QEvent.FocusOut:
obj.emit(QtCore.SIGNAL("nickChange()"))
return False
if e.type() == QtCore.QEvent.KeyPress and (e.key() ==
QtCore.Qt.Key_Enter or
e.key() == QtCore.Qt.Key_Return):
return True
if obj.objectName() == "statusMessage":
if e.type() == QtCore.QEvent.FocusOut:
obj.emit(QtCore.SIGNAL("psmChange()"))
return False
if e.type() == QtCore.QEvent.KeyPress and (e.key() ==
QtCore.Qt.Key_Enter or
e.key() == QtCore.Qt.Key_Return):
return True
return False
class aMSNContactListWidget(StyledWidget, base.aMSNContactListWidget):
def __init__(self, amsn_core, parent):
StyledWidget.__init__(self, parent._parent)
self._amsn_core = amsn_core
self._myview = parent._myview
self.ui = Ui_ContactList()
self.ui.setupUi(self)
delegate = itemDelegate(self)
self.ui.cList.setItemDelegate(delegate)
self._parent = parent
self._mainWindow = parent._parent
self._model = QtGui.QStandardItemModel(self)
self._model.setColumnCount(4)
self._proxyModel = QtGui.QSortFilterProxyModel(self)
self._proxyModel.setSourceModel(self._model)
self.ui.cList.setModel(self._proxyModel)
self._contactDict = dict()
self.groups = []
self.contacts = {}
self._proxyModel.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
self._proxyModel.setFilterKeyColumn(-1)
(self.ui.cList.header()).resizeSections(1) #auto-resize column wigth
(self.ui.cList.header()).setSectionHidden(1, True) #hide --> (group/contact ID)
(self.ui.cList.header()).setSectionHidden(2, True) #hide --> (boolean value. Do I really need this?)
(self.ui.cList.header()).setSectionHidden(3, True) #hide --> (contact/group view object)
self.connect(self.ui.searchLine, QtCore.SIGNAL('textChanged(QString)'),
self._proxyModel, QtCore.SLOT('setFilterFixedString(QString)'))
self.connect(self.ui.nickName, QtCore.SIGNAL('nickChange()'), self.__nickChange)
self.connect(self.ui.statusMessage, QtCore.SIGNAL('psmChange()'), self.__psmChange)
self.connect(self.ui.status, QtCore.SIGNAL('currentIndexChanged(int)'), self.__statusChange)
self.connect(self.ui.cList, QtCore.SIGNAL('doubleClicked(QModelIndex)'), self.__clDoubleClick)
self.ui.nickName.installEventFilter(GlobalFilter(self.ui.nickName))
self.ui.statusMessage.installEventFilter(GlobalFilter(self.ui.statusMessage))
def show(self):
self._mainWindow.fadeIn(self)
def hide(self):
pass
def __nickChange(self):
sv = StringView()
sv.append_text(str(self.ui.nickName.toPlainText()))
self._myview.nick = str(sv)
def __psmChange(self):
sv = StringView()
sv.append_text(str(self.ui.statusMessage.toPlainText()))
self._myview.psm = str(sv)
def __statusChange(self, i):
if self.ui.status.count()+1 != len(self._amsn_core.p2s): return
for key in self._amsn_core.p2s:
if key == str(self.ui.status.itemData(i).toString()):
self._myview.presence = self._amsn_core.p2s[key]
def __search_by_id(self, id):
parent = self._model.item(0)
children = []
while (parent is not None):
obj = str(self._model.item(self._model.indexFromItem(parent).row(), 1).text())
if (obj == id): return parent
child = parent.child(0)
nc = 0
while (child is not None):
cobj = str(parent.child(nc, 1).text())
if (cobj == id): children.append(child)
nc = nc + 1
child = self._model.item(self._model.indexFromItem(parent).row()).child(nc)
parent = self._model.item(self._model.indexFromItem(parent).row() + 1)
if parent is None: break
if children: return children
else: return None
def contactlist_updated(self, view):
guids = self.groups
self.groups = []
# New groups
for gid in view.group_ids:
if (gid == 0): gid = '0'
self.groups.append(gid)
if gid not in guids:
self._model.appendRow([QtGui.QStandardItem(gid),
QtGui.QStandardItem(gid),
QtGui.QStandardItem("group"),
QtGui.QStandardItem()])
# Remove unused groups
for gid in guids:
if gid not in self.groups:
gitem = self.__search_by_id(gid)
self._model.removeRow((self._model.indexFromItem(gitem)).row())
try:
del self.contacts[gid]
except KeyError:
pass
#self.groups.remove(gid)
def contact_updated(self, contact):
citems = self.__search_by_id(contact.uid)
if citems is None: return
dp = Image(self._parent._theme_manager, contact.dp)
dp = dp.to_size(28, 28)
#icon = Image(self._parent._theme_manager, contact.icon)
for citem in citems:
gitem = citem.parent()
if gitem is None: continue
gitem.child(self._model.indexFromItem(citem).row(),
0).setData(QtCore.QVariant(dp), QtCore.Qt.DecorationRole)
#gitem.child(self._model.indexFromItem(citem).row(), 0).setData(QVariant(icon), Qt.DecorationRole)
gitem.child(self._model.indexFromItem(citem).row(),
3).setData(QtCore.QVariant(contact), QtCore.Qt.DisplayRole)
cname = StringView()
cname = contact.name.to_HTML_string()
gitem.child(self._model.indexFromItem(citem).row(),
0).setText(QtCore.QString.fromUtf8(cname))
def group_updated(self, group):
if (group.uid == 0): group.uid = '0'
if group.uid not in self.groups: return
gitem = self.__search_by_id(group.uid)
self._model.item(self._model.indexFromItem(gitem).row(),
3).setData(QtCore.QVariant(group), QtCore.Qt.DisplayRole)
gname = StringView()
gname = group.name
self._model.item((self._model.indexFromItem(gitem)).row(),
0).setText('<b>'+QtCore.QString.fromUtf8(gname.to_HTML_string())+'</b>')
try:
cuids = self.contacts[group.uid]
except:
cuids = []
self.contacts[group.uid] = group.contact_ids.copy()
for cid in group.contact_ids:
if cid not in cuids:
gitem = self.__search_by_id(group.uid)
gitem.appendRow([QtGui.QStandardItem(cid),
QtGui.QStandardItem(cid),
QtGui.QStandardItem("contact"),
QtGui.QStandardItem()])
# Remove unused contacts
for cid in cuids:
if cid not in self.contacts[group.uid]:
citems = self.__search_by_id(cid)
for citem in citems:
self._model.removeRow((self._model.indexFromItem(citem)).row())
def group_removed(self, group):
gid = self.__search_by_id(group.uid)
        self._model.takeRow(self._model.indexFromItem(gid).row())
def configure(self, option, value):
pass
def cget(self, option, value):
pass
def size_request_set(self, w, h):
pass
def __clDoubleClick(self, index):
model = index.model()
qvart = model.data(model.index(index.row(), 2, index.parent()))
qvarv = model.data(model.index(index.row(), 3, index.parent()))
type = qvart.toString()
view = qvarv.toPyObject()
#is the double-clicked item a contact?
if type == "contact":
view.on_click(view.uid)
else:
print "Double click on group!"
def contextMenuEvent(self, event):
l = self.ui.cList.selectedIndexes()
index = l[0]
model = index.model()
qvart = model.data(model.index(index.row(), 2, index.parent()))
qvarv = model.data(model.index(index.row(), 3, index.parent()))
type = qvart.toString()
view = qvarv.toPyObject()
if type == "contact":
menuview = view.on_right_click_popup_menu
menu = QtGui.QMenu("Contact Popup", self)
common.create_menu_items_from_view(menu, menuview.items)
menu.popup(event.globalPos())
if type == "group":
menuview = view.on_right_click_popup_menu
menu = QtGui.QMenu("Group Popup", self)
common.create_menu_items_from_view(menu, menuview.items)
menu.popup(event.globalPos())
def set_contact_context_menu(self, cb):
#TODO:
pass
    def group_added(self, group):
        pi = self._model.invisibleRootItem()
        # Adding Group Item: build the items first and append complete rows,
        # so we never look items up through the model before they belong to
        # it (groupItem.row() is -1 until the row has been appended)
        gname = StringView()
        gname = group.name
        groupItem = QtGui.QStandardItem('<b>' + QtCore.QString.fromUtf8(gname.to_HTML_string()) + '</b>')
        pi.appendRow([groupItem,
                      QtGui.QStandardItem(QtCore.QString.fromUtf8(str(group.uid))),
                      QtGui.QStandardItem("group"),
                      QtGui.QStandardItem()])
        for contact in group.contacts:
            cname = StringView()
            cname = contact.name
            contactItem = QtGui.QStandardItem(QtCore.QString.fromUtf8(cname.to_HTML_string()))
            groupItem.appendRow([contactItem,
                                 QtGui.QStandardItem(QtCore.QString.fromUtf8(str(contact.uid))),
                                 QtGui.QStandardItem("contact"),
                                 QtGui.QStandardItem()])
            self._contactDict[contact.uid] = contact
| gpl-2.0 | -3,533,732,585,038,985,700 | 38.984655 | 132 | 0.592491 | false |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/tasks/fairseq_task.py | 1 | 16132 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import os
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import data_utils, FairseqDataset, iterators, Dictionary
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
def __init__(self, args):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return (os.pathsep in getattr(self.args, 'data', ''))
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# For default fairseq task, return same iterator across epochs
# as datasets are not dynamic, can be overridden in task specific
# setting.
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = data_utils.filter_by_size(
indices,
dataset,
max_positions,
raise_exception=(not ignore_invalid_inputs),
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=getattr(self.args, 'data_buffer_size', 0)
)
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
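    # A minimal usage sketch (assumed, not part of this class): given a task
    # with a loaded 'train' split, a trainer would typically do something like
    #
    #   epoch_itr = task.get_batch_iterator(
    #       dataset=task.dataset('train'),
    #       max_tokens=4096,
    #       max_positions=1024,
    #       seed=1,
    #       epoch=1,
    #   )
    #   for sample in epoch_itr.next_epoch_itr(shuffle=True):
    #       ...
    #
    # where next_epoch_itr is provided by the returned EpochBatchIterator.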
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self)
if getattr(args, 'tpu', False):
model.prepare_for_tpu_()
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(self, models, args, seq_gen_cls=None):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False, retain_graph=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss, retain_graph=retain_graph)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
| bsd-3-clause | 1,230,206,139,522,722,600 | 36.691589 | 100 | 0.593603 | false |
bhrutledge/debugged-django | debugged/stream/signals.py | 1 | 2560 | from datetime import datetime, timedelta
from django.contrib.contenttypes.models import ContentType
from debugged.stream.models import StreamEntry, StreamItem
def _get_stream_item(instance):
instance_type = ContentType.objects.get_for_model(instance)
try:
s = StreamItem.objects.get(content_type=instance_type, object_id=instance.id)
except:
s = StreamItem(content_type=instance_type, object_id=instance.id)
return s
def _get_stream_entry(instance):
instance_type = ContentType.objects.get_for_model(instance)
try:
parent = instance.parent
parent_type = ContentType.objects.get_for_model(parent)
parent_id = parent.id
except:
parent = parent_type = parent_id = None
end_date = instance.publish_date + timedelta(minutes=30)
start_date = instance.publish_date - timedelta(minutes=30)
try:
e = StreamEntry.objects.get(item_type=instance_type,
content_type=parent_type, object_id=parent_id,
publish_date__range=(start_date, end_date))
except:
e = StreamEntry(item_type=instance_type,
content_type=parent_type, object_id=parent_id)
return e
def delete_stream_item(sender, instance, **kwargs):
instance_type = ContentType.objects.get_for_model(instance)
try:
item = StreamItem.objects.get(content_type=instance_type.id, object_id=instance.id)
entry = item.entry
item.delete()
if entry.items.count() == 0:
entry.delete()
except:
pass
def update_stream_item(sender, instance, **kwargs):
# TODO: What about StreamItems that already have StreamEntries?
if instance.published:
item = _get_stream_item(instance)
entry = _get_stream_entry(instance)
if entry.publish_date:
entry.publish_date = max(instance.publish_date, entry.publish_date)
else:
entry.publish_date = instance.publish_date
entry.modify_date = datetime.now()
entry.save()
try:
old_entry = item.entry
except:
old_entry = None
item.publish_date = instance.publish_date
item.modify_date = instance.modify_date
item.entry = entry
item.save()
if old_entry and old_entry.items.count() == 0:
old_entry.delete()
else:
delete_stream_item(sender, instance)
| mit | -8,103,646,417,448,047,000 | 32.246753 | 91 | 0.607031 | false |
0xf2/stackalytics | stackalytics/dashboard/web.py | 1 | 24373 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import operator
import os
import time
import flask
from oslo_config import cfg
from oslo_log import log as logging
import six
from stackalytics.dashboard import config
from stackalytics.dashboard import decorators
from stackalytics.dashboard import helpers
from stackalytics.dashboard import kpi
from stackalytics.dashboard import parameters
from stackalytics.dashboard import reports
from stackalytics.dashboard import vault
from stackalytics.processor import config as processor_cfg
from stackalytics.processor import utils
# Application objects ---------
app = flask.Flask(__name__)
app.config.from_object(__name__)
app.config.from_envvar('DASHBOARD_CONF', silent=True)
app.register_blueprint(reports.blueprint)
app.register_blueprint(kpi.blueprint)
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(processor_cfg.CONNECTION_OPTS + config.DASHBOARD_OPTS)
# Handlers ---------
@app.route('/')
@decorators.templated()
def overview():
pass
@app.route('/widget')
def widget():
return flask.render_template('widget.html')
# AJAX Handlers ---------
def _get_aggregated_stats(records, metric_filter, keys, param_id,
param_title=None, finalize_handler=None):
param_title = param_title or param_id
result = dict((c, {'metric': 0, 'id': c}) for c in keys)
context = {'vault': vault.get_vault()}
if metric_filter:
for record in records:
metric_filter(result, record, param_id, context)
result[getattr(record, param_id)]['name'] = (
getattr(record, param_title))
else:
for record in records:
record_param_id = getattr(record, param_id)
result[record_param_id]['metric'] += 1
result[record_param_id]['name'] = getattr(record, param_title)
response = [r for r in result.values() if r['metric']]
if finalize_handler:
response = [item for item in map(finalize_handler, response) if item]
response.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(response, item_filter=lambda x: x['id'] != '*independent')
return response
@app.route('/api/1.0/new_companies')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['start_date'])
def get_new_companies(records, **kwargs):
days = int(flask.request.args.get('days') or reports.DEFAULT_DAYS_COUNT)
start_date = int(time.time()) - days * 24 * 60 * 60
result = {}
for record in records:
company_name = record.company_name
date = record.date
if company_name not in result or result[company_name] > date:
result[company_name] = date
response = list(({'name': company_name,
'date': result[company_name],
'date_str': helpers.format_date(result[company_name])})
for company_name in result
if result[company_name] >= start_date)
response.sort(key=lambda x: x['date'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_companies(records, metric_filter, finalize_handler, **kwargs):
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_companies(),
'company_name',
finalize_handler=finalize_handler)
@app.route('/api/1.0/stats/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_modules(records, metric_filter, finalize_handler, **kwargs):
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_modules(),
'module', finalize_handler=finalize_handler)
def get_core_engineer_branch(user, modules):
is_core = None
for (module, branch) in (user.get('core') or []):
if module in modules:
is_core = branch
if branch == 'master': # master is preferable, but stables are ok
break
return is_core
@app.route('/api/1.0/stats/engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
@decorators.aggregate_filter()
def get_engineers(records, metric_filter, finalize_handler, **kwargs):
modules_names = parameters.get_parameter(kwargs, 'module')
modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])
def postprocessing(record):
if finalize_handler:
record = finalize_handler(record)
user = vault.get_user_from_runtime_storage(record['id'])
record['core'] = get_core_engineer_branch(user, modules)
return record
return _get_aggregated_stats(records, metric_filter,
vault.get_memory_storage().get_user_ids(),
'user_id', 'author_name',
finalize_handler=postprocessing)
@app.route('/api/1.0/stats/engineers_extended')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['metric'])
def get_engineers_extended(records, **kwargs):
modules_names = parameters.get_parameter(kwargs, 'module')
modules = set([m for m, r in vault.resolve_modules(modules_names, [''])])
def postprocessing(record):
record = decorators.mark_finalize(record)
if not (record['mark'] or record['review'] or record['commit'] or
record['email'] or record['patch']):
return
user = vault.get_user_from_runtime_storage(record['id'])
record['company'] = helpers.get_current_company(user)
record['core'] = get_core_engineer_branch(user, modules)
return record
def record_processing(result, record, param_id):
result_row = result[getattr(record, param_id)]
record_type = record.record_type
result_row[record_type] = result_row.get(record_type, 0) + 1
if record_type == 'mark':
decorators.mark_filter(result, record, param_id, {})
result = {}
for record in records:
user_id = record.user_id
if user_id not in result:
result[user_id] = {'id': user_id, 'mark': 0, 'review': 0,
'commit': 0, 'email': 0, 'patch': 0,
'metric': 0}
record_processing(result, record, 'user_id')
result[user_id]['name'] = record.author_name
response = result.values()
response = [item for item in map(postprocessing, response) if item]
response.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/distinct_engineers')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_distinct_engineers(records, **kwargs):
result = {}
for record in records:
result[record.user_id] = {
'author_name': record.author_name,
'author_email': record.author_email,
}
return result
@app.route('/api/1.0/activity')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('activity')
@decorators.record_filter()
def get_activity_json(records, **kwargs):
start_record = int(flask.request.args.get('start_record') or 0)
page_size = int(flask.request.args.get('page_size') or
parameters.DEFAULT_RECORDS_LIMIT)
query_message = flask.request.args.get('query_message')
return helpers.get_activity(records, start_record, page_size,
query_message)
@app.route('/api/1.0/contribution')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['metric'])
@decorators.jsonify('contribution')
@decorators.record_filter(ignore=['metric'])
def get_contribution_json(records, **kwargs):
return helpers.get_contribution_summary(records)
@app.route('/api/1.0/companies')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['company'])
@decorators.jsonify()
@decorators.record_filter(ignore=['company'])
def get_companies_json(record_ids, **kwargs):
memory_storage = vault.get_memory_storage()
companies = set(company
for company in memory_storage.get_index_keys_by_record_ids(
'company_name', record_ids))
if kwargs['_params']['company']:
companies.add(memory_storage.get_original_company_name(
kwargs['_params']['company'][0]))
return [{'id': c.lower().replace('&', ''), 'text': c}
for c in sorted(companies)]
@app.route('/api/1.0/modules')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['module'])
@decorators.jsonify()
@decorators.record_filter(ignore=['module'])
def get_modules_json(record_ids, **kwargs):
module_id_index = vault.get_vault()['module_id_index']
tags = parameters.get_parameter(kwargs, 'tag', plural_name='tags')
# all modules mentioned in records
module_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
'module', record_ids)
add_modules = set([])
for module in six.itervalues(module_id_index):
if set(module['modules']) & module_ids:
add_modules.add(module['id'])
module_ids |= add_modules
# keep only modules with specified tags
if tags:
module_ids = set(module_id for module_id in module_ids
if ((module_id in module_id_index) and
(module_id_index[module_id].get('tag') in tags)))
result = []
for module_id in module_ids:
module = module_id_index[module_id]
result.append({'id': module['id'],
'text': module['module_group_name'],
'tag': module['tag']})
return sorted(result, key=operator.itemgetter('text'))
@app.route('/api/1.0/companies/<company_name>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('company')
def get_company(company_name, **kwargs):
memory_storage_inst = vault.get_memory_storage()
for company in memory_storage_inst.get_companies():
if company.lower() == company_name.lower():
return {
'id': company_name,
'text': memory_storage_inst.get_original_company_name(
company_name)
}
flask.abort(404)
@app.route('/api/1.0/modules/<module_id>')
@decorators.response()
@decorators.cached()
@decorators.jsonify('module')
def get_module(module_id, **kwargs):
project_type = parameters.get_single_parameter(kwargs, 'project_type')
release = parameters.get_single_parameter(kwargs, 'release')
module = helpers.extend_module(module_id, project_type, release)
if not module:
flask.abort(404)
return module
@app.route('/api/1.0/members')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['release', 'project_type', 'module'])
@decorators.jsonify('members')
@decorators.record_filter(ignore=['release', 'project_type', 'module'])
def get_members(records, **kwargs):
response = []
for record in records:
record = vault.extend_record(record)
nr = dict([(k, record[k]) for k in
['author_name', 'date', 'company_name', 'member_uri']])
nr['date_str'] = helpers.format_date(nr['date'])
response.append(nr)
response.sort(key=lambda x: x['date'], reverse=True)
utils.add_index(response)
return response
@app.route('/api/1.0/stats/bp')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter()
def get_bpd(records, **kwargs):
result = []
for record in records:
if record.record_type in ['bpd', 'bpc']:
record = vault.extend_record(record)
mention_date = record.get('mention_date')
if mention_date:
date = helpers.format_date(mention_date)
else:
date = 'never'
result.append({
'date': date,
'status': record['lifecycle_status'],
'metric': record.get('mention_count') or 0,
'id': record['name'],
'name': record['name'],
'link': helpers.make_blueprint_link(record['module'],
record['name'])
})
result.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(result)
return result
@app.route('/api/1.0/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['language'])
@decorators.jsonify()
@decorators.record_filter(ignore=['language'])
def get_languages_json(record_ids, **kwargs):
memory_storage = vault.get_memory_storage()
languages = set(r.value for r in memory_storage.get_records(record_ids))
return [{'id': c.lower().replace('&', ''), 'text': c}
for c in sorted(languages)]
@app.route('/api/1.0/stats/languages')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('stats')
@decorators.record_filter(ignore=['language'])
def get_languages(records, **kwargs):
result = []
languages = collections.defaultdict(int)
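    # sum record.loc per language; 'tr' presumably marks translation records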
for record in records:
if record.record_type in ['tr']:
languages[record.value] += record.loc
for lang, val in six.iteritems(languages):
result.append({
'id': lang,
'name': lang,
'metric': val,
})
result.sort(key=lambda x: x['metric'], reverse=True)
utils.add_index(result)
return result
@app.route('/api/1.0/users')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=['user_id'])
@decorators.jsonify()
@decorators.record_filter(ignore=['user_id'])
def get_users_json(record_ids, **kwargs):
core_in = parameters.get_single_parameter(kwargs, 'core_in') or None
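    # 'core_in' restricts the result to users who are core reviewers in the
    # listed branches, limited to modules valid for the current project type
    # and release filters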
valid_modules = set()
if core_in:
core_in = set(core_in.split(','))
valid_modules = vault.resolve_project_types(
kwargs['_params']['project_type'])
valid_modules = set(m[0] for m in vault.resolve_modules(
valid_modules, kwargs['_params']['release']))
user_ids = vault.get_memory_storage().get_index_keys_by_record_ids(
'user_id', record_ids)
if kwargs['_params']['user_id']:
user_ids.add(kwargs['_params']['user_id'][0])
result = []
for user_id in user_ids:
user = vault.get_user_from_runtime_storage(user_id)
r = {'id': user_id, 'text': user.get('user_name') or user['user_id']}
add_flag = not core_in
if core_in and user.get('core'):
core_modules = [module_branch[0] for module_branch in user['core']
if (module_branch[1] in core_in and
module_branch[0] in valid_modules)]
if core_modules:
r['core'] = core_modules
if user['companies']:
r['company_name'] = helpers.get_current_company(user)
add_flag = True
if add_flag:
result.append(r)
result.sort(key=lambda x: x['text'])
return result
@app.route('/api/1.0/users/<user_id>')
@decorators.response()
@decorators.jsonify('user')
def get_user(user_id):
user = vault.get_user_from_runtime_storage(user_id)
if not user:
flask.abort(404)
user = helpers.extend_user(user)
return user
@app.route('/api/1.0/releases')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_releases_json(**kwargs):
releases = [{'id': release['release_name'],
'text': release['release_name'].capitalize()}
for release in vault.get_vault()['releases'].values()]
releases.append({'id': 'all', 'text': 'All'})
releases.reverse()
return (releases, parameters.get_default('release'))
@app.route('/api/1.0/metrics')
@decorators.exception_handler()
@decorators.response()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_metrics_json(**kwargs):
return (sorted([{'id': m, 'text': t} for m, t in
six.iteritems(parameters.METRIC_LABELS)],
key=operator.itemgetter('text')),
parameters.get_default('metric'))
@app.route('/api/1.0/project_types')
@decorators.response()
@decorators.exception_handler()
@decorators.cached(ignore=parameters.FILTER_PARAMETERS)
@decorators.jsonify(root=('data', 'default'))
def get_project_types_json(**kwargs):
return ([{'id': pt['id'], 'text': pt['title'],
'child': pt.get('child', False)}
for pt in vault.get_project_types()],
parameters.get_default('project_type'))
@app.route('/api/1.0/affiliation_changes')
@decorators.exception_handler()
@decorators.response()
@decorators.jsonify('affiliation_changes')
def get_company_changes(**kwargs):
start_days = str(flask.request.args.get('start_days') or
utils.timestamp_to_date(int(time.time()) -
365 * 24 * 60 * 60))
end_days = str(flask.request.args.get('end_days') or
utils.timestamp_to_date(int(time.time())))
start_date = utils.date_to_timestamp_ext(start_days)
end_date = utils.date_to_timestamp_ext(end_days)
runtime_storage = vault.get_runtime_storage()
result = []
for user in runtime_storage.get_all_users():
companies = user.get('companies') or []
if len(companies) < 2:
continue
companies_iter = iter(companies)
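        # walk the affiliation history pairwise: each reported change pairs
        # the previous company's end_date with the name of the company that
        # follows it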
        company = next(companies_iter)
old_company_name = company['company_name']
date = company['end_date']
for company in companies_iter:
new_company_name = company['company_name']
if start_date <= date <= end_date:
result.append({
'user_id': user['user_id'],
'user_name': user['user_name'],
'old_company_name': old_company_name,
'new_company_name': new_company_name,
'date': date,
})
old_company_name = new_company_name
date = company['end_date']
return result
def _get_week(kwargs, param_name):
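    # resolve a date-like request parameter (or the vault-wide default for
    # param_name) into an internal week number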
date_param = parameters.get_single_parameter(kwargs, param_name)
if date_param:
ts = utils.date_to_timestamp_ext(date_param)
else:
ts = vault.get_vault()[param_name]
return utils.timestamp_to_week(ts)
@app.route('/api/1.0/stats/timeline')
@decorators.exception_handler()
@decorators.response()
@decorators.cached()
@decorators.jsonify('timeline')
@decorators.record_filter(ignore=['release', 'start_date'])
def timeline(records, **kwargs):
# find start and end dates
metric = parameters.get_parameter(kwargs, 'metric')
start_date = int(parameters.get_single_parameter(kwargs, 'start_date')
or 0)
release_name = parameters.get_single_parameter(kwargs, 'release') or 'all'
releases = vault.get_vault()['releases']
if 'all' in release_name:
start_week = release_start_week = _get_week(kwargs, 'start_date')
end_week = release_end_week = _get_week(kwargs, 'end_date')
else:
release = releases[release_name]
start_week = release_start_week = utils.timestamp_to_week(
release['start_date'])
end_week = release_end_week = utils.timestamp_to_week(
release['end_date'])
now = utils.timestamp_to_week(int(time.time())) + 1
# expand start-end to year if needed
if release_end_week - release_start_week < 52:
expansion = (52 - (release_end_week - release_start_week)) // 2
if release_end_week + expansion < now:
end_week += expansion
else:
end_week = now
start_week = end_week - 52
# empty stats for all weeks in range
weeks = range(start_week, end_week)
week_stat_loc = dict((c, 0) for c in weeks)
week_stat_commits = dict((c, 0) for c in weeks)
week_stat_commits_hl = dict((c, 0) for c in weeks)
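    # per-record contribution functions: commits_handler counts records
    # (or sums loc for translation metrics); loc_handler sums loc only for
    # commit/loc metrics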
commits_handler = lambda record: 1
if 'translations' in metric:
commits_handler = lambda record: record.loc
if ('commits' in metric) or ('loc' in metric):
loc_handler = lambda record: record.loc
else:
loc_handler = lambda record: 0
# fill stats with the data
if 'person-day' in metric:
# special case for man-day effort metric
release_stat = collections.defaultdict(set)
all_stat = collections.defaultdict(set)
for record in records:
if start_week <= record.week < end_week:
day = utils.timestamp_to_day(record.date)
user_id = record.user_id
if record.release == release_name:
release_stat[day].add(user_id)
all_stat[day].add(user_id)
for day, users in six.iteritems(release_stat):
week = utils.timestamp_to_week(day * 24 * 3600)
week_stat_commits_hl[week] += len(users)
for day, users in six.iteritems(all_stat):
week = utils.timestamp_to_week(day * 24 * 3600)
week_stat_commits[week] += len(users)
else:
for record in records:
week = record.week
if start_week <= week < end_week:
week_stat_loc[week] += loc_handler(record)
week_stat_commits[week] += commits_handler(record)
if 'members' in metric:
if record.date >= start_date:
week_stat_commits_hl[week] += 1
else:
if record.release == release_name:
week_stat_commits_hl[week] += commits_handler(record)
if 'all' == release_name and 'members' not in metric:
week_stat_commits_hl = week_stat_commits
# form arrays in format acceptable to timeline plugin
array_loc = []
array_commits = []
array_commits_hl = []
for week in weeks:
week_str = utils.week_to_date(week)
array_loc.append([week_str, week_stat_loc[week]])
array_commits.append([week_str, week_stat_commits[week]])
array_commits_hl.append([week_str, week_stat_commits_hl[week]])
return [array_commits, array_commits_hl, array_loc]
@app.template_test()
def too_old(timestamp):
age = CONF.age_warn
now = time.time()
return timestamp + age < now
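# Registered as a Jinja template test; templates can use it as, e.g.,
# `{% if record.date is too_old %}` (illustrative usage; the actual templates
# live outside this module).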
def main():
logging.register_options(CONF)
logging.set_defaults()
conf_file = os.getenv('STACKALYTICS_CONF')
if conf_file and os.path.isfile(conf_file):
CONF(default_config_files=[conf_file])
app.config['DEBUG'] = CONF.debug
LOG.info('Stackalytics.dashboard is configured via "%s"', conf_file)
else:
CONF(project='stackalytics')
logging.setup(CONF, 'stackalytics.dashboard')
app.run(CONF.listen_host, CONF.listen_port)
if __name__ == '__main__':
main()
| apache-2.0 | -1,381,392,780,574,979,000 | 33.088112 | 79 | 0.621261 | false |
olga-perederieieva/pyDEA | pyDEA/core/gui_modules/table_gui.py | 1 | 57836 | ''' This module contains classes responsible for displaying input data
in a table (TableFrame and TableFrameWithInputOutputBox).
It also contains many classes necessary for TableFrameWithInputOutputBox.
Attributes:
CELL_WIDTH (int): constant that defined width of a cell in a table
'''
from tkinter import S, N, E, W, END, VERTICAL, HORIZONTAL, ALL
from tkinter import IntVar, DISABLED, StringVar, NORMAL
from tkinter.ttk import Frame, Entry, Scrollbar, Checkbutton
from pyDEA.core.gui_modules.scrollable_frame_gui import MouseWheel
from pyDEA.core.utils.dea_utils import is_valid_coeff, NOT_VALID_COEFF, VALID_COEFF
from pyDEA.core.utils.dea_utils import WARNING_COEFF, EMPTY_COEFF, CELL_DESTROY
from pyDEA.core.utils.dea_utils import CHANGE_CATEGORY_NAME, INPUT_OBSERVER
from pyDEA.core.utils.dea_utils import OUTPUT_OBSERVER, on_canvas_resize
from pyDEA.core.utils.dea_utils import validate_category_name, calculate_nb_pages
from pyDEA.core.gui_modules.custom_canvas_gui import StyledCanvas
from pyDEA.core.data_processing.read_data_from_xls import convert_to_dictionary
CELL_WIDTH = 10
class TableFrame(Frame):
''' This class is a base class that defines minimal functionality of
a table.
Attributes:
parent (Tk object): parent of this widget.
nb_rows (int): number of rows of the table.
nb_cols (int): number of columns of the table.
cells (list of list of Entry): list with Entry widgets
(or derivatives of Entry)
that describes the table and its content.
canvas (Canvas): canvas that holds all widgets
(it is necessary to make the table scrollable).
frame_with_table (Frame): frame that holds all widgets.
Args:
parent (Tk object): parent of this widget.
nb_rows (int, optional): number of rows of the table,
defaults to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
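    # Typical usage (illustrative sketch; assumes a Tk root window and a
    # list-of-lists `data` are available):
    #   table = TableFrame(root, data, nb_rows=10, nb_cols=4)
    #   table.grid(row=0, column=0, sticky=N+S+E+W)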
def __init__(self, parent, data, nb_rows=20, nb_cols=5):
Frame.__init__(self, parent)
self.data = data
self.parent = parent
self.nb_rows = nb_rows
self.nb_cols = nb_cols
self.cells = []
self.canvas = None
self.frame_with_table = None
self.create_widgets()
def create_widgets(self):
''' Creates all widgets.
'''
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
yScrollbar = Scrollbar(self, orient=VERTICAL)
yScrollbar.grid(row=0, column=1, sticky=N+S)
xScrollbar = Scrollbar(self, orient=HORIZONTAL)
xScrollbar.grid(row=1, column=0, sticky=E+W)
canvas = StyledCanvas(self, yscrollcommand=yScrollbar.set,
xscrollcommand=xScrollbar.set, bd=0)
self.canvas = canvas
canvas.grid(row=0, column=0, sticky=N+S+W+E)
frame_with_table = Frame(canvas)
self.frame_with_table = frame_with_table
frame_with_table.grid(sticky=N+S+W+E, pady=15, padx=3)
for i in range(2, self.nb_rows + 2):
cols = []
for j in range(1, self.nb_cols + 1):
ent = self.create_entry_widget(frame_with_table)
ent.grid(row=i, column=j, sticky=N+S+E+W)
cols.append(ent)
self.cells.append(cols)
canvas.create_window(0, 0, window=frame_with_table, anchor='nw')
canvas.update_idletasks()
yScrollbar['command'] = canvas.yview
xScrollbar['command'] = canvas.xview
self._update_scroll_region()
MouseWheel(self).add_scrolling(canvas, yscrollbar=yScrollbar)
def create_entry_widget(self, parent):
''' Creates Entry widget.
Args:
parent (Tk object): parent of the Entry widget.
Returns:
Entry: created Entry widget.
'''
return Entry(parent, width=CELL_WIDTH)
def add_row(self):
''' Adds one row to the end of the table.
'''
self.cells.append([])
for j in range(self.nb_cols):
grid_row_index = self.nb_rows + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=j + 1, sticky=N+S+E+W)
self.cells[self.nb_rows].append(ent)
self.nb_rows += 1
self._update_scroll_region()
def add_column(self):
''' Adds one column to the end of the table.
'''
for i in range(self.nb_rows):
grid_row_index = i + 2
ent = self.create_entry_widget(self.frame_with_table)
ent.grid(row=grid_row_index, column=self.nb_cols + 1,
sticky=N+S+E+W)
self.cells[i].append(ent)
self.nb_cols += 1
self._update_scroll_region()
def remove_row(self, row_index):
''' Removes row with a specified index from the table.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
# forbid deleting first row
if self.should_remove_row(row_index):
for j in range(self.nb_cols):
self.before_cell_destroy(self.cells[row_index][j])
self.cells[row_index][j].destroy()
for i in range(row_index + 1, self.nb_rows):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(row=i + 1)
self.cells.remove(self.cells[row_index])
self.nb_rows -= 1
self._update_scroll_region()
return True
return False
def should_remove_row(self, row_index):
''' Checks if row with a specified row index can be removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row_index is >= 1 and < total number of rows,
False otherwise.
'''
return row_index >= 1 and row_index < self.nb_rows
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of
columns of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
# do not allow to delete first column
if column_index > 0 and column_index < self.nb_cols:
for i in range(self.nb_rows):
self.cells[i][column_index].destroy()
for j in range(column_index + 1, self.nb_cols):
self.cells[i][j].grid_remove()
self.cells[i][j].grid(column=j)
self.cells[i].remove(self.cells[i][column_index])
self.nb_cols -= 1
self._update_scroll_region()
return True
return False
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
In this class this method does nothing, but can be redefined
in children classes.
Args:
cell (Entry): cell that will be destroyed after call to
this method.
'''
pass
def clear_all_data(self):
''' Clears all data from all cells.
'''
for i in range(self.nb_rows):
for j in range(self.nb_cols):
self.before_cell_clear(self.cells[i][j])
self.cells[i][j].delete(0, END)
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
In this class this method does nothing, but can be redefined
in children classes.
Args:
cell (Entry): cell that will be cleared after call
to this method.
'''
pass
def _update_scroll_region(self):
''' Updates scroll region. This method must be called each
time table size or number of columns or rows change.
'''
# ensures that bbox will calculate border correctly
self.frame_with_table.update()
on_canvas_resize(self.canvas)
def read_coefficients(self):
''' Converts data stored as a list to a proper dictionary
necessary for constructing data instance.
'''
return convert_to_dictionary(self.data, self.check_value)
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
In this class it always returns True and can be redefined in
children classes.
'''
return True
class TableFrameWithInputOutputBox(TableFrame):
''' Extends TableFrame with extra functionality necessary for data
modification and choosing input and output categories.
Attributes:
params_frame (ParamsFrame): frame with parameters, this
class communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
panel_text_observer (PanelTextObserver): observer that adds star to
label frame of the parent of this widget.
This class notifies panel_text_observer
when data was modified.
frames (list of Frame): list of frames that hold Checkbuttons for
choosing input and output categories.
row_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing rows.
col_checkboxes (list of Checkbutton): list of Checkbuttons used
for removing columns.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object that
is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file
and input and output categories
must be checked depending on parameters file.
data (list of list of str or float): input data, it might
be modified by this class.
Args:
parent (Tk object): parent of this widget.
params_frame (ParamsFrame): frame with parameters, this class
communicates
with params_frame when data is loaded or modified.
combobox_text_var (StringVar): StringVar object that stores
categorical category.
current_categories (list of str): list of current valid categories.
This class might modify this list.
str_var_for_input_output_boxes (StringVar): StringVar object
that is used for communication
with ParamsFrame. If the content of
str_var_for_input_output_boxes was modified,
it means that data was loaded from parameters file and input
and output categories
must be checked depending on parameters file.
if_text_modified_str (StringVar): StringVar object that is used
by PanelTextObserver, its content is modified when data
was modified.
data (list of list of str or float): input data, it might be
modified by this class.
nb_rows (int, optional): number of rows of the table, defaults
to 20.
nb_cols (int, optional): number of columns of the table,
defaults to 5.
'''
def __init__(self, parent, params_frame,
combobox_text_var, current_categories,
str_var_for_input_output_boxes,
if_text_modified_str, data,
nb_rows=20, nb_cols=5):
self.params_frame = params_frame
self.combobox_text_var = combobox_text_var
self.panel_text_observer = PanelTextObserver(if_text_modified_str)
self.frames = []
self.row_checkboxes = []
self.col_checkboxes = []
self.current_categories = current_categories
self.str_var_for_input_output_boxes = str_var_for_input_output_boxes
self.str_var_for_input_output_boxes.trace('w', self.on_load_categories)
super().__init__(parent, data, nb_rows, nb_cols)
def create_widgets(self):
''' Creates widgets of this class.
'''
super().create_widgets()
for column_index in range(self.nb_cols - 1):
self._create_input_output_box(column_index)
for row_index in range(self.nb_rows):
self.add_row_check_box(row_index)
# add observers to add * in the first column
for row_index in range(self.nb_rows):
self.cells[row_index][0].panel_text_observer = self.panel_text_observer
def create_entry_widget(self, parent):
''' Creates SelfValidatingEntry widget.
Args:
parent (Tk object): parent of the SelfValidatingEntry widget.
Returns:
SelfValidatingEntry: created SelfValidatingEntry widget.
'''
return SelfValidatingEntry(parent, self.data, self.cells, width=CELL_WIDTH)
def deselect_all_boxes(self):
''' Deselects all Checkbuttons used for choosing input and
output categories.
'''
for frame in self.frames:
for child in frame.winfo_children():
child.deselect()
def _create_input_output_box(self, column_index):
''' Creates Checkbuttons used for choosing input and output categories.
Args:
column_index (int): index of a column for which
Checkbuttons must be created.
'''
frame_for_btns = Frame(self.frame_with_table)
self.frames.append(frame_for_btns)
input_var = IntVar()
output_var = IntVar()
input_btn = ObserverCheckbutton(
frame_for_btns, input_var, output_var,
self.params_frame.input_categories_frame,
self.params_frame.output_categories_frame,
self.current_categories, self.cells, INPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var,
text='Input', state=DISABLED)
input_btn.grid(row=1, column=0, sticky=N+W)
output_btn = FollowingObserverCheckbutton(
frame_for_btns, output_var, input_var,
self.params_frame.output_categories_frame,
self.params_frame.input_categories_frame,
self.current_categories, self.cells, OUTPUT_OBSERVER,
self.params_frame.change_category_name,
self.data, self.combobox_text_var, input_btn,
text='Output', state=DISABLED)
output_btn.grid(row=2, column=0, sticky=N+W)
self._add_observers(input_btn, output_btn, column_index + 1)
var = IntVar()
column_checkbox = CheckbuttonWithVar(frame_for_btns, var)
column_checkbox.grid(row=0, column=0)
self.col_checkboxes.append((column_checkbox, var))
frame_for_btns.grid(row=1, column=column_index + 2, sticky=N)
def _add_observers(self, input_btn, output_btn, column_index):
''' Adds observers to newly created cells in a given column.
Args:
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used
to select output categories.
column_index (int): index of the column to cells of
which observers must be added.
'''
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for row_index in range(self.nb_rows):
self._add_observers_to_cell(self.cells[row_index][column_index],
names_modifier, input_btn, output_btn)
def _add_observers_to_cell(self, cell, names_modifier, input_btn,
output_btn):
''' Adds given observers to a given cell.
Args:
cell (SelfValidatingEntry): cell where observers must be added.
names_modifier (DefaultCategoriesAndDMUModifier): observer,
for details see DefaultCategoriesAndDMUModifier.
input_btn (ObserverCheckbutton): observer used to select
input categories.
output_btn (FollowingObserverCheckbutton): observer used to
select output categories.
'''
cell.observers.append(names_modifier) # IMPORTANT:
# this observer MUST be added first, it modifies data that
# is used by other observers!
cell.observers.append(input_btn)
cell.observers.append(output_btn)
cell.panel_text_observer = self.panel_text_observer
def on_load_categories(self, *args):
''' Selects input and output categories when data is loaded from
parameters file. Args are provided by the StringVar trace
methods and are ignored in this method.
'''
for frame in self.frames:
for child in frame.winfo_children():
try:
category = child.get_category()
except AttributeError:
pass
else:
if (child.observer_type == INPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.input_categories):
child.select()
if (child.observer_type == OUTPUT_OBSERVER and
child.get_category() in
self.str_var_for_input_output_boxes.output_categories):
child.select()
def add_row_check_box(self, row_index):
''' Adds Checkbutton used for removing rows to a given row.
Args:
row_index (int): index of row to which Checkbutton
must be added.
'''
if row_index >= 1:
var = IntVar()
row_checkbox = Checkbutton(self.frame_with_table, variable=var)
self.row_checkboxes.append((row_checkbox, var))
row_checkbox.grid(row=row_index + 2, column=0)
else:
self.row_checkboxes.append((None, None))
def add_column(self):
''' Adds one column to the end of table.
'''
super().add_column()
self._create_input_output_box(self.nb_cols - 2)
def add_row(self):
''' Adds one row to the end of table.
Note: When data is spread across several pages, addition of
row must also update the display of data.
This functionality is implemented in TableModifierFrame.
'''
super().add_row()
self.add_row_check_box(self.nb_rows - 1)
names_modifier = DefaultCategoriesAndDMUModifier(
self.cells, self.current_categories)
for col in range(1, self.nb_cols):
input_btn, output_btn = self.get_check_boxes(col - 1)
self._add_observers_to_cell(self.cells[self.nb_rows - 1][col],
names_modifier,
input_btn, output_btn)
def get_check_boxes(self, column_index):
''' Gets Checkbuttons used for selecting input and output categories
for a given column.
Args:
column_index (int): index of the column for which Checkbuttons
must be returned.
Returns:
tuple of ObserverCheckbutton, FollowingObserverCheckbutton:
tuple of observers
or None, None if no observers were found.
'''
if column_index < 0 or column_index >= len(self.frames):
return None, None
input_btn = None
output_btn = None
for child in self.frames[column_index].winfo_children():
try:
observer_type = child.observer_type
except AttributeError:
pass
else:
if observer_type == INPUT_OBSERVER:
input_btn = child
elif observer_type == OUTPUT_OBSERVER:
output_btn = child
return input_btn, output_btn
def remove_column(self, column_index):
''' Removes column with a specified index from the table.
If column index is zero or larger than the total number of columns
of the table, no column is removed.
Args:
column_index (int): index of the column to remove.
Returns:
bool: True if column was removed, False otherwise.
'''
# we must record category name before removing column,
# because it will disappear
if column_index < len(self.cells[0]):
category_name = self.cells[0][column_index].get().strip()
else:
category_name = ''
if super().remove_column(column_index):
col = column_index - 1
if category_name:
self.params_frame.input_categories_frame.remove_category(
category_name)
self.params_frame.output_categories_frame.remove_category(
category_name)
if col < len(self.current_categories):
self.current_categories[col] = ''
# remove from data only if category is present
if self.data:
column_with_data_removed = False
for row_index in range(len(self.data)):
if column_index < len(self.data[row_index]):
self.data[row_index].pop(column_index)
column_with_data_removed = True
if column_with_data_removed:
for row in range(1, self.nb_rows):
for j in range(column_index, self.nb_cols):
self.cells[row][j].data_column -= 1
self.panel_text_observer.change_state_if_needed()
self.frames[col].destroy()
for i in range(col + 1, len(self.frames)):
self.frames[i].grid_remove()
self.frames[i].grid(column=i + 1)
self.frames.pop(col)
self.col_checkboxes.pop(col)
return True
return False
def remove_row(self, row_index):
''' Removes data row with a specified index from the table.
        When data spans several pages, a replacement row is added after
        removal so the visible grid keeps its size.
If row_index is zero or larger than the total number of rows,
no row is removed.
Args:
row_index (int): index of the row to remove.
Returns:
bool: True if row was deleted, False otherwise.
'''
if self.should_remove_row(row_index):
if self.data:
nb_pages = calculate_nb_pages(len(self.data), self.nb_rows)
data_index = self.get_data_index(row_index)
nb_cols = len(self.cells[row_index])
if data_index != -1 and data_index < len(self.data):
nb_rows_to_change = min(self.nb_rows, len(self.data) + 1)
self.data.pop(data_index)
for row in range(row_index + 1, nb_rows_to_change):
for col in range(0, nb_cols):
if self.cells[row][col].data_row != -1:
self.cells[row][col].data_row -= 1
self.panel_text_observer.change_state_if_needed()
super().remove_row(row_index)
if (nb_pages > 1):
self.add_row()
else:
super().remove_row(row_index)
self.row_checkboxes[row_index][0].destroy()
for i in range(row_index + 1, len(self.row_checkboxes)):
self.row_checkboxes[i][0].grid_remove()
self.row_checkboxes[i][0].grid(row=i + 1)
self.row_checkboxes.pop(row_index)
return True
return False
def get_data_index(self, row_index):
for j in range(0, len(self.cells[row_index])):
if self.cells[row_index][j].data_row != -1:
return self.cells[row_index][j].data_row
return -1
def before_cell_destroy(self, cell):
''' This method is called before a table cell is destroyed.
Notifies observers if data is not empty.
Args:
cell (SelfValidatingEntry): cell that will be destroyed
after call to this method.
'''
info = cell.grid_info()
col = int(info['column'])
row = int(info['row'])
if len(self.data) == 0:
cell.notify_observers(CELL_DESTROY, row, col)
def load_visible_data(self):
''' Displays data in the table. First, it adds more rows to fill
the frame, second, it displays data that fits the table.
'''
self.add_rows_to_fill_visible_frame()
self.display_data()
def display_data(self, start_row=0):
''' Displays data starting from a given data row.
This method is usually called by NavigationForTableFrame when
data spans across
several pages and users clicks on page navigation buttons.
Args:
start_row (int, optional): index of input data starting
from which data should be displayed, defaults to 0.
'''
nb_data_rows = len(self.data)
nb_displayed_rows = 0
for row_index in range(start_row, nb_data_rows):
values = self.data[row_index]
# do not insert data that is not visible
if nb_displayed_rows + 1 >= self.nb_rows:
return
for column_index, coeff in enumerate(values):
# row_index + 1 - first row has categories
self._display_one_cell(nb_displayed_rows, column_index,
coeff, row_index,
column_index, False)
row_index += 1
nb_displayed_rows += 1
if len(self.data) > 0:
nb_cols = len(self.data[0])
else:
nb_cols = self.nb_cols
nb_rows = self.nb_rows - 1 # -1 because we add +1 to row_index
while nb_displayed_rows < nb_rows:
for column_index in range(nb_cols):
self._display_one_cell(nb_displayed_rows, column_index, '',
-1, -1, False)
nb_displayed_rows += 1
def _display_one_cell(self, row_index, column_index, value_to_dispay,
data_row, data_col, modify_data=True):
''' Displays data in a cell and sets cell's fields to proper values.
Args:
row_index (int): index of a row where the cell is.
column_index (int): index of a column where the cell is.
value_to_dispay (str): new cell value_to_dispay.
data_row (int): row index of input data.
data_col (int): column index of input data.
modify_data (bool, optional): True if data was modified and
observers
must be notified, False otherwise.
'''
cell_row_index = row_index + 1
self.cells[cell_row_index][column_index].modify_data = modify_data
self.cells[cell_row_index][column_index].text_value.set(value_to_dispay)
self.cells[cell_row_index][column_index].data_row = data_row
self.cells[cell_row_index][column_index].data_column = data_col
def add_rows_to_fill_visible_frame(self):
        ''' Adds rows to the table until the visible frame is filled. Usually a
        few extra rows are added, which activates the scrollbar.
        The exact number of added rows depends on the operating system,
        widget heights and screen size.
'''
self.canvas.update_idletasks()
frame_height = self.canvas.winfo_height()
while self.canvas.bbox(ALL)[3] <= frame_height - 20:
self.add_row()
self._update_scroll_region()
def check_value(self, count):
''' This method is called in read_coefficients method to check what
values must be returned for data instance construction.
Args:
count (int): data column index.
Returns:
bool: True if the category in the given column index is not
an empty string,
False otherwise.
'''
if self.current_categories[count]:
return True
return False
def clear_all_data(self):
''' Clears all data from all cells and clears input data.
'''
self.data.clear()
super().clear_all_data()
self.current_categories.clear()
# reset modify data back to true
for cell_row in self.cells:
for cell in cell_row:
cell.modify_data = True
def before_cell_clear(self, cell):
''' This method is called before data is cleared from a given cell.
It sets fields of the given cell to initial values.
Args:
cell (SelfValidatingEntry): cell that will be cleared after
call to this method.
'''
cell.modify_data = False
cell.data_row = -1
cell.data_column = -1
class ObserverCheckbutton(Checkbutton):
''' This class implements Checkbutton for choosing input/output categories.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton is
selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
            must be deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column
as the entire column of table entries is gridded, because
this class uses parent grid column index to determine
the column where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays selected
input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame must be CategoriesCheckBox object that
displays selected output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame
displays input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for possible
values see dea_utils.
change_category_name (callable function): this function is
called when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
Arguments are the same as attributes.
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, change_category_name, data,
combobox_text_var, *args, **kw):
Checkbutton.__init__(self, parent, variable=var,
command=self._process, *args, **kw)
self.var = var
self.opposite_var = opposite_var
self.parent = parent
self.category_frame = category_frame
self.opposite_category_frame = opposite_category_frame
self.current_categories = current_categories
self.cells = cells
self.data = data
self.observer_type = observer_type
self.change_category_name = change_category_name
self.combobox_text_var = combobox_text_var
def _process(self):
''' This method is called when user clicks on Checkbutton.
Makes sure that the same category can be only input or only
output, but not both, and that selected category cannot also
be selected as a categorical category.
'''
category_name = self.get_category()
if self.var.get() == 1:
self.opposite_var.set(0)
if category_name:
self.category_frame.add_category(category_name)
self.opposite_category_frame.remove_category(category_name)
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
elif category_name:
self.category_frame.remove_category(category_name)
def deselect(self):
''' Deselects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(0)
def select(self):
''' Selects Checkbutton.
Note:
method _process() is not called in this case.
'''
self.var.set(1)
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data or categories were modified.
Also modifies current_categories if needed.
This widget becomes disabled if invalid category name value or input
data value were provided by user.
Args:
entry (SelfValidatingEntry): Entry widget whose content was
modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if entry_state == CHANGE_CATEGORY_NAME:
old_name = ''
internal_col = col - 2
if internal_col < len(self.current_categories):
old_name = self.current_categories[internal_col]
category_name = validate_category_name(
self.cells[0][col - 1].text_value.get().strip(),
internal_col, self.current_categories)
if category_name:
index = len(self.current_categories)
while index <= internal_col:
self.current_categories.append('')
index += 1
self.current_categories[internal_col] = category_name
if old_name:
# change category name in params_frame
self.change_category_name(old_name.strip(), category_name)
self.change_state_based_on_data(entry, entry_state, row, col)
entry.config(foreground='black')
else:
# if category name is empty, disable
self.disable(internal_col, old_name)
entry.config(foreground='red')
else:
self.change_state_based_on_data(entry, entry_state, row, col)
def change_state_based_on_data(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
internal_col = col - 2
# IMPORTANT: read from cells, not from current_categories, they might
# be empty at this stage
category_name = self.cells[0][col - 1].text_value.get().strip()
nb_rows = len(self.data)
if nb_rows == 0:
self.disable(internal_col, category_name)
return
elif len(self.data[0]) == 0:
self.disable(internal_col, category_name)
return
has_one_valid_entry = False
for row_index in range(nb_rows):
# can happen if some values are empty
while col - 1 >= len(self.data[row_index]):
self.data[row_index].append('')
try:
# col - 1 - first column contains DMU names
data_elem = float(self.data[row_index][col - 1])
except ValueError:
state = NOT_VALID_COEFF
else:
state = is_valid_coeff(data_elem)
if state == NOT_VALID_COEFF:
has_one_valid_entry = False
self.disable(internal_col, category_name)
return
elif state == VALID_COEFF or state == WARNING_COEFF:
has_one_valid_entry = True
if has_one_valid_entry:
self.config(state=NORMAL)
if category_name:
if category_name not in self.current_categories:
assert internal_col < len(self.current_categories)
self.current_categories[internal_col] = category_name
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
return
def disable(self, internal_col, category_name):
''' Disables Checkbutton.
Args:
internal_col (int): internal column index.
category_name (str): name of category.
'''
self.config(state=DISABLED)
if category_name:
if self.var.get() == 1:
self.category_frame.remove_category(category_name)
if self.opposite_var.get() == 1:
self.opposite_category_frame.remove_category(category_name)
if category_name in self.current_categories:
assert(internal_col < len(self.current_categories))
self.current_categories[internal_col] = ''
if category_name == self.combobox_text_var.get():
self.combobox_text_var.set('')
def get_category(self):
''' Finds category name stored in the corresponding Entry widget
based on where parent of Checkbutton was gridded.
Returns:
str: category name, might be empty string.
'''
info = self.parent.grid_info()
        # conversion to int is necessary for Windows
# for some reason in Windows grid info is stored as str
col = int(info['column'])
return self.cells[0][col - 1].text_value.get().strip()
class FollowingObserverCheckbutton(ObserverCheckbutton):
''' This class follows state of another ObserverCheckbutton that is
used to select input or output categories.
This class is used in order to skip checking if data is valid
second time. The first Checkbutton has already performed this check.
Attributes:
var (IntVar): variable that is set to 1 when Checkbutton
is selected, to 0 otherwise.
opposite_var (IntVar): variable of the other Checkbutton that
            must be deselected if this Checkbutton is selected.
parent (Tk object): frame that holds this Checkbutton.
Warning:
it is important for the parent to be gridded in the
same column as the entire column of table entries
is gridded, because this class uses parent grid column
index to determine the column
where the category name can be read from.
category_frame (CategoriesCheckBox): frame that displays
selected input or output categories.
Note:
if this Checkbutton is used to select input categories,
category_frame must be CategoriesCheckBox object that
displays selected input categories.
if this Checkbutton is used to select output categories,
category_frame
must be CategoriesCheckBox object that displays selected
output categories.
opposite_category_frame (CategoriesCheckBox): frame that displays
selected input or output categories. If category_frame displays
input categories, then opposite_category_frame
must display output categories, and vice versa.
current_categories (list of str): list of categories. This class
might modify this list by removing invalid categories and
adding the valid ones.
cells (list of list of SelfValidatingEntry): all entry widgets
collected in list.
data (list of list of str or float): input data.
observer_type (int): describes type of the observer, for
possible values see dea_utils.
change_category_name (callable function): this function is called
when name of a category was changed.
combobox_text_var (StringVar): variable of the combobox used for
selecting categorical category.
main_box (ObserverCheckbutton): Checkbutton that changes state
first. This Checkbutton changes its state to the same state
as main_box, but does not do extra things
that have been already performed by main_box
(changes to current_categories, for example).
'''
def __init__(self, parent, var, opposite_var, category_frame,
opposite_category_frame,
current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, main_box, *args, **kw):
super().__init__(parent, var, opposite_var, category_frame,
opposite_category_frame, current_categories, cells,
observer_type, params_frame, data,
combobox_text_var, *args, **kw)
self.main_box = main_box
def change_state_if_needed(self, entry, entry_state, row, col):
''' Changes state of Checkbutton when data was modified depending on
the state of main_box.
Args:
entry (SelfValidatingEntry): Entry widget whose content
was modified.
entry_state (int): state of the Entry widget after content
modification, for possible values see dea_utils module.
row (int): row index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
category_name = self.get_category()
if str(self.main_box.cget('state')) == DISABLED:
self.disable(col - 2, category_name)
else:
self.config(state=NORMAL)
if entry_state != CELL_DESTROY and self.var.get() == 1:
self.category_frame.add_category(category_name)
class DefaultCategoriesAndDMUModifier(object):
''' This class is responsible for adding automatic category and DMU names
if user starts typing data without providing such names first.
Attributes:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
Args:
cells (list of list of SelfValidatingEntry): list of all Entry
widgets with data.
current_categories (list of str): list of categories.
'''
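    # Example effect (sketch): typing a value into an otherwise empty row or
    # column produces automatic names such as 'DMU3' or 'Category2', written
    # by change_state_if_needed below.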
def __init__(self, cells, current_categories):
self.cells = cells
self.current_categories = current_categories
def change_state_if_needed(self, entry, entry_state, row, col):
''' Writes automatic category and DMU names if they were not
specified before.
Args:
entry (SelfValidatingEntry): Entry widget the content
of which was modified.
entry_state (int): constant that describes entry state,
for details see dea_utils module.
row (int): row index of entry widget. It is the real grid value,
we need to subtract 2 to get internal index.
col (int): column index of entry widget. It is the real grid
value, we need to subtract 2 to get internal index.
'''
if (entry_state != EMPTY_COEFF and entry_state != CELL_DESTROY and
entry_state != CHANGE_CATEGORY_NAME):
internal_row_index = row - 2
dmu_name = self.cells[internal_row_index][0].text_value.get().strip()
if not dmu_name:
self.cells[internal_row_index][0].text_value.set(
'DMU{0}'.format(internal_row_index))
category_name = self.cells[0][col - 1].text_value.get().strip()
if not category_name:
internal_col_index = col - 2
name = 'Category{0}'.format(internal_col_index)
if internal_col_index >= len(self.current_categories):
index = len(self.current_categories) - 1
while index != internal_col_index:
self.current_categories.append('')
index += 1
# category name MUST be written first, because next line calls
# ObserverCheckbutton
self.cells[0][col - 1].text_value.set(name)
class SelfValidatingEntry(Entry):
    ''' This class implements an Entry widget that knows how to highlight
invalid data. It also notifies other widgets if the content of
Entry changes. Other widgets must implement method
change_state_if_needed().
Such widgets should be appended to the list of listening widgets
called observers.
Attributes:
text_value (StringVar): textvariable of Entry widget that
calls method on_text_changed when the content on Entry changes.
observers (list of objects that implement method change_state_if_needed):
list of widgets or other objects that must be notified if the
content of Entry changes.
data_row (int): row index in data table which should be modified
when the content of Entry changes.
data_column (int): column index in data table which should be
modified when the content of Entry changes.
        data (list of list of str or float): data that will be modified.
modify_data (bool): True if data should be modified, False
otherwise. It is usually set to False when data is uploaded
from file.
panel_text_observer (PanelTextObserver): object that is notified
when data changes.
This object is responsible for adding star to file name when
data was modified.
        all_cells (list of list of SelfValidatingEntry): reference to where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
Args:
parent (Tk object): parent of this Entry widget.
        data (list of list of str or float): input data that will
be modified.
        all_cells (list of list of SelfValidatingEntry): reference to where all cells
are stored.
Warning: all cells must be created before any cell content
can be modified.
'''
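    # Objects appended to `observers` must provide
    # change_state_if_needed(entry, entry_state, row, col); see
    # ObserverCheckbutton above for the reference implementation.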
def __init__(self, parent, data, all_cells, *args, **kw):
self.text_value = StringVar(master=parent)
self.text_value.trace("w", self.on_text_changed)
super().__init__(parent, *args, **kw)
self.config(textvariable=self.text_value)
self.observers = []
self.all_cells = all_cells
self.data_row = -1
self.data_column = -1
self.data = data
self.modify_data = True
self.panel_text_observer = None
def on_text_changed(self, *args):
''' This method is called each time the content of Entry is modified.
It highlights invalid data, changes data if needed and notifies
other objects when data was changed.
Args are provided by StringVar trace method, but are not used.
'''
info = self.grid_info()
        # physical grid indices
col = int(info['column'])
row = int(info['row'])
self.notify_panel_observer()
if row == 2: # possibly name of category is modified
self.notify_observers(CHANGE_CATEGORY_NAME, row, col)
elif col == 1 and row > 2: # column with DMU names, strings are allowed
self.modify_data_if_needed(row, col)
elif col > 1 and row > 2: # everything left
self.modify_data_if_needed(row, col)
try:
value = float(self.text_value.get().strip())
except ValueError:
self.modify_data = True
self.config(foreground='red')
if len(self.text_value.get().strip()) == 0:
self.notify_observers(EMPTY_COEFF, row, col)
else:
self.notify_observers(NOT_VALID_COEFF, row, col)
return
text_status = is_valid_coeff(value)
if text_status == NOT_VALID_COEFF:
self.config(foreground='red')
elif text_status == WARNING_COEFF:
self.config(foreground='orange')
else:
self.config(foreground='black')
self.notify_observers(text_status, row, col)
self.modify_data = True
def modify_data_if_needed(self, row, col):
''' Modifies data if modify_data is set to True.
Adds empty strings to data when user modifies Entry for which
data_row or/and data_column are equal to -1. Updates data with new
values entered by user.
Args:
row (int): row where Entry is gridded
col (int): column where Entry is gridded
'''
if self.modify_data:
if self.data_row != -1 and self.data_column != -1:
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
else:
row_for_data = len(self.data)
added_rows = False
# -2 because row is physical grid index, not cell index
row_count = len(self.all_cells) - 1
for cells_row in reversed(self.all_cells):
if cells_row[0].data_row != -1:
break
row_count -= 1
if row_count == -1:
row_count = 0
while row_count < row - 2:
self.data.append([])
added_rows = True
row_count += 1
if added_rows:
self.data_row = len(self.data) - 1
else:
assert row_count >= row - 2
self.data_row = len(self.data) - 1 - (row_count - (row - 2))
col_for_data = len(self.data[self.data_row])
added_cols = False
max_nb_col = 0
nb_rows = len(self.data)
for r_ind in range(nb_rows):
row_len = len(self.data[r_ind])
if row_len > max_nb_col:
max_nb_col = row_len
max_nb_col = max(max_nb_col, col)
c_ind = col_for_data
while c_ind < max_nb_col:
self.data[self.data_row].append('')
grid_col = len(self.data[self.data_row])
self.all_cells[row - 2][grid_col - 1].data_row = self.data_row
self.all_cells[row - 2][grid_col - 1].data_column = c_ind
self.notify_observers(EMPTY_COEFF, row, grid_col)
added_cols = True
c_ind += 1
if (col_for_data < col):
col_for_data += 1
if added_cols:
for r_ind in range(nb_rows):
while len(self.data[r_ind]) < max_nb_col:
self.data[r_ind].append('')
grid_col = len(self.data[r_ind])
if r_ind >= self.data_row - (row - 3): # 3 is the first physical
# row with data on the page
grid_row = row - (self.data_row - r_ind)
self.all_cells[grid_row - 2][grid_col - 1].data_row = r_ind
self.all_cells[grid_row - 2][grid_col - 1].data_column = grid_col - 1
self.notify_observers(EMPTY_COEFF, grid_row, grid_col)
self.data_column = col_for_data - 1
else:
self.data_column = col - 1
self.data[self.data_row][self.data_column] = self.text_value.get().strip()
def notify_panel_observer(self):
''' Notifies panel observer that data was modified.
'''
if self.panel_text_observer is not None and self.modify_data is True:
self.panel_text_observer.change_state_if_needed()
def notify_observers(self, entry_state, row, col):
''' Notifies all observers stored in list of observers that data
was modified.
Args:
entry_state (int): state of the Entry widget that describes if
data is valid after modification, for possible values see
dea_utils module.
row (int): row where Entry is gridded.
col (int): column where Entry is gridded.
'''
for observer in self.observers:
observer.change_state_if_needed(self, entry_state, row, col)
class PanelTextObserver(object):
''' This class changes StringVar value that is traced in other classes.
Attributes:
if_text_modified_str (StringVar): StringVar object that
changes value when this observer is notified.
'''
def __init__(self, if_text_modified_str):
self.if_text_modified_str = if_text_modified_str
def change_state_if_needed(self):
''' Changes value of internal StringVar object.
'''
self.if_text_modified_str.set('*')
class CheckbuttonWithVar(Checkbutton):
''' Custom Checkbutton widget that provides deselect method.
Attributes:
var (IntVar): 0 if not selected, 1 otherwise.
Args:
parent (Tk object): parent of this widget.
var (IntVar): variable that controls if Checkbutton is selected.
'''
def __init__(self, parent, var, *args, **kw):
super().__init__(parent, variable=var, *args, **kw)
self.var = var
def deselect(self):
''' Deselects Checkbutton.
'''
self.var.set(0)
| mit | -219,998,321,544,119,700 | 42.22571 | 101 | 0.564147 | false |
sbesson/zeroc-ice | py/test/Ice/admin/Client.py | 1 | 1029 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice
slice_dir = Ice.getSliceDir()
if not slice_dir:
print(sys.argv[0] + ': Slice directory not found.')
sys.exit(1)
Ice.loadSlice("'-I" + slice_dir + "' Test.ice")
import AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def run(args, communicator):
AllTests.allTests(communicator)
return True
communicator = None
try:
communicator = Ice.initialize(sys.argv)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
| gpl-2.0 | 4,657,194,139,776,982,000 | 22.386364 | 72 | 0.578231 | false |
pydanny/dj-stripe | tests/test_event.py | 1 | 7576 | """
dj-stripe Event Model Tests.
"""
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from stripe.error import StripeError
from djstripe import webhooks
from djstripe.models import Event, Transfer
from . import FAKE_CUSTOMER, FAKE_EVENT_TRANSFER_CREATED, FAKE_TRANSFER
class EventTest(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="pydanny", email="[email protected]"
)
self.customer = FAKE_CUSTOMER.create_for_user(self.user)
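        # patch webhooks.call_handlers so each test can control or inspect
        # handler invocation without running real webhook handlers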
patcher = patch.object(webhooks, "call_handlers")
self.addCleanup(patcher.stop)
self.call_handlers = patcher.start()
def test_str(self):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
self.assertEqual(
"<type={type}, id={id}>".format(
type=FAKE_EVENT_TRANSFER_CREATED["type"],
id=FAKE_EVENT_TRANSFER_CREATED["id"],
),
str(event),
)
def test_invoke_webhook_handlers_event_with_log_stripe_error(self):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
self.call_handlers.side_effect = StripeError("Boom!")
with self.assertRaises(StripeError):
event.invoke_webhook_handlers()
def test_invoke_webhook_handlers_event_with_raise_stripe_error(self):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
self.call_handlers.side_effect = StripeError("Boom!")
with self.assertRaises(StripeError):
event.invoke_webhook_handlers()
def test_invoke_webhook_handlers_event_when_invalid(self):
event = self._create_event(FAKE_EVENT_TRANSFER_CREATED)
event.valid = False
event.invoke_webhook_handlers()
@patch(target="djstripe.models.core.transaction.atomic", autospec=True)
@patch.object(target=Event, attribute="_create_from_stripe_object", autospec=True)
@patch.object(target=Event, attribute="objects", autospec=True)
def test_process_event(
self, mock_objects, mock__create_from_stripe_object, mock_atomic
):
"""Test that process event creates a new event and invokes webhooks
when the event doesn't already exist.
"""
# Set up mocks
mock_objects.filter.return_value.exists.return_value = False
mock_data = {"id": "foo_id", "other_stuff": "more_things"}
result = Event.process(data=mock_data)
# Check that all the expected work was performed
mock_objects.filter.assert_called_once_with(id=mock_data["id"])
mock_objects.filter.return_value.exists.assert_called_once_with()
mock_atomic.return_value.__enter__.assert_called_once_with()
mock__create_from_stripe_object.assert_called_once_with(mock_data)
(
mock__create_from_stripe_object.return_value.invoke_webhook_handlers
).assert_called_once_with()
# Make sure the event was returned.
self.assertEqual(mock__create_from_stripe_object.return_value, result)
@patch(target="djstripe.models.core.transaction.atomic", autospec=True)
@patch.object(target=Event, attribute="_create_from_stripe_object", autospec=True)
@patch.object(target=Event, attribute="objects", autospec=True)
def test_process_event_exists(
self, mock_objects, mock__create_from_stripe_object, mock_atomic
):
"""
Test that process event returns the existing event and skips webhook processing
when the event already exists.
"""
# Set up mocks
mock_objects.filter.return_value.exists.return_value = True
mock_data = {"id": "foo_id", "other_stuff": "more_things"}
result = Event.process(data=mock_data)
# Make sure that the db was queried and the existing results used.
mock_objects.filter.assert_called_once_with(id=mock_data["id"])
mock_objects.filter.return_value.exists.assert_called_once_with()
mock_objects.filter.return_value.first.assert_called_once_with()
# Make sure the webhook actions and event object creation were not performed.
mock_atomic.return_value.__enter__.assert_not_called()
mock__create_from_stripe_object.assert_not_called()
(
mock__create_from_stripe_object.return_value.invoke_webhook_handlers
).assert_not_called()
# Make sure the existing event was returned.
self.assertEqual(mock_objects.filter.return_value.first.return_value, result)
@patch("djstripe.models.Event.invoke_webhook_handlers", autospec=True)
def test_process_event_failure_rolls_back(self, invoke_webhook_handlers_mock):
"""Test that process event rolls back event creation on error"""
class HandlerException(Exception):
pass
invoke_webhook_handlers_mock.side_effect = HandlerException
real_create_from_stripe_object = Event._create_from_stripe_object
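        # Call through to the real implementation so the Event row is actually
        # created inside the transaction, while the patch on
        # _create_from_stripe_object below still records the call for assertions.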
def side_effect(*args, **kwargs):
return real_create_from_stripe_object(*args, **kwargs)
event_data = deepcopy(FAKE_EVENT_TRANSFER_CREATED)
self.assertFalse(
Event.objects.filter(id=FAKE_EVENT_TRANSFER_CREATED["id"]).exists()
)
with self.assertRaises(HandlerException), patch(
"djstripe.models.Event._create_from_stripe_object",
side_effect=side_effect,
autospec=True,
) as create_from_stripe_object_mock:
Event.process(data=event_data)
create_from_stripe_object_mock.assert_called_once_with(event_data)
self.assertFalse(
Event.objects.filter(id=FAKE_EVENT_TRANSFER_CREATED["id"]).exists()
)
#
# Helpers
#
@patch("stripe.Event.retrieve", autospec=True)
def _create_event(self, event_data, event_retrieve_mock):
event_data = deepcopy(event_data)
event_retrieve_mock.return_value = event_data
event = Event.sync_from_stripe_data(event_data)
return event
class EventRaceConditionTest(TestCase):
@patch(
"stripe.Transfer.retrieve", return_value=deepcopy(FAKE_TRANSFER), autospec=True
)
def test_process_event_race_condition(self, transfer_retrieve_mock):
transfer = Transfer.sync_from_stripe_data(deepcopy(FAKE_TRANSFER))
transfer_retrieve_mock.reset_mock()
event_data = deepcopy(FAKE_EVENT_TRANSFER_CREATED)
# emulate the race condition in _get_or_create_from_stripe_object where
# an object is created by a different request during the call
#
# Sequence of events:
# 1) first Transfer.stripe_objects.get fails with DoesNotExist
# (due to it not existing in reality, but due to our side_effect in the test)
# 2) object is really created by a different request in reality
# 3) Transfer._create_from_stripe_object fails with IntegrityError due to
# duplicate id
# 4) second Transfer.stripe_objects.get succeeds
# (due to being created by step 2 in reality, due to side effect in the test)
side_effect = [Transfer.DoesNotExist(), transfer]
with patch(
"djstripe.models.Transfer.stripe_objects.get",
side_effect=side_effect,
autospec=True,
) as transfer_objects_get_mock:
Event.process(event_data)
self.assertEqual(transfer_objects_get_mock.call_count, 2)
self.assertEqual(transfer_retrieve_mock.call_count, 1)
| bsd-3-clause | -7,752,896,912,591,846,000 | 40.173913 | 88 | 0.663147 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/distutils/cygwinccompiler.py | 1 | 9736 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: cygwinccompiler.py
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
__revision__ = '$Id$'
import os
import sys
import copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos + 6:msc_pos + 10]
if msc_ver == '1300':
return [
'msvcr70']
if msc_ver == '1310':
return [
'msvcr71']
if msc_ver == '1400':
return [
'msvcr80']
if msc_ver == '1500':
return [
'msvcr90']
raise ValueError('Unknown MS Compiler version %s ' % msc_ver)
class CygwinCCompiler(UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = '.o'
static_lib_extension = '.a'
shared_lib_extension = '.dll'
static_lib_format = 'lib%s%s'
shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
status, details = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" % (
status, details))
if status is not CONFIG_H_OK:
self.warn("Python's pyconfig.h doesn't seem to support your compiler. Reason: %s. Compiling may fail because of undefined preprocessor macros." % details)
self.gcc_version, self.ld_version, self.dllwrap_version = get_versions()
self.debug_print(self.compiler_type + ': gcc %s, ld %s, dllwrap %s\n' % (
self.gcc_version,
self.ld_version,
self.dllwrap_version))
if self.ld_version >= '2.10.90':
self.linker_dll = 'gcc'
else:
self.linker_dll = 'dllwrap'
if self.ld_version >= '2.13':
shared_option = '-shared'
else:
shared_option = '-mdll -static'
self.set_executables(compiler='gcc -mcygwin -O -Wall', compiler_so='gcc -mcygwin -mdll -O -Wall', compiler_cxx='g++ -mcygwin -O -Wall', linker_exe='gcc -mcygwin', linker_so='%s -mcygwin %s' % (
self.linker_dll, shared_option))
if self.gcc_version == '2.91.57':
self.dll_libraries = [
'msvcrt']
self.warn('Consider upgrading to a newer version of gcc')
else:
self.dll_libraries = get_msvcr()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
try:
self.spawn(['windres', '-i', src, '-o', obj])
except DistutilsExecError as msg:
raise CompileError, msg
else:
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs)
except DistutilsExecError as msg:
raise CompileError, msg
def link(self, target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
libraries.extend(self.dll_libraries)
if export_symbols is not None and (target_desc != self.EXECUTABLE or self.linker_dll == 'gcc'):
temp_dir = os.path.dirname(objects[0])
dll_name, dll_extension = os.path.splitext(os.path.basename(output_filename))
def_file = os.path.join(temp_dir, dll_name + '.def')
lib_file = os.path.join(temp_dir, 'lib' + dll_name + '.a')
contents = [
'LIBRARY %s' % os.path.basename(output_filename),
'EXPORTS']
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents), 'writing %s' % def_file)
if self.linker_dll == 'dllwrap':
extra_preargs.extend(['--output-lib', lib_file])
extra_preargs.extend(['--def', def_file])
else:
objects.append(def_file)
if not debug:
extra_preargs.append('-s')
UnixCCompiler.link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, debug, extra_preargs, extra_postargs, build_temp, target_lang)
return
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(os.path.normcase(src_name))
if ext not in self.src_extensions + ['.rc', '.res']:
raise UnknownFileError, "unknown file type '%s' (from '%s')" % (
ext, src_name)
if strip_dir:
base = os.path.basename(base)
if ext == '.res' or ext == '.rc':
obj_names.append(os.path.join(output_dir, base + ext + self.obj_extension))
else:
obj_names.append(os.path.join(output_dir, base + self.obj_extension))
return obj_names
class Mingw32CCompiler(CygwinCCompiler):
compiler_type = 'mingw32'
def __init__(self, verbose=0, dry_run=0, force=0):
CygwinCCompiler.__init__(self, verbose, dry_run, force)
if self.ld_version >= '2.13':
shared_option = '-shared'
else:
shared_option = '-mdll -static'
if self.gcc_version <= '2.91.57':
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall', compiler_so='gcc -mno-cygwin -mdll -O -Wall', compiler_cxx='g++ -mno-cygwin -O -Wall', linker_exe='gcc -mno-cygwin', linker_so='%s -mno-cygwin %s %s' % (
self.linker_dll, shared_option,
entry_point))
self.dll_libraries = []
self.dll_libraries = get_msvcr()
CONFIG_H_OK = 'ok'
CONFIG_H_NOTOK = 'not ok'
CONFIG_H_UNCERTAIN = 'uncertain'
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
from distutils import sysconfig
import string
if string.find(sys.version, 'GCC') >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
else:
fn = sysconfig.get_config_h_filename()
try:
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError as exc:
return (
CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
if string.find(s, '__GNUC__') >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
return (
CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion', 'r')
out_string = out.read()
out.close()
result = re.search('(\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v', 'r')
out_string = out.read()
out.close()
result = re.search('(\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version', 'r')
out_string = out.read()
out.close()
result = re.search(' (\\d+\\.\\d+(\\.\\d+)*)', out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version) | unlicense | 2,730,727,172,121,840,000 | 38.104418 | 243 | 0.578677 | false |
tonybaloney/st2 | st2api/tests/unit/controllers/v1/test_rules_rbac.py | 1 | 9412 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import mock
import six
from st2common.transport.publishers import PoolPublisher
from st2common.rbac.types import PermissionType
from st2common.rbac.types import ResourceType
from st2common.persistence.auth import User
from st2common.persistence.rbac import Role
from st2common.persistence.rbac import UserRoleAssignment
from st2common.persistence.rbac import PermissionGrant
from st2common.models.db.auth import UserDB
from st2common.models.db.rbac import RoleDB
from st2common.models.db.rbac import UserRoleAssignmentDB
from st2common.models.db.rbac import PermissionGrantDB
from st2tests.fixturesloader import FixturesLoader
from tests.base import APIControllerWithRBACTestCase
http_client = six.moves.http_client
__all__ = [
'RuleControllerRBACTestCase'
]
FIXTURES_PACK = 'generic'
TEST_FIXTURES = {
'runners': ['testrunner1.yaml'],
'actions': ['action1.yaml', 'local.yaml'],
'triggers': ['trigger1.yaml'],
'triggertypes': ['triggertype1.yaml']
}
class RuleControllerRBACTestCase(APIControllerWithRBACTestCase):
fixtures_loader = FixturesLoader()
def setUp(self):
super(RuleControllerRBACTestCase, self).setUp()
self.fixtures_loader.save_fixtures_to_db(fixtures_pack=FIXTURES_PACK,
fixtures_dict=TEST_FIXTURES)
file_name = 'rule_with_webhook_trigger.yaml'
RuleControllerRBACTestCase.RULE_1 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'rules': [file_name]})['rules'][file_name]
file_name = 'rule_example_pack.yaml'
RuleControllerRBACTestCase.RULE_2 = self.fixtures_loader.load_fixtures(
fixtures_pack=FIXTURES_PACK,
fixtures_dict={'rules': [file_name]})['rules'][file_name]
# Insert mock users, roles and assignments
# Users
user_1_db = UserDB(name='rule_create')
user_1_db = User.add_or_update(user_1_db)
self.users['rule_create'] = user_1_db
user_2_db = UserDB(name='rule_create_webhook_create')
user_2_db = User.add_or_update(user_2_db)
self.users['rule_create_webhook_create'] = user_2_db
user_3_db = UserDB(name='rule_create_webhook_create_core_local_execute')
user_3_db = User.add_or_update(user_3_db)
self.users['rule_create_webhook_create_core_local_execute'] = user_3_db
# Roles
# rule_create grant on parent pack
grant_db = PermissionGrantDB(resource_uid='pack:examples',
resource_type=ResourceType.PACK,
permission_types=[PermissionType.RULE_CREATE])
grant_db = PermissionGrant.add_or_update(grant_db)
permission_grants = [str(grant_db.id)]
role_1_db = RoleDB(name='rule_create', permission_grants=permission_grants)
role_1_db = Role.add_or_update(role_1_db)
self.roles['rule_create'] = role_1_db
# rule_create grant on parent pack, webhook_create on webhook "sample"
grant_1_db = PermissionGrantDB(resource_uid='pack:examples',
resource_type=ResourceType.PACK,
permission_types=[PermissionType.RULE_CREATE])
grant_1_db = PermissionGrant.add_or_update(grant_1_db)
grant_2_db = PermissionGrantDB(resource_uid='webhook:sample',
resource_type=ResourceType.WEBHOOK,
permission_types=[PermissionType.WEBHOOK_CREATE])
grant_2_db = PermissionGrant.add_or_update(grant_2_db)
permission_grants = [str(grant_1_db.id), str(grant_2_db.id)]
role_2_db = RoleDB(name='rule_create_webhook_create', permission_grants=permission_grants)
role_2_db = Role.add_or_update(role_2_db)
self.roles['rule_create_webhook_create'] = role_2_db
# rule_create grant on parent pack, webhook_create on webhook "sample", action_execute on
# core.local
grant_1_db = PermissionGrantDB(resource_uid='pack:examples',
resource_type=ResourceType.PACK,
permission_types=[PermissionType.RULE_CREATE])
grant_1_db = PermissionGrant.add_or_update(grant_1_db)
grant_2_db = PermissionGrantDB(resource_uid='webhook:sample',
resource_type=ResourceType.WEBHOOK,
permission_types=[PermissionType.WEBHOOK_CREATE])
grant_2_db = PermissionGrant.add_or_update(grant_2_db)
grant_3_db = PermissionGrantDB(resource_uid='action:core:local',
resource_type=ResourceType.ACTION,
permission_types=[PermissionType.ACTION_EXECUTE])
grant_3_db = PermissionGrant.add_or_update(grant_3_db)
permission_grants = [str(grant_1_db.id), str(grant_2_db.id), str(grant_3_db.id)]
role_3_db = RoleDB(name='rule_create_webhook_create_core_local_execute',
permission_grants=permission_grants)
role_3_db = Role.add_or_update(role_3_db)
self.roles['rule_create_webhook_create_core_local_execute'] = role_3_db
# Role assignments
user_db = self.users['rule_create']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['rule_create'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['rule_create_webhook_create']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['rule_create_webhook_create'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
user_db = self.users['rule_create_webhook_create_core_local_execute']
role_assignment_db = UserRoleAssignmentDB(
user=user_db.name,
role=self.roles['rule_create_webhook_create_core_local_execute'].name)
UserRoleAssignment.add_or_update(role_assignment_db)
def test_post_webhook_trigger_no_trigger_and_action_permission(self):
# Test a scenario when user selects a webhook trigger, but only has "rule_create"
# permission
user_db = self.users['rule_create']
self.use_user(user_db)
resp = self.__do_post(RuleControllerRBACTestCase.RULE_1)
expected_msg = ('User "rule_create" doesn\'t have required permission (webhook_create) '
'to use trigger core.st2.webhook')
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_post_no_webhook_trigger(self):
# Test a scenario when user with only "rule_create" permission selects a non-webhook
# trigger for which we don't perform any permission checking right now
user_db = self.users['rule_create']
self.use_user(user_db)
resp = self.__do_post(RuleControllerRBACTestCase.RULE_2)
expected_msg = ('User "rule_create" doesn\'t have required (action_execute) permission '
'to use action wolfpack.action-1')
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_post_webhook_trigger_webhook_create_permission_no_action_permission(self):
# Test a scenario where user with "rule_create" and "webhook_create" selects a webhook
# trigger and core.local action
user_db = self.users['rule_create_webhook_create']
self.use_user(user_db)
resp = self.__do_post(RuleControllerRBACTestCase.RULE_1)
expected_msg = ('User "rule_create_webhook_create" doesn\'t have required '
'(action_execute) permission to use action core.local')
self.assertEqual(resp.status_code, httplib.FORBIDDEN)
self.assertEqual(resp.json['faultstring'], expected_msg)
def test_post_action_webhook_trigger_webhook_create_and_action_execute_permission(self):
# Test a scenario where user selects a webhook trigger and has all the required permissions
user_db = self.users['rule_create_webhook_create_core_local_execute']
self.use_user(user_db)
resp = self.__do_post(RuleControllerRBACTestCase.RULE_1)
self.assertEqual(resp.status_code, httplib.CREATED)
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
def __do_post(self, rule):
return self.app.post_json('/v1/rules', rule, expect_errors=True)
| apache-2.0 | 6,333,003,820,156,640,000 | 47.766839 | 99 | 0.659584 | false |
USCLiquidPropulsionLaboratory/Engine-sizing-snake | Blue_Steel.py | 1 | 38183 | ## GOX-kerosene sim
#@ Author Juha Nieminen
#import sys
#sys.path.insert(0, '/Users/juhanieminen/Documents/adamrocket')
import RocketComponents as rc
from physical_constants import poise, inches, Runiv, gallons, lbm, \
gearth, atm, psi, lbf
from numpy import pi, linspace, cos, radians, sqrt, exp, log, array, full, ceil
from scipy import optimize as opt
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import Flows1D as flows
#DESIGN VARIABLES____________________________________________________________________________________
# nominal parameters
Preg_N2 = 1300*psi # regulated N2 outlet pressure [Pa]
mdot_fuel_nom = 0.2 # This is only for cooling jacket pressure drop purposes [kg/s]
Pdrop_jacket_nom= 1*psi # Cooling jacket pressure drop at mdot_nominal [Pa]
OF_nom = 2.25 # Oxidizer-to-fuel ratio. This has only effect on initial guesses during solving
# Pressurant tank dimensions
Vprestank = 0.053 # N2 pressurant tank volume [m3]
# Propellant tank dimensions
Vfueltank = 4*gallons # fuel tank volume [m3]
Voxtank = 4*0.053 # ox tank volume [m3]
# Tubing
d_presfuel_tube = 1.0*inches # pressurant tank -> fuel tank tube diameter [m]
L_presfuel_tube = 0.5 # pressurant tank -> fuel tank tube length [m]
d_oxtube = 0.87*inches # ox tank -> manifold tube diameter [m]
L_oxtube = 2.4 # ox tank -> manifold tube length [m]
d_fueltube = 0.87*inches # fuel tank -> manifold tube diameter [m]
L_fueltube = 3.0 # fuel tank -> manifold tube length [m]
roughness = 0.005 # epsilon/diameter, dimensionless
# Valves
Cv_ox_check = 4.7 # oxidizer check valve flow coefficient, dimensionless
Pcrack_ox_check = 10*psi # oxidizer check valve opening pressure [Pa]
Cv_pres_check = 1.8 # nitrogen check valve flow coefficient, dimensionless
Pcrack_pres_check = 0.33*psi # nitrogen check valve opening pressure [Pa]
Cv_pres_valve = 8.8 # nitrogen solenoid valve flow coefficient, dimensionless
Cv_ox_valve = 8.8 # oxidizer solenoid valve flow coefficient, dimensionless
Cv_fuel_valve = 8.8 # fuel solenoid valve flow coefficient, dimensionless
# Injector
cd_oxInjector = 0.767 # orifice discharge coefficient
diameter_oxInjectorHoles = 2.54e-3 #number xx drill # ox orifice diameter [m]
#length_oxHole = 0.005 # ox orifice length [m]
numOxInjectorHoles = 24 # number of ox orifices in the injector
area_oxInjector = numOxInjectorHoles*pi*diameter_oxInjectorHoles**2/4 # total ox flow area [m2]
cd_fuelInjector = 0.767 # orifice discharge coefficient
diameter_fuelInjectorHoles = 0.508e-3 #number xx drill # fuel orifice diameter [m]
numFuelHoles = 59 # number of fuel orifices in the injector
area_fuelInjector = numFuelHoles*pi*diameter_fuelInjectorHoles**2/4 # total fuel flow area [m2]
# Define initial/nominal conditions in the chamber (obtained from CEA code assuming OFratio = 2.25)
TfireInit = 293 # initial flame temperature [K]
Pfire = 1*atm # initial chamber pressure [Pa]
gammaFireInit = 1.148 # dimensionless
ga = gammaFireInit
mbarFireInit = 21.87 # combustion products' initial molecular mass [kg/kmol]
RfireInit = Runiv/mbarFireInit # combustion products' initial specific gas constant [J/kgK]
Pambient = atm # ambient pressure [Pa]
# Nozzle and chamber
d_nozzleThroat = 1.0*inches # throat diameter [m]
A_nozzleThroat = pi*d_nozzleThroat**2/4 # throat area [m2]
area_ratio = 7.46 # nozzle exit-to-throat area ratio
A_nozzleExit = area_ratio*A_nozzleThroat # nozzle exit area [m2]
d_nozzleExit = sqrt(4*A_nozzleExit/pi) # nozzle exit diameter [m]
Dchamber = 0.08 # chamber diameter [m]
Achamber = pi*Dchamber**2/4 # chamber cross sectional area [m2]
Lchamber = 0.14 # chamber length [m]
Vchamber = Achamber*Lchamber # chamber volume [m3]
Lstar = Vchamber/A_nozzleThroat # chamber characteristic length [m]
Mc_nom = flows.getIsentropicMs(A_nozzleThroat, Achamber, gammaFireInit)[0] # nominal chamber Mach number
print("throat diameter is", '%.1f'%(d_nozzleThroat*1000), 'mm')
print("exit diameter is", '%.1f'%(d_nozzleExit*1000), 'mm')
print("chamber volume is", '%.5f'%Vchamber, "m3")
print("chamber Lstar is", '%.2f'%Lstar, "m")
print("chamber Mach_nom is", '%.2f'%Mc_nom)
# INITIAL CONDITIONS____________________________________________________________________________________________
#Define initial conditions in the tanks
TfuelPresStart = 293 # Fuel pressurant (=nitrogen) temp [K]
FFfueltankStart = 0.9 # Fuel tank fill fraction (Vfuel/Vtank)
PfuelPrestankStart = 2640*psi - Preg_N2*Vfueltank*(1-FFfueltankStart)/Vprestank # Fuel pressurant tank pressure once fueltank has been pressurized [Pa]
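# (rough ideal-gas estimate: the N2 that fills the fuel tank ullage at Preg_N2 is drawn
#  from the bottle, so the bottle pressure drops by about Preg_N2*V_ullage/Vprestank)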
ToxStart = 293 # Oxidizer (GOX) temp [K]
PoxtankStart = 1600*psi # Oxidizer tank pressure [Pa]
TfuelStart = 293 # Fuel temp [K]
PfueltankStart = Preg_N2 -1*psi # Fuel tank pressure [Pa] (slightly below Preg_N2 helps convergence on first timestep)
# initialize propellants
nitrogen = rc.NitrogenFluid()
GOX = rc.GOXFluid()
kerosene = rc.Kerosene()
#initialize nozzle and chamber
nozzle = rc.ConvergingDivergingNozzle(A_nozzleExit, A_nozzleThroat)
mdot_init_noz = nozzle.getmdot(gammaFireInit, GOX.R, Pfire, TfireInit, atm)
chamber = rc.GOXKeroCombustionChamber(nozzle, Vchamber, TfireInit, ga, mbarFireInit, Pfire, atm, mdot_init_noz)
#initialize injector orifices
ox_orifice = rc.GasOrifice(area_oxInjector, cd_oxInjector, GOX.gamma, GOX.R)
fuel_orifice = rc.LiquidOrifice(area_fuelInjector, cd_fuelInjector )
#initialize pressurant tanks
fuelprestank = rc.IdealgasTank(nitrogen, Vprestank, TfuelPresStart, PfuelPrestankStart)
#initialize propellant tanks
oxtank = rc.IdealgasTank(GOX, Voxtank, ToxStart, PoxtankStart)
fueltank = rc.LiquidPropellantTank(nitrogen, kerosene, Vfueltank, TfuelStart, TfuelPresStart,\
PfueltankStart, FFfueltankStart, Preg_N2)
#initialize pressure regulators
N2_regu = rc.PressureRegulator(Preg_N2, nitrogen)
#initialize solenoids
fuelSole = rc.IncompressibleFlowSolenoid( Cv_fuel_valve)
oxSole = rc.CompressibleFlowSolenoid( Cv_ox_valve, GOX)
presSole = rc.CompressibleFlowSolenoid( Cv_pres_valve, nitrogen)
#initialize check valves
ox_check = rc.CompressibleFlowCheckValve( Cv_ox_check, Pcrack_ox_check, GOX)
pres_check = rc.CompressibleFlowCheckValve( Cv_pres_check, Pcrack_pres_check, nitrogen)
#initialize tubing
ox_tube = rc.RoughStraightCylindricalTube(d_oxtube, L_oxtube, roughness, True)
fuel_tube = rc.RoughStraightCylindricalTube(d_fueltube, L_fueltube, roughness, True)
presfuel_tube = rc.RoughStraightCylindricalTube(d_presfuel_tube, L_presfuel_tube, roughness, True)
#initialize cooling jacket
jacket = rc.CoolingJacket(mdot_fuel_nom, Pdrop_jacket_nom)
#initialize arrays for various data time histories
T_chamber = [chamber.T] # combustion chamber temperature [K]
Pchamber = [chamber.get_P_inlet()] # combustion chamber pressure [Pa]
Pexit = [nozzle.getPe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit pressure [Pa]
Mexit = [nozzle.getMe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit Mach number
cmass = [chamber.m] # resident propellant mass in combustion chamber [kg]
mdot_nozzle = [nozzle.getmdot(gammaFireInit, RfireInit, chamber.get_P_inlet(), chamber.T, chamber.Pa)] # mass flow out of the nozzle [kg/s]
Poxtank = [oxtank.getPtank()] # ox tank pressure [Pa]
Toxtank = [oxtank.getTtank()] # ox tank temperature [K]
mox = [oxtank.getM()] # oxidizer mass in tank [kg]
Pfueltank = [fueltank.getPtank()] # fuel tank pressure [Pa]
Tfueltank = [fueltank.getTpres()] # pressurant temperature in fuel tank[K]
mPresFueltank = [fueltank.getMpres()] # pressurant mass in fuel tank [kg]
mfuel = [fueltank.getMprop()] # fuel mass in tank [kg]
FFfueltank = [fueltank.getFF()] # fuel tank fill fraction defined as Vfuel/(Vfueltank)
TfuelPres = [fuelprestank.getTtank()] # temperature in fuel pressurant tank [K]
PfuelPres = [fuelprestank.getPtank()] # pressure in fuel pressurant tank [Pa]
mfuelPres = [fuelprestank.getM()] # pressurant mass in fuel pressurant tank [Pa]
time = [0] # time array [s]
mdot_ox = [0] # ox mass flow out of the tank [kg/s]
P1ox = [0] # ox tank pressure [Pa]
P2ox = [0] # ox check valve outlet pressure [Pa]
P3ox = [0] # ox flow solenoid outlet pressure [Pa]
P4ox = [0] # ox injector inlet pressure [Pa]
T1ox = [0] # ox tank temp [K]
T2ox = [0] # ox check valve outlet temp [K]
T3ox = [0] # ox flow solenoid outlet temp [K]
T4ox = [0] # ox injector inlet temp [K]
mdot_fuel = [0] # fuel mass flow out of the tank [kg/s]
rooFuel = fueltank.propellant.density # fuel density, assumed constant [kg/m3]
P1fuel = [0] # fuel tank pressure [Pa]
P2fuel = [0] # fuel solenoid outlet pressure [Pa]
P3fuel = [0] # fuel cooling jacket inlet pressure [Pa]
P4fuel = [0] # fuel injector inlet pressure [Pa]
mdot_fuel_pres = [0] # fuel pressurant mass flow rate [kg/s]
P3pres = [0] # pressurant pressure at check valve outlet [kg/s]
P4pres = [0] # pressurant pressure at solenoid valve outlet [kg/s]
mTotal = [0] # propellant mass in the system [kg]
mprs = [mfuelPres[0]+mPresFueltank[0]] # pressurant mass in the system [kg]
OFratio = [0] # oxidizer to fuel mass flow ratio
Isp = [0] # specific impulse [s]
Thrust = [nozzle.getThrust(chamber.get_P_inlet(), Pambient, gammaFireInit) ] # rocket thrust [N]
#SIMULATE_______________________________________________________________________________________________________
# using orifices as follows: ejecting GOX from manifold to chamber, fuel liq-to-liq from manifold to chamber
print("")
print("STARTING SIM...")
print("")
print("mOxStart is", '%.2f'%mox[0], "kg")
print("mKerostart is", mfuel[0], "kg")
print("mN2start in N2 tank is", '%.2f'%mfuelPres[0], "kg")
print("mN2start in fuel tank is", '%.2f'%(fueltank.getMpres()), "kg")
# The first step is to solve oxidizer and fuel mass flow rates from the tank to combustion chamber.
# definitions:
# P1ox = GOX tank pressure
# P2ox = check valve outlet pressure
# P3ox = ox valve outlet pressure
# P4ox = injector inlet pressure
# (P1ox-P2ox) = ox check valve pressure drop, eq 1
# (P2ox-P3ox) = ox flow solenoid pressure drop, eq 2
# (P3ox-P4ox) = ox tubing pressure drop, eq 3
# (P4ox-Pchamber) = ox injector pressure drop, eq 4
# P1pres = Nitrogen tank pressure
# P2pres = Regulation pressure
# P3pres = Check valve outlet pressure
# P4pres = Nitrogen solenoid outlet
# P5pres = Nitrogen tubing outlet = fuel tank pressure
# (P2pres-P3pres) = Nitrogen check valve pressure drop
# (P3pres-P4pres) = Nitrogen solenoid valve pressure drop
# (P4pres-P5pres) = Nitrogen tubing pressure drop
# P1fuel = fuel tank pressure
# P2fuel = fuel valve outlet pressure
# P3fuel = cooling jacket inlet pressure
# P4fuel = injector inlet pressure
# (P1fuel-P2fuel) = fuel valve pressure drop, eq1
# (P2fuel-P3fuel) = fuel tubing pressure drop, eq2
# (P3fuel-P4fuel) = cooling jacket pressure drop, eq3
# (P4fuel-Pchamber) = injector pressure drop, eq4
# In the case of oxidizer, P1 and Pchamber are known, so one must solve for P2, P3, and P4. Fourth unknown is the mass flow rate. The four equations are check valve/solenoid/tubing/injector pressure drops. These equations are defined in oxfunks method below, and underlying physics are in RocketComponents.py under their respective classes.
# With pressurant, P2 (regulation pressure) and P5 (fuel tank pressure) are known, so one must solve for P3 and P4. The third unknown is pressurant mass flow rate. Equations to be solved are pressure drops over the check valve, solenoid valve, and the tubing.
# With fuel P1 and Pchamber are known, so one must solve for P2, P3, and P4. Fourth unknown is mass flow rate.
# fsolve requires sensible initial guesses for all unknowns. They are established by guessing the mass flow rate, because all other pressures trickle down from that.
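# In code terms, each leg below packs its unknowns (the intermediate pressures plus the
# mass flow rate) into a vector U, builds the pressure-drop residuals, and hands them to
# scipy's fsolve together with the initial guesses, e.g.
#   ox_solution = opt.fsolve(oxfunks, initial_ox_guesses)
# as done inside the main loop further down.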
timestep_small = 1e-5 # seconds, used during initial transient
timestep_nom = 1e-4 # seconds, used after 0.01 seconds of simulation time
t_transient = 0.01 # seconds, estimated time of initial transient
t_simulation = 3 # seconds
if t_simulation <= t_transient:
simsteps = int(ceil(t_simulation/timestep_small))
else:
simsteps = int(ceil( t_transient/timestep_small + (t_simulation-t_transient)/timestep_nom ))
print("Sim time is", t_simulation, "s, number of simsteps is", simsteps)
i=0
for i in range(0, simsteps):
if time[i] < t_transient:
timestep = timestep_small # use shorter timestep during initial transient
else: timestep = timestep_nom # proceed with nominal timestep
#while True:
print("i=", i)
P1ox = Poxtank[i]
P1fuel = Pfueltank[i]
Pchamb = Pchamber[i]
mu_ox = GOX.getViscosity(P1ox, Toxtank[i])
roo_ox = GOX.getDensity(P1ox, Toxtank[i])
Tox = Toxtank[i]
Tpres = TfuelPres[i]
mu_fuel = kerosene.mu
mu_N2_fuel = nitrogen.getViscosity(Preg_N2, TfuelPres[i])
roo_N2_fuel = nitrogen.getDensity(Preg_N2, TfuelPres[i])
if i==0: # First guesses. Based on choked flow at ox injector (multiplied by 0.7 to adjust for better convergence)
mdot_injector_choked = ox_orifice.getMdot(P1ox, Pfire, Tox)
'''
mdot_checkvalve_choked = ox_check.getMdot(P1ox, Pfire, GOX.roo_std, roo_ox, Tox)
if mdot_injector_choked >= mdot_checkvalve_choked: #check valve is choking
print("check valve is initially choking")
mdot_ox_guess = mdot_checkvalve_choked
print("mdot_ox_guess is", mdot_ox_guess)
P4ox_guess = ox_orifice.getUpstreamPressure(Pchamb, Tox, mdot_ox_guess)
P3ox_guess = P4ox_guess + ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P3ox_guess + oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
else:
'''
mdot_ox_guess = mdot_injector_choked *0.7
P2ox_guess = P1ox - ox_check.getPressureDrop(mdot_ox_guess, P1ox, GOX.roo_std, roo_ox, Tox)
P3ox_guess = P2ox_guess - oxSole.getPressureDrop(mdot_ox_guess, P2ox_guess, roo_ox)
P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P5ox_guess is", P5ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_ox_guess/OF_nom
P2fuel_guess = P1fuel - fuelSole.getPressureDrop(mdot_fuel_guess, rooFuel)
P3fuel_guess = P2fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
P4fuel_guess = P3fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
mdot_pres_guess = mdot_fuel_guess*roo_N2_fuel/rooFuel #volumetric flowrates of fuel and pressurant are the same
P3pres_guess = Preg_N2 - pres_check.getPressureDrop(mdot_pres_guess, Preg_N2, nitrogen.roo_std, roo_N2_fuel, Tpres)
P4pres_guess = P3pres_guess - presSole.getPressureDrop(mdot_pres_guess, P3pres_guess, roo_N2_fuel)
P5pres_guess = P4pres_guess - presfuel_tube.getPressureDrop(mdot_pres_guess, mu_N2_fuel, roo_N2_fuel)
#print("mdot_pres_guess is is", mdot_pres_guess, "kg/s")
#print("P3pres_guess is is", P3pres_guess/psi, "psi")
#print("P4pres_guess is is", P4pres_guess/psi, "psi")
#print("P5pres_guess is is", P5pres_guess/psi, "psi")
#print("mdot_fuel_guess is", mdot_fuel_guess)
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
else : # guesses for further steps. Use values from previous timestep
mdot_ox_guess = mdot_ox[i-1] #ox_orifice.getMdot(Preg_ox, Pchamb, Tox)
#P3ox_guess = P2ox - oxSole.getPressureDrop(mdot_ox_guess, P2ox,roo_ox)
#P4ox_guess = P3ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, roo_ox)
P2ox_guess = P2ox[i-1]
P3ox_guess = P3ox[i-1]
P4ox_guess = P4ox[i-1]
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox_guess is", P2ox_guess/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_fuel[i-1] #mdot_ox_guess/OF_nom*1
P2fuel_guess = P2fuel[i-1]
P3fuel_guess = P3fuel[i-1]
P4fuel_guess = P4fuel[i-1]
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_pres_guess = mdot_fuel_pres[i-1]
P3pres_guess = P3pres[i-1]
P4pres_guess = P4pres[i-1]
initial_ox_guesses = [P2ox_guess, P3ox_guess, P4ox_guess, mdot_ox_guess]
initial_fuel_guesses= [P2fuel_guess, P3fuel_guess, P4fuel_guess, mdot_fuel_guess]
initial_pres_guesses= [P3pres_guess, P4pres_guess, mdot_pres_guess]
def oxfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
        #print("now HERE")
#print("P3 as U0 is", P3/psi, "psi")
#print("P4 as U1 is", P4/psi, "psi")
#print("P5 as U2 is", P5/psi, "psi")
#print("mdot as U3 is", mdot, "kg/s")
#print("mdot is", mdot, "kg/s")
#print("P4ox is", P4/psi, "psi")
#print("Pchamb is", Pchamb/psi, "psi")
#out = [ P2ox - P3 - ox_check.getPressureDrop(mdot, P2ox, GOX.roo_std, roo_ox, Tox) ]
out = [ mdot - ox_check.getMdot(P1ox, P2, GOX.roo_std, roo_ox, Tox) ]
out.append( P2 - P3 - oxSole.getPressureDrop( mdot, P2, roo_ox) )
out.append( P3 - P4 - ox_tube.getPressureDrop(mdot, mu_ox, roo_ox) )
out.append( mdot - ox_orifice.getMdot(P4, Pchamb, Tox) )
#print("oxoutti", out)
return out
ox_solution = opt.fsolve(oxfunks, initial_ox_guesses) # iterates until finds a solution or goes bust
#print("ox solution is", ox_solution)
mdot_ox_new = ox_solution[3]
#print("mdot_ox_nyyy is", mdot_ox_new, "kg/s")
def fuelfunks(U): # defines the system of equations and unknowns U to be solved
P2 = U[0]
P3 = U[1]
P4 = U[2]
mdot = U[3]
#print("U is", U)
#print("fuelmdot is", mdot)
out = [ mdot - fuelSole.getMdot(P1fuel, P2, rooFuel, kerosene.P_crit, kerosene.P_vapor) ]
out.append( P2 - P3 - fuel_tube.getPressureDrop(mdot, mu_fuel, rooFuel) )
out.append( P3 - P4 - jacket.getPressureDrop(mdot) )
out.append( P4 - Pchamb - fuel_orifice.getPressureDrop(mdot, rooFuel) )
#print("fueloutti", out)
return out
fuel_solution = opt.fsolve(fuelfunks, initial_fuel_guesses)
#print("fuel solution is", fuel_solution)
mdot_fuel_new = fuel_solution[3]
# Now that fuel mass flow rate out has been solved, intermediate state (=no N2 inflow yet) of the fuel tank can be established:
fueltank.update(TfuelPres[i], 0, mdot_fuel_new, timestep)
Pfuel_intermediate = fueltank.getPtank()
Pfuel_eff = (Pfuel_intermediate + P1fuel)/2 # average of pressures before and after ejection of fuel from tank; incoming nitrogen will see this 'effective' pressure in the tank
# Next, nitrogen flow into the void created by ejected fuel is calculated
def presfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
mdot = U[2]
out = [mdot - pres_check.getMdot(Preg_N2, P3, nitrogen.roo_std, roo_N2_fuel, Tpres) ]
#out.append( P3 - P4 - presSole.getPressureDrop(mdot, P3, roo_N2_fuel) )
out.append( mdot - presSole.getMdot(P3, P4, roo_N2_fuel) )
out.append( P4 - Pfuel_eff - presfuel_tube.getPressureDrop(mdot, mu_N2_fuel, roo_N2_fuel) )
#out.append( mdot - presfuel_tube.getMdot(P4, Pfuel_eff, mu_N2_fuel, roo_N2_fuel) )
#print("presoutti", out)
return out
pres_solution = opt.fsolve(presfunks, initial_pres_guesses)
#print("pres solution is", pres_solution)
mdot_pres_new = pres_solution[2]
#print("mdot_pres_new is", mdot_pres_new, "kg/s")
# Determine final conditions in prop tanks now that N2 inflow has been determined
oxtank.update(mdot_ox_new, timestep)
fueltank.update(TfuelPres[i], mdot_pres_new, 0, timestep)
# ...and fuel pressurant tank
fuelprestank.update(mdot_pres_new, timestep)
# Check if OFratio is within limits. If not, stop simulation (no CEA data beyond OFratio 0.5-3.0)
if (mdot_ox_new/mdot_fuel_new) < 0.5 or (mdot_ox_new/mdot_fuel_new) > 8.0:
print("OF ratio out of range, terminate (",(mdot_ox_new/mdot_fuel_new),")")
print("mdot_ox_new is", mdot_ox_new, "kg/s")
print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
break
# Update chamber parameters:
chamber.update(mdot_ox_new, mdot_fuel_new, Pambient, timestep) # mdot_ox_in, mdot_fuel_in, Pambient, timestep
#print("mdot_ox_new is", mdot_ox_new, "kg/s")
#print("mdot_fuel_new is", mdot_fuel_new, "kg/s")
    #print("chamber pressure is", chamber.get_P_inlet()/psi, "psi" )
# Check if ox or fuel tank will empty during this timestep. If so, stop simulation.
if oxtank.getPtank() < chamber.get_P_inlet()*1.2:
print("Ox tank reached chamber pressure x1.2 (=empty) after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fueltank.getMprop() < 0:
        print("Fuel tank empty after", i, " iterations, ie", time[-1], "seconds")
print("remaining GOX", mox[i], "kg")
print("remaining fuel prs", mfuelPres[i], "kg,", "i.e.", mfuelPres[i]/mfuelPres[0]*100, " % of initial amount")
break
if fuelprestank.getPtank() < Preg_N2:
print("Out of fuel pressurant after", i, " iterations, ie", time[-1], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining GOX", mox[i], "kg")
break
#update mass flow time histories. These are values during the CURRENT time step.
if i==0:
P2ox = [ox_solution[0]]
P3ox = [ox_solution[1]]
P4ox = [ox_solution[2]]
mdot_ox = [ox_solution[3]]
P2fuel = [fuel_solution[0]]
P3fuel = [fuel_solution[1]]
P4fuel = [fuel_solution[2]]
mdot_fuel = [fuel_solution[3]]
P3pres = [pres_solution[0]]
P4pres = [pres_solution[1]]
mdot_fuel_pres = [pres_solution[2]]
OFratio = [ mdot_ox[0]/mdot_fuel[0] ]
else:
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
#print("mdot_pres_new solution is", pres_solution[2], "kg/s")
mdot_fuel_pres.append( pres_solution[2])
#print("i is= ", i)
OFratio.append( mdot_ox[i]/mdot_fuel[i])
#update the rest of the time histories. System will have these values during the NEXT time step.
Poxtank.append( oxtank.getPtank())
Toxtank.append( oxtank.getTtank())
mox.append( oxtank.getM())
Pfueltank.append( fueltank.getPtank())
Tfueltank.append( fueltank.getTpres())
mPresFueltank.append( fueltank.getMpres())
mfuel.append( fueltank.getMprop())
FFfueltank.append( fueltank.getFF())
TfuelPres.append( fuelprestank.getTtank())
PfuelPres.append( fuelprestank.getPtank())
mfuelPres.append( fuelprestank.getM())
#mdot_fuel_pres.append( mdot_pres_new)
Pchamber.append( chamber.get_P_inlet() )
Pexit.append( nozzle.getPe(Pchamber[i+1], chamber.gamma, Pambient) )
Mexit.append( nozzle.getMe(Pchamber[i+1], chamber.gamma, Pambient) )
cmass.append( chamber.m)
mdot_nozzle.append( nozzle.getmdot(chamber.gamma, Runiv/chamber.mbar, chamber.get_P_inlet(),\
chamber.T, chamber.Pa) )
Thrust.append( nozzle.getThrust(chamber.get_P_inlet(), Pambient, chamber.gamma) )
T_chamber.append( chamber.T)
Isp.append( Thrust[i+1]/(mdot_ox[i] + mdot_fuel[i])/9.81 )
mTotal.append(mox[i+1] + mfuel[i+1] + cmass[i+1] + mdot_nozzle[i]*timestep )
mprs.append( mPresFueltank[i+1] + mfuelPres[i+1] )
time.append( time[i]+timestep )
#dP_ox_check = (Poxtank[-1] - P2ox[-1])
#print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi")
i+=1
# Print some values
bindex = 1001
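# bindex is roughly the first sample after the initial transient
# (t_transient/timestep_small = 1000 steps), used as the "initial" state in the printouts below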
print("")
print("mdot_nozzle initial is", '%.3f'%mdot_nozzle[bindex], "kg/s")
print("initial thrust is", '%.1f'%Thrust[bindex], "N")
print("initial Isp is", '%.1f'%Isp[bindex], "s")
print("initial T_chamber is",'%.1f'%T_chamber[bindex], "K")
print("initial P_chamber is", '%.1f'%(Pchamber[bindex]/psi), "psi")
print("initial P_exit is", '%.3f'%(Pexit[bindex]/atm), "atm")
print("initial thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[bindex], atm, chamber.get_gamma(OFratio[bindex], Pchamber[bindex])) )
print("initial mdot_N2 is", '%.3f'%mdot_fuel_pres[bindex], "kg/s")
print("initial N2 flow rate is", '%.3f'%(mdot_fuel_pres[bindex]/roo_N2_fuel*1000/3.78*60), "GPM")
print("initial mdot_ox is", '%.3f'%mdot_ox[bindex], "kg/s")
print("initial mdot_fuel is", '%.3f'%mdot_fuel[bindex], "kg/s")
print("initial O/F ratio is", '%.3f'%OFratio[bindex])
print("initial ox tube velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("initial fuel tube velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("initial ox injection velocity is", '%.1f'%(mdot_ox[bindex]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("initial fuel injection velocity is", '%.1f'%(mdot_fuel[bindex]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("initial ox injector P_drop", '%.1f'%((P4ox[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("initial fuel injector P_drop", '%.1f'%((P4fuel[bindex]-Pchamber[bindex])/Pchamber[bindex]*100), "% of Pchamber")
print("")
print("")
print("mdot_nozzle steady state (end of sim) is", '%.3f'%mdot_nozzle[-1], "kg/s")
print("SS thrust is", '%.1f'%Thrust[-1], "N")
print("SS Isp is", '%.1f'%Isp[-1], "s")
print("SS T_chamber is",'%.1f'%T_chamber[-1], "K")
print("SS P_chamber is", '%.1f'%(Pchamber[-1]/psi), "psi")
print("SS P_exit is", '%.3f'%(Pexit[-1]/atm), "atm")
print("SS thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[-1], atm, chamber.get_gamma(OFratio[-1], Pchamber[-1])) )
print("SS mdot_N2 is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS N2 flow rate is", '%.3f'%(mdot_fuel_pres[-1]/roo_N2_fuel*1000/3.78*60), "GPM")
print("SS mdot_ox is", '%.3f'%mdot_ox[-1], "kg/s")
print("SS mdot_fuel is", '%.3f'%mdot_fuel[-1], "kg/s")
print("SS O/F ratio is", '%.3f'%OFratio[-1])
print("SS ox tube velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*d_oxtube**2/4)), "m/s")
print("SS fuel tube velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("SS ox injection velocity is", '%.1f'%(mdot_ox[-1]/(roo_ox*pi*diameter_oxInjectorHoles**2/4*numOxInjectorHoles)), "m/s")
print("SS fuel injection velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*diameter_fuelInjectorHoles**2/4*numFuelHoles)), "m/s")
print("SS ox injector P_drop", '%.1f'%((P4ox[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("SS fuel injector P_drop", '%.1f'%((P4fuel[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("")
# See what check valves are doing
dP_ox_check = (Poxtank[-1] - P2ox[-1])
dP_N2_check = (Preg_N2 - P3pres[-1])
if dP_ox_check < ox_check.Pcrack:
print("Warning: Pressure drop over ox check valve (",'%.1f'%(dP_ox_check/psi),"psi) is less than its cracking pressure (",ox_check.Pcrack/psi,"psi) and will remain shut")
else:
print("Ox check valve pressure drop is", '%.1f'%(dP_ox_check/psi), "psi, enough to keep it flowing")
if dP_N2_check < pres_check.Pcrack:
print("Warning: Pressure drop over N2 check valve(",'%.1f'%(dP_N2_check/psi),"psi) is less than its cracking pressure (",pres_check.Pcrack/psi,"psi) and will remain shut")
else:
print("N2 check valve pressure drop is", '%.1f'%(dP_N2_check/psi), "psi, enough to keep it flowing")
# following time histories are one element shorter than the rest, so the last calculated value will be duplicated to match the length of other time histories.
P2ox.append( ox_solution[0])
P3ox.append( ox_solution[1])
P4ox.append( ox_solution[2])
mdot_ox.append( ox_solution[3])
P2fuel.append( fuel_solution[0])
P3fuel.append( fuel_solution[1])
P4fuel.append( fuel_solution[2])
mdot_fuel.append( fuel_solution[3])
P3pres.append( pres_solution[0])
P4pres.append( pres_solution[1])
mdot_fuel_pres.append( pres_solution[2])
OFratio.append( mdot_ox[i]/mdot_fuel[i])
# plot time histories
plt.ion()
plt.figure(1)
plt.plot(time, array(Poxtank)/psi, label='ox tank')
plt.figure(1)
plt.plot(time,array(P2ox)/psi, label='Pcheck_out')
plt.figure(1)
plt.plot(time,array(P3ox)/psi, label='Psolenoid_out')
plt.figure(1)
plt.plot(time,array(P4ox)/psi, label='Pinj_in')
plt.figure(1)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(1)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Ox pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psia')
plt.show()
Preg_N2_array = full((1, len(time)), Preg_N2/psi)
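# constant line at the regulator set pressure, plotted for reference on the fuel-side figure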
plt.figure(2)
plt.plot(time, array(PfuelPres)/psi, label='fuelpres tank')
plt.figure(2)
plt.plot(time, Preg_N2_array.T, label="P_regulation")
plt.figure(2)
plt.plot(time,array(P3pres)/psi, label='N2 check valve out')
plt.figure(2)
plt.plot(time,array(P4pres)/psi, label='N2 solenoid valve out')
plt.figure(2)
plt.plot(time,array(Pfueltank)/psi, label='fuel tank')
plt.figure(2)
plt.plot(time,array(P2fuel)/psi, label='Pvalve_out')
plt.figure(2)
plt.plot(time,array(P3fuel)/psi, label='Pjacket_in')
plt.figure(2)
plt.plot(time,array(P4fuel)/psi, label='Pinj_in')
plt.figure(2)
plt.plot(time,array(Pchamber)/psi, label='Pchamber')
plt.figure(2)
plt.plot(time,array(Pexit)/psi, label='Pexit')
plt.title('Fuel pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('Psia')
plt.show()
plt.figure(3)
plt.plot(time,Toxtank, label='Ox tank')
plt.figure(3)
plt.plot(time,Tfueltank, label='Fuel tank')
plt.figure(3)
plt.plot(time,TfuelPres, label='fuel pressurant tank')
plt.title('Tank temperatures')
plt.legend( loc='lower left')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(4)
plt.plot(time,mdot_ox, label='mdot_ox')
plt.figure(4)
plt.plot(time,mdot_fuel, label='mdot_fuel')
plt.figure(4)
plt.plot(time,mdot_nozzle, label='mdot_nozzle')
plt.figure(4)
plt.plot(time,mdot_fuel_pres, label='mdot_fuel_pres')
plt.title('Mass flows')
plt.xlabel('Time [s]')
plt.ylabel('kg/s')
plt.legend( loc='upper right')
plt.show()
plt.figure(5)
plt.plot(time,FFfueltank, label='fuel tank')
plt.title('Fill fraction in fuel tank (Vfuel/Vtank)')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.legend( loc='upper right')
plt.show()
plt.figure(6)
plt.plot(time, OFratio)
plt.title('O/F ratio')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.show()
plt.figure(7)
plt.plot(time,mox, label='GOX')
plt.figure(7)
plt.plot(time,mfuel, label='fuel')
plt.figure(7)
plt.plot(time,mfuelPres, label='fuel pressurant')
plt.figure(7)
plt.plot(time,mPresFueltank, label='pressurant in fuel tank')
plt.figure(7)
plt.plot(time,mprs, label='total pressurant')
plt.title('Fluid masses')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.legend( loc='upper right')
plt.show()
plt.figure(8)
plt.plot(time, cmass)
plt.title('Resident mass in chamber')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.show()
plt.figure(9)
plt.plot(time, Thrust)
plt.title('Thrust')
plt.xlabel('Time [s]')
plt.ylabel('N')
plt.show()
plt.figure(10)
plt.plot(time, Isp)
plt.title('Isp')
plt.xlabel('Time [s]')
plt.ylabel('s')
plt.show()
plt.figure(11)
plt.plot(time, T_chamber)
plt.title('T chamber')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(12)
plt.plot(time, Mexit)
plt.title('Exit Mach number')
plt.xlabel('Time [s]')
plt.ylabel('-')
plt.show()
plt.figure(13)
y1 = PfuelPres[-1]/psi
y2 = Preg_N2/psi
y3 = P3pres[-1]/psi
y4 = P4pres[-1]/psi
y5 = Pfueltank[-1]/psi
y6 = P2fuel[-1]/psi
y7 = P3fuel[-1]/psi
y8 = P4fuel[-1]/psi
y9 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Check valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Pressurant solenoid")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Pressurant tubing")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Fuel tank")
plt.plot( [6, 7], [y5, y6], linewidth=2, label="Fuel solenoid")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="Piping")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="Cooling jacket")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="Fuel injector")
plt.plot( [10, 11], [y9, y9], linewidth=2, label="Chamber")
plt.title('Fuel line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.figure(14)
y1 = Poxtank[-1]/psi
y2 = P2ox[-1]/psi
y3 = P3ox[-1]/psi
y4 = P4ox[-1]/psi
y5 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="Ox tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="Check valve")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="Ox solenoid")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="Tubing")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="Ox injector")
plt.plot( [5, 6], [y5, y5], linewidth=2, label="Chamber")
plt.title('Ox line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
| mit | 7,787,178,057,936,864,000 | 45.792892 | 340 | 0.601917 | false |
uclmr/inferbeddings | scripts/synth/create_table_iterative.py | 1 | 6170 | import numpy as np
from collections import defaultdict
results = '/Users/tdmeeste/workspace/inferbeddings/logs/synth/synth_paper_iterative_aggregated.txt'
models_lst = ['DistMult', 'ComplEx']
clauses_lst = ['symm', 'impl', 'impl_inv', 'trans_single', 'trans_diff']
confs_lst = ['0.0']
versions_lst = ['v0', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6', 'v7', 'v8', 'v9']
adv_weights_lst = ['0', '1']
adv_epochs_lst = ['0', '10']
disc_epochs_lst = ['10']
def string(s):
return {'TransE' : r"\emph{ASR}-\mdl{TransE}",
'DistMult' : r"\mdl{DistM.}",
'ComplEx' : r"\mdl{Compl.}",
'symm' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow r(X_2, X_1) \end{array}$ }",
'impl' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_1, X_2) \end{array}$ }",
'impl_inv' : r"\multirow{ 2}{*}{ $\begin{array} {l@{}} r(X_1, X_2) \\ \quad\Rightarrow s(X_2, X_1) \end{array}$ }",
'trans_single': r"\multirow{ 2}{*}{$\begin{array} {l@{}} r(X_1, X_2) \wedge r(X_2, X_3) \\ \quad\Rightarrow r(X_1, X_3) \end{array}$}",
'trans_diff': r"\multirow{ 2}{*}{$\begin{array} {l@{}} r(X_1, X_2) \wedge s(X_2, X_3) \\ \quad\Rightarrow t(X_1, X_3) \end{array}$}"
}[s]
#'symm': r"$r(\x_2, \x_1) :- r(\x_1, \x_2)$",
#'impl': r"$s(\x_1, \x_2) :- r(\x_1, \x_2)$",
#'impl_inv': r"$s(\x_2, \x_1) :- r(\x_1, \x_2)$",
#'trans_single': r"$r(\x_1, \x_3) :- r(\x_1, \x_2), r(\x_2, \x_3)$",
#'trans_diff': r"$t(\x_1, \x_3) :- r(\x_1, \x_2), s(\x_2, \x_3)$"
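# The id2* helpers below recover experiment settings by substring-matching tags in each
# result identifier; they return None when the expected tag is absent.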
def id2clause(id):
if 'tag=impl_inv' in id:
        return 'impl_inv' # must be checked before 'impl', which is a substring of 'impl_inv'
elif 'tag=impl' in id:
return 'impl'
for clause in ['symm', 'trans_single', 'trans_diff']:
if 'tag=%s'%clause in id:
return clause
return None
def id2model(id):
for model in models_lst:
if 'model=%s'%model in id:
return model
return None
def id2adv_init_ground(id):
if 'adv_init_ground=True' in id:
return True
elif 'adv_init_ground=False' in id:
return False
else:
return None
def id2conf(id):
for conf in confs_lst:
if '_c%s'%conf in id:
return conf
return None
def id2version(id):
for version in versions_lst:
if '_%s.log'%version in id:
return version
return None
def id2adv_weight(id):
for adv_weight in adv_weights_lst:
if 'adv_weight=%s_'%adv_weight in id:
return adv_weight
return None
def id2adv_epochs(id):
for adv_epoch in adv_epochs_lst:
if 'adv_epochs=%s_'%adv_epoch in id:
return adv_epoch
return None
def id2disc_epochs(id):
for disc_epoch in disc_epochs_lst:
if 'disc_epochs=%s_'%disc_epoch in id:
return disc_epoch
return None
def id2entity_space(id):
return 'unit_sphere' if 'unit-sphere' in id else 'unit_cube'
from time import sleep
ID2AUC = {}
found = False
with open(results) as rID:
for line in rID:
auc, id = line.strip().split('\t')
clause = id2clause(id)
model = id2model(id)
adv_init_ground = id2adv_init_ground(id)
conf = id2conf(id)
adv_weight = id2adv_weight(id)
adv_epochs = id2adv_epochs(id)
disc_epochs = id2disc_epochs(id)
entity_space = id2entity_space(id)
version = id2version(id)
if not None in (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version):
ID2AUC[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version)] = float(auc)
ID2AUC_versions = {}
for (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space, version), auc in ID2AUC.items():
if not (clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space) in ID2AUC_versions:
ID2AUC_versions[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space)] = []
ID2AUC_versions[(clause, model, adv_init_ground, conf, adv_weight, adv_epochs, disc_epochs, entity_space)].append(auc)
ID2MEAN = defaultdict(lambda: -1)
for k in ID2AUC_versions:
ID2MEAN[k] = np.mean(ID2AUC_versions[k])
#construct table:
title = r"PR-AUC results for \emph{ASR}-DistMult (DistM.) and \emph{ASR}-ComplEx (Compl.) on synthetic datasets with various types of clauses (with $r\not=s\not=t$). Comparison of standard models without clauses ($\alpha=0$), and iterative adversarial training with clauses ($\alpha=1$). "
header = r"""
\begin{table}[t!]
\centering
\caption{
""" + title + \
r"""
}
\label{synth}
\vspace{1em}
\resizebox{\columnwidth}{!}{
\begin{tabular}{llcccc}
\toprule
\multirow{ 2}{*}{Clauses} & \multirow{ 2}{*}{Model} & $\alpha=0$ & $\alpha=0$ & $\alpha=1$ & $\alpha=1$ \\
&& cube & sphere & cube & sphere \\
\midrule
"""
footer = r"""
\bottomrule
\end{tabular}
}
\end{table}
"""
def results_line(clause, model):
res = string(model) + " & "
conf = "0.0"
res_STD_cube = ID2MEAN[(clause, model, True, conf, '0', '10', '10','unit_cube')]
res_STD_sphere = ID2MEAN[(clause, model, True, conf, '0', '10', '10','unit_sphere')]
#res_SMPL = ID2MEAN[(clause, model, True, conf, '1', '0', '10')]
#res_ASR_R = ID2MEAN[(clause, model, False, conf, '1', '1')]
res_ASR_cube = ID2MEAN[(clause, model, True, conf, '1', '10', '10', 'unit_cube')]
res_ASR_sphere = ID2MEAN[(clause, model, True, conf, '1', '10', '10', 'unit_sphere')]
resu = [res_STD_cube, res_STD_sphere, res_ASR_cube, res_ASR_sphere]
resu = [np.round(1000*res)/10. for res in resu]
maxvalue = max(resu)
resu_str = ["\\textbf{%.1f}"%res if res == maxvalue else "%.1f"%res for res in resu]
res += " & ".join(resu_str)
return res + r" \\"
print(header)
for clause in clauses_lst:
for model in models_lst:
show_clause = string(clause) if model == models_lst[0] else ""
line = show_clause + " & " + results_line(clause, model)
print(line)
if not clause == clauses_lst[-1]:
print(r"\midrule")
print(footer)
| mit | 2,269,847,217,643,453,000 | 30.968912 | 289 | 0.583468 | false |
hlange/LogSoCR | .waf/waflib/Runner.py | 1 | 9332 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2016 (ita)
"""
Runner.py: Task scheduling and execution
"""
import random
try:
from queue import Queue
except ImportError:
from Queue import Queue
from waflib import Utils, Task, Errors, Logs
GAP = 20
"""
Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run
"""
class Consumer(Utils.threading.Thread):
"""
Daemon thread object that executes a task. It shares a semaphore with
the coordinator :py:class:`waflib.Runner.Spawner`. There is one
instance per task to consume.
"""
__slots__ = ('task', 'spawner')
def __init__(self, spawner, task):
Utils.threading.Thread.__init__(self)
self.task = task
"""Task to execute"""
self.spawner = spawner
"""Coordinator object"""
self.setDaemon(1)
self.start()
def run(self):
"""
Processes a single task
"""
try:
if not self.spawner.master.stop:
self.task.process()
finally:
self.spawner.sem.release()
self.spawner.master.out.put(self.task)
self.task = None
self.spawner = None
class Spawner(Utils.threading.Thread):
"""
Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and
spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each
:py:class:`waflib.Task.TaskBase` instance.
"""
def __init__(self, master):
Utils.threading.Thread.__init__(self)
self.master = master
""":py:class:`waflib.Runner.Parallel` producer instance"""
self.sem = Utils.threading.Semaphore(master.numjobs)
"""Bounded semaphore that prevents spawning more than *n* concurrent consumers"""
self.setDaemon(1)
self.start()
def run(self):
"""
Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop`
"""
try:
self.loop()
except Exception:
# Python 2 prints unnecessary messages when shutting down
# we also want to stop the thread properly
pass
def loop(self):
"""
Consumes task objects from the producer; ends when the producer has no more
		tasks to provide.
"""
master = self.master
while 1:
task = master.ready.get()
self.sem.acquire()
task.log_display(task.generator.bld)
Consumer(self, task)
class Parallel(object):
"""
Schedule the tasks obtained from the build context for execution.
"""
def __init__(self, bld, j=2):
"""
The initialization requires a build context reference
for computing the total number of jobs.
"""
self.numjobs = j
"""
Amount of parallel consumers to use
"""
self.bld = bld
"""
Instance of :py:class:`waflib.Build.BuildContext`
"""
self.outstanding = Utils.deque()
"""List of :py:class:`waflib.Task.TaskBase` that may be ready to be executed"""
self.frozen = Utils.deque()
"""List of :py:class:`waflib.Task.TaskBase` that are not ready yet"""
self.ready = Queue(0)
"""List of :py:class:`waflib.Task.TaskBase` ready to be executed by consumers"""
self.out = Queue(0)
"""List of :py:class:`waflib.Task.TaskBase` returned by the task consumers"""
self.count = 0
"""Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
self.processed = 1
"""Amount of tasks processed"""
self.stop = False
"""Error flag to stop the build"""
self.error = []
"""Tasks that could not be executed"""
self.biter = None
"""Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
self.dirty = False
"""
Flag that indicates that the build cache must be saved when a task was executed
(calls :py:meth:`waflib.Build.BuildContext.store`)"""
self.spawner = Spawner(self)
"""
Coordinating daemon thread that spawns thread consumers
"""
def get_next_task(self):
"""
Obtains the next Task instance to run
:rtype: :py:class:`waflib.Task.TaskBase`
"""
if not self.outstanding:
return None
return self.outstanding.popleft()
def postpone(self, tsk):
"""
Adds the task to the list :py:attr:`waflib.Runner.Parallel.frozen`.
The order is scrambled so as to consume as many tasks in parallel as possible.
:param tsk: task instance
:type tsk: :py:class:`waflib.Task.TaskBase`
"""
if random.randint(0, 1):
self.frozen.appendleft(tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"""
Adds the next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
"""
while self.count > self.numjobs * GAP:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
elif self.frozen:
try:
cond = self.deadlock == self.processed
except AttributeError:
pass
else:
if cond:
msg = 'check the build order for the tasks'
for tsk in self.frozen:
if not tsk.run_after:
msg = 'check the methods runnable_status'
break
lst = []
for tsk in self.frozen:
lst.append('%s\t-> %r' % (repr(tsk), [id(x) for x in tsk.run_after]))
raise Errors.WafError('Deadlock detected: %s%s' % (msg, ''.join(lst)))
self.deadlock = self.processed
if self.frozen:
self.outstanding.extend(self.frozen)
self.frozen.clear()
elif not self.count:
self.outstanding.extend(next(self.biter))
self.total = self.bld.total()
break
def add_more_tasks(self, tsk):
"""
If a task provides :py:attr:`waflib.Task.TaskBase.more_tasks`, then the tasks contained
in that list are added to the current build and will be processed before the next build group.
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
if getattr(tsk, 'more_tasks', None):
self.outstanding.extend(tsk.more_tasks)
self.total += len(tsk.more_tasks)
def get_out(self):
"""
Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution.
Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`.
:rtype: :py:attr:`waflib.Task.TaskBase`
"""
tsk = self.out.get()
if not self.stop:
self.add_more_tasks(tsk)
self.count -= 1
self.dirty = True
return tsk
def add_task(self, tsk):
"""
Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them.
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
self.ready.put(tsk)
def skip(self, tsk):
"""
Mark a task as skipped/up-to-date
"""
tsk.hasrun = Task.SKIPPED
def error_handler(self, tsk):
"""
Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, unless
the build is executed with::
$ waf build -k
:param tsk: task instance
:type tsk: :py:attr:`waflib.Task.TaskBase`
"""
if hasattr(tsk, 'scan') and hasattr(tsk, 'uid'):
# TODO waf 2.0 - this breaks encapsulation
try:
del self.bld.imp_sigs[tsk.uid()]
except KeyError:
pass
if not self.bld.keep:
self.stop = True
self.error.append(tsk)
def task_status(self, tsk):
"""
Obtains the task status to decide whether to run it immediately or not.
:return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER`
:rtype: integer
"""
try:
return tsk.runnable_status()
except Exception:
self.processed += 1
tsk.err_msg = Utils.ex_stack()
if not self.stop and self.bld.keep:
self.skip(tsk)
if self.bld.keep == 1:
# if -k stop at the first exception, if -kk try to go as far as possible
if Logs.verbose > 1 or not self.error:
self.error.append(tsk)
self.stop = True
else:
if Logs.verbose > 1:
self.error.append(tsk)
return Task.EXCEPTION
tsk.hasrun = Task.EXCEPTION
self.error_handler(tsk)
return Task.EXCEPTION
def start(self):
"""
Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to
:py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread
has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out`
and marks the build as failed by setting the ``stop`` flag.
If only one job is used, then executes the tasks one by one, without consumers.
"""
self.total = self.bld.total()
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next_task()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
continue
if self.stop: # stop immediately after a failure was detected
break
st = self.task_status(tsk)
if st == Task.RUN_ME:
self.count += 1
self.processed += 1
if self.numjobs == 1:
tsk.log_display(tsk.generator.bld)
try:
tsk.process()
finally:
self.out.put(tsk)
else:
self.add_task(tsk)
if st == Task.ASK_LATER:
self.postpone(tsk)
elif st == Task.SKIP_ME:
self.processed += 1
self.skip(tsk)
self.add_more_tasks(tsk)
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
self.ready.put(None)
assert (self.count == 0 or self.stop)
| agpl-3.0 | -2,180,526,306,143,412,700 | 25.662857 | 104 | 0.673168 | false |
pacoqueen/bbinn | gajim-0.9.1/src/common/logger.py | 1 | 16495 | # -*- coding: utf-8 -*-
## logger.py
##
## Contributors for this file:
## - Yann Le Boulanger <[email protected]>
## - Nikos Kouremenos <[email protected]>
##
## Copyright (C) 2003-2004 Yann Le Boulanger <[email protected]>
## Vincent Hanquez <[email protected]>
## Copyright (C) 2005 Yann Le Boulanger <[email protected]>
## Vincent Hanquez <[email protected]>
## Nikos Kouremenos <[email protected]>
## Dimitur Kirov <[email protected]>
## Travis Shirk <[email protected]>
## Norman Rasmussen <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 2 only.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
# XXX Modified by queen to disable logging so that psqlite is not required for it to work.
# XXX Modified again so that it goes back to using the logger.
# NOTE: It will then need psqlite and gnupg on the clients (no way around it).
import os
import sys
import time
import datetime
import exceptions
import i18n
_ = i18n._
try:
from pysqlite2 import dbapi2 as sqlite
except ImportError:
raise exceptions.PysqliteNotAvailable
# pass
if os.name == 'nt':
try:
# Documents and Settings\[User Name]\Application Data\Gajim\logs.db
LOG_DB_PATH = os.path.join(os.environ['appdata'], 'Gajim', 'logs.db')
except KeyError:
# win9x, ./logs.db
LOG_DB_PATH = 'logs.db'
else: # Unices
LOG_DB_PATH = os.path.expanduser('~/.gajim/logs.db')
try:
LOG_DB_PATH = LOG_DB_PATH.decode(sys.getfilesystemencoding())
except:
pass
class Constants:
def __init__(self):
(
self.JID_NORMAL_TYPE,
self.JID_ROOM_TYPE
) = range(2)
(
self.KIND_STATUS,
self.KIND_GCSTATUS,
self.KIND_GC_MSG,
self.KIND_SINGLE_MSG_RECV,
self.KIND_CHAT_MSG_RECV,
self.KIND_SINGLE_MSG_SENT,
self.KIND_CHAT_MSG_SENT
) = range(7)
(
self.SHOW_ONLINE,
self.SHOW_CHAT,
self.SHOW_AWAY,
self.SHOW_XA,
self.SHOW_DND,
self.SHOW_OFFLINE
) = range(6)
constants = Constants()
class Logger:
def __init__(self):
self.jids_already_in = [] # holds jids that we already have in DB
if not os.path.exists(LOG_DB_PATH):
# this can happen only the first time (the time we create the db)
# db is not created here but in src/common/checks_paths.py
return
self.init_vars()
def init_vars(self):
# if locked, wait up to 20 sec to unlock
# before raise (hopefully should be enough)
self.con = sqlite.connect(LOG_DB_PATH, timeout = 20.0,
isolation_level = 'IMMEDIATE')
self.cur = self.con.cursor()
self.get_jids_already_in_db()
def get_jids_already_in_db(self):
self.cur.execute('SELECT jid FROM jids')
		rows = self.cur.fetchall() # list of tuples: [(u'aaa@bbb',), (u'cc@dd',)]
for row in rows:
# row[0] is first item of row (the only result here, the jid)
self.jids_already_in.append(row[0])
def jid_is_from_pm(self, jid):
		'''if jid is gajim@conf/nkour it is likely a pm one; how do we know
		gajim@conf is not a normal contact and nkour is not just his resource?
		we ask if gajim@conf is already in jids (with type room jid)
		this fails if the user disables logging for the room and only enables it for
		pm (so highly unlikely) and if we fail we do not descend into chaos
		(the user will see the first pm as if it was a message in the room's public chat)
		and after that all is okay'''
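		# e.g. jid_is_from_pm('gajim@conference.server.org/nkour') would be True only
		# if 'gajim@conference.server.org' is already stored in jids with the ROOM
		# type (hypothetical JID, purely for illustration)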
possible_room_jid, possible_nick = jid.split('/', 1)
self.cur.execute('SELECT jid_id FROM jids WHERE jid="%s" AND type=%d' %\
(possible_room_jid, constants.JID_ROOM_TYPE))
row = self.cur.fetchone()
if row is not None:
return True
else:
return False
def get_jid_id(self, jid, typestr = None):
'''jids table has jid and jid_id
logs table has log_id, jid_id, contact_name, time, kind, show, message
so to ask logs we need jid_id that matches our jid in jids table
this method asks jid and returns the jid_id for later sql-ing on logs
'''
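		# Rough schema reminder (see the note in __init__ about where the db is created):
		#   jids(jid_id, jid, type)
		#   logs(log_id, jid_id, contact_name, time, kind, show, message, subject)
		# jid_id is the key that ties a log row back to its JID.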
if jid.find('/') != -1: # if it has a /
jid_is_from_pm = self.jid_is_from_pm(jid)
if not jid_is_from_pm: # it's normal jid with resource
jid = jid.split('/', 1)[0] # remove the resource
if jid in self.jids_already_in: # we already have jids in DB
self.cur.execute('SELECT jid_id FROM jids WHERE jid="%s"' % jid)
jid_id = self.cur.fetchone()[0]
else: # oh! a new jid :), we add it now
if typestr == 'ROOM':
typ = constants.JID_ROOM_TYPE
else:
typ = constants.JID_NORMAL_TYPE
self.cur.execute('INSERT INTO jids (jid, type) VALUES (?, ?)', (jid, typ))
try:
self.con.commit()
except sqlite.OperationalError, e:
print >> sys.stderr, str(e)
jid_id = self.cur.lastrowid
self.jids_already_in.append(jid)
return jid_id
def convert_human_values_to_db_api_values(self, kind, show):
		'''converts from string style to constant ints for db'''
if kind == 'status':
kind_col = constants.KIND_STATUS
elif kind == 'gcstatus':
kind_col = constants.KIND_GCSTATUS
elif kind == 'gc_msg':
kind_col = constants.KIND_GC_MSG
elif kind == 'single_msg_recv':
kind_col = constants.KIND_SINGLE_MSG_RECV
elif kind == 'single_msg_sent':
kind_col = constants.KIND_SINGLE_MSG_SENT
elif kind == 'chat_msg_recv':
kind_col = constants.KIND_CHAT_MSG_RECV
elif kind == 'chat_msg_sent':
kind_col = constants.KIND_CHAT_MSG_SENT
if show == 'online':
show_col = constants.SHOW_ONLINE
elif show == 'chat':
show_col = constants.SHOW_CHAT
elif show == 'away':
show_col = constants.SHOW_AWAY
elif show == 'xa':
show_col = constants.SHOW_XA
elif show == 'dnd':
show_col = constants.SHOW_DND
elif show == 'offline':
show_col = constants.SHOW_OFFLINE
elif show is None:
show_col = None
else: # invisible in GC when someone goes invisible
# it's a RFC violation .... but we should not crash
show_col = 'UNKNOWN'
return kind_col, show_col
def commit_to_db(self, values):
#print 'saving', values
sql = 'INSERT INTO logs (jid_id, contact_name, time, kind, show, message, subject) VALUES (?, ?, ?, ?, ?, ?, ?)'
self.cur.execute(sql, values)
try:
self.con.commit()
except sqlite.OperationalError, e:
print >> sys.stderr, str(e)
def write(self, kind, jid, message = None, show = None, tim = None, subject = None):
'''write a row (status, gcstatus, message etc) to logs database
kind can be status, gcstatus, gc_msg, (we only recv for those 3),
single_msg_recv, chat_msg_recv, chat_msg_sent, single_msg_sent
we cannot know if it is pm or normal chat message, we try to guess
see jid_is_from_pm() which is called by get_jid_id()
we analyze jid and store it as follows:
jids.jid text column will hold JID if TC-related, room_jid if GC-related,
ROOM_JID/nick if pm-related.'''
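		# Hypothetical usage (jids and texts made up, purely for illustration):
		#   logger.write('chat_msg_recv', 'friend@server.org/Home', message='hello')
		#   logger.write('gcstatus', '[email protected]/nick', message='brb', show='away')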
if self.jids_already_in == []: # only happens if we just created the db
self.con = sqlite.connect(LOG_DB_PATH, timeout = 20.0,
isolation_level = 'IMMEDIATE')
self.cur = self.con.cursor()
jid = jid.lower()
contact_name_col = None # holds nickname for kinds gcstatus, gc_msg
# message holds the message unless kind is status or gcstatus,
# then it holds status message
message_col = message
subject_col = subject
if tim:
time_col = int(float(time.mktime(tim)))
else:
time_col = int(float(time.time()))
kind_col, show_col = self.convert_human_values_to_db_api_values(kind,
show)
		# now we may need to take extra care with some values in the columns
if kind == 'status': # we store (not None) time, jid, show, msg
# status for roster items
jid_id = self.get_jid_id(jid)
if show is None: # show is None (xmpp), but we say that 'online'
show_col = constants.SHOW_ONLINE
elif kind == 'gcstatus':
# status in ROOM (for pm status see status)
if show is None: # show is None (xmpp), but we say that 'online'
show_col = constants.SHOW_ONLINE
jid, nick = jid.split('/', 1)
jid_id = self.get_jid_id(jid, 'ROOM') # re-get jid_id for the new jid
contact_name_col = nick
elif kind == 'gc_msg':
if jid.find('/') != -1: # if it has a /
jid, nick = jid.split('/', 1)
else:
# it's server message f.e. error message
# when user tries to ban someone but he's not allowed to
nick = None
jid_id = self.get_jid_id(jid, 'ROOM') # re-get jid_id for the new jid
contact_name_col = nick
else:
jid_id = self.get_jid_id(jid)
if show_col == 'UNKNOWN': # unknown show, do not log
return
values = (jid_id, contact_name_col, time_col, kind_col, show_col,
message_col, subject_col)
self.commit_to_db(values)
def get_last_conversation_lines(self, jid, restore_how_many_rows,
pending_how_many, timeout):
		'''accepts how many rows to restore and when to time them out (in minutes)
		(mark them as too old) and the number of messages that are in the queue
		and are already logged but pending to be viewed,
		returns a list of tuples containing time, kind, message,
		a list with an empty tuple if nothing was found to meet our demands'''
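		# e.g. get_last_conversation_lines('friend@server.org', 5, 2, 60) would restore
		# 5 lines, skip the 2 most recent (already queued) ones and ignore anything
		# older than 60 minutes (made-up values, purely for illustration)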
jid = jid.lower()
jid_id = self.get_jid_id(jid)
now = int(float(time.time()))
timed_out = now - (timeout * 60) # before that they are too old
# so if we ask last 5 lines and we have 2 pending we get
# 3 - 8 (we avoid the last 2 lines but we still return 5 asked)
self.cur.execute('''
SELECT time, kind, message FROM logs
WHERE jid_id = %d AND kind IN (%d, %d, %d, %d) AND time > %d
ORDER BY time DESC LIMIT %d OFFSET %d
''' % (jid_id, constants.KIND_SINGLE_MSG_RECV, constants.KIND_CHAT_MSG_RECV,
constants.KIND_SINGLE_MSG_SENT, constants.KIND_CHAT_MSG_SENT,
timed_out, restore_how_many_rows, pending_how_many)
)
results = self.cur.fetchall()
results.reverse()
return results
def get_unix_time_from_date(self, year, month, day):
# year (fe 2005), month (fe 11), day (fe 25)
# returns time in seconds for the second that starts that date since epoch
# gimme unixtime from year month day:
d = datetime.date(year, month, day)
local_time = d.timetuple() # time tupple (compat with time.localtime())
start_of_day = int(time.mktime(local_time)) # we have time since epoch baby :)
return start_of_day
def get_conversation_for_date(self, jid, year, month, day):
'''returns contact_name, time, kind, show, message
		for each row in a list of tuples,
		returns a list with an empty tuple if we found nothing to meet our demands'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
start_of_day = self.get_unix_time_from_date(year, month, day)
seconds_in_a_day = 86400 # 60 * 60 * 24
last_second_of_day = start_of_day + seconds_in_a_day - 1
self.cur.execute('''
SELECT contact_name, time, kind, show, message FROM logs
WHERE jid_id = %d
AND time BETWEEN %d AND %d
ORDER BY time
''' % (jid_id, start_of_day, last_second_of_day))
results = self.cur.fetchall()
return results
def get_search_results_for_query(self, jid, query):
'''returns contact_name, time, kind, show, message
		for each row in a list of tuples,
		returns a list with an empty tuple if we found nothing to meet our demands'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
if False: #query.startswith('SELECT '): # it's SQL query
try:
self.cur.execute(query)
except sqlite.OperationalError, e:
results = [('', '', '', '', str(e))]
return results
else: # user just typed something, we search in message column
like_sql = '%' + query + '%'
self.cur.execute('''
SELECT contact_name, time, kind, show, message, subject FROM logs
WHERE jid_id = ? AND message LIKE ?
ORDER BY time
''', (jid_id, like_sql))
results = self.cur.fetchall()
return results
def get_days_with_logs(self, jid, year, month, max_day):
'''returns the list of days that have logs (not status messages)'''
		return # XXX early return: the code below is never reached (apparently disabled, see module header notes)
jid = jid.lower()
jid_id = self.get_jid_id(jid)
list = []
		# First select all dates of the month with logs we want
start_of_month = self.get_unix_time_from_date(year, month, 1)
seconds_in_a_day = 86400 # 60 * 60 * 24
last_second_of_month = start_of_month + (seconds_in_a_day * max_day) - 1
self.cur.execute('''
SELECT time FROM logs
WHERE jid_id = %d
AND time BETWEEN %d AND %d
AND kind NOT IN (%d, %d)
ORDER BY time
''' % (jid_id, start_of_month, last_second_of_month,
constants.KIND_STATUS, constants.KIND_GCSTATUS))
result = self.cur.fetchall()
		# Copy all interesting times into a temporary table
self.cur.execute('CREATE TEMPORARY TABLE blabla(time,INTEGER)')
for line in result:
self.cur.execute('''
INSERT INTO blabla (time) VALUES (%d)
''' % (line[0]))
#then search in this small temp table for each day
for day in xrange(1, max_day):
start_of_day = self.get_unix_time_from_date(year, month, day)
last_second_of_day = start_of_day + seconds_in_a_day - 1
# just ask one row to see if we have sth for this date
self.cur.execute('''
SELECT time FROM blabla
WHERE time BETWEEN %d AND %d
LIMIT 1
''' % (start_of_day, last_second_of_day))
result = self.cur.fetchone()
if result:
list[0:0]=[day]
#Delete temporary table
self.cur.execute('DROP TABLE blabla')
result = self.cur.fetchone()
return list
def get_last_date_that_has_logs(self, jid):
'''returns last time (in seconds since EPOCH) for which
we had logs (excluding statuses)'''
jid = jid.lower()
jid_id = self.get_jid_id(jid)
self.cur.execute('''
SELECT time FROM logs
WHERE jid_id = ?
AND kind NOT IN (?, ?)
ORDER BY time DESC LIMIT 1
''', (jid_id, constants.KIND_STATUS, constants.KIND_GCSTATUS))
results = self.cur.fetchone()
if results is not None:
result = results[0]
else:
result = None
return result
| gpl-2.0 | 279,528,875,289,906,300 | 38.266667 | 120 | 0.563182 | false |
joshwatson/binaryninja-api | python/mediumlevelil.py | 1 | 51471 | # Copyright (c) 2018-2021 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
import struct
# Binary Ninja components
import binaryninja
from binaryninja import _binaryninjacore as core
from binaryninja.enums import MediumLevelILOperation, InstructionTextTokenType, ILBranchDependence, DataFlowQueryOption
from binaryninja import basicblock #required for MediumLevelILBasicBlock argument
from binaryninja import function
from binaryninja import types
from binaryninja import lowlevelil
# 2-3 compatibility
from binaryninja import range
class SSAVariable(object):
def __init__(self, var, version):
self._var = var
self._version = version
def __repr__(self):
return "<ssa %s version %d>" % (repr(self._var), self._version)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (self._var, self._version) == (other.var, other.version)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash((self._var, self._version))
@property
def var(self):
""" """
return self._var
@var.setter
def var(self, value):
self._var = value
@property
def version(self):
""" """
return self._version
@version.setter
def version(self, value):
self._version = value
class MediumLevelILLabel(object):
def __init__(self, handle = None):
if handle is None:
self.handle = (core.BNMediumLevelILLabel * 1)()
core.BNMediumLevelILInitLabel(self.handle)
else:
self.handle = handle
class MediumLevelILOperationAndSize(object):
def __init__(self, operation, size):
self._operation = operation
self._size = size
def __repr__(self):
if self._size == 0:
return "<%s>" % self._operation.name
return "<%s %d>" % (self._operation.name, self._size)
def __eq__(self, other):
if isinstance(other, MediumLevelILOperation):
return other == self._operation
if isinstance(other, self.__class__):
return (other.size, other.operation) == (self._size, self._operation)
return NotImplemented
def __ne__(self, other):
if isinstance(other, MediumLevelILOperation) or isinstance(other, self.__class__):
return not (self == other)
return NotImplemented
def __hash__(self):
return hash((self._operation, self._size))
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
class MediumLevelILInstruction(object):
"""
``class MediumLevelILInstruction`` Medium Level Intermediate Language Instructions are infinite length tree-based
instructions. Tree-based instructions use infix notation with the left hand operand being the destination operand.
Infix notation is thus more natural to read than other notations (e.g. x86 ``mov eax, 0`` vs. MLIL ``eax = 0``).
"""
ILOperations = {
MediumLevelILOperation.MLIL_NOP: [],
MediumLevelILOperation.MLIL_SET_VAR: [("dest", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_FIELD: [("dest", "var"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT: [("high", "var"), ("low", "var"), ("src", "expr")],
MediumLevelILOperation.MLIL_LOAD: [("src", "expr")],
MediumLevelILOperation.MLIL_LOAD_STRUCT: [("src", "expr"), ("offset", "int")],
MediumLevelILOperation.MLIL_STORE: [("dest", "expr"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT: [("dest", "expr"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR: [("src", "var")],
MediumLevelILOperation.MLIL_VAR_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT: [("high", "var"), ("low", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF: [("src", "var")],
MediumLevelILOperation.MLIL_ADDRESS_OF_FIELD: [("src", "var"), ("offset", "int")],
MediumLevelILOperation.MLIL_CONST: [("constant", "int")],
MediumLevelILOperation.MLIL_CONST_PTR: [("constant", "int")],
MediumLevelILOperation.MLIL_EXTERN_PTR: [("constant", "int"), ("offset", "int")],
MediumLevelILOperation.MLIL_FLOAT_CONST: [("constant", "float")],
MediumLevelILOperation.MLIL_IMPORT: [("constant", "int")],
MediumLevelILOperation.MLIL_ADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ADC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_SUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SBB: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_AND: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_OR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_XOR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_LSR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ASR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_ROL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RLC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_ROR: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_RRC: [("left", "expr"), ("right", "expr"), ("carry", "expr")],
MediumLevelILOperation.MLIL_MUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MULS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_DIVS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODU_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_MODS_DP: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_NEG: [("src", "expr")],
MediumLevelILOperation.MLIL_NOT: [("src", "expr")],
MediumLevelILOperation.MLIL_SX: [("src", "expr")],
MediumLevelILOperation.MLIL_ZX: [("src", "expr")],
MediumLevelILOperation.MLIL_LOW_PART: [("src", "expr")],
MediumLevelILOperation.MLIL_JUMP: [("dest", "expr")],
MediumLevelILOperation.MLIL_JUMP_TO: [("dest", "expr"), ("targets", "target_map")],
MediumLevelILOperation.MLIL_RET_HINT: [("dest", "expr")],
MediumLevelILOperation.MLIL_CALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_CALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT: [("dest", "var_list")],
MediumLevelILOperation.MLIL_CALL_PARAM: [("src", "var_list")],
MediumLevelILOperation.MLIL_RET: [("src", "expr_list")],
MediumLevelILOperation.MLIL_NORET: [],
MediumLevelILOperation.MLIL_IF: [("condition", "expr"), ("true", "int"), ("false", "int")],
MediumLevelILOperation.MLIL_GOTO: [("dest", "int")],
MediumLevelILOperation.MLIL_CMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SLE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_ULE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_SGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_CMP_UGT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_TEST_BIT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_BOOL_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_ADD_OVERFLOW: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SYSCALL: [("output", "var_list"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL: [("output", "var_list"), ("dest", "expr"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_BP: [],
MediumLevelILOperation.MLIL_TRAP: [("vector", "int")],
MediumLevelILOperation.MLIL_INTRINSIC: [("output", "var_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_INTRINSIC_SSA: [("output", "var_ssa_list"), ("intrinsic", "intrinsic"), ("params", "expr_list")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT: [("dest", "var")],
MediumLevelILOperation.MLIL_FREE_VAR_SLOT_SSA: [("prev", "var_ssa_dest_and_src")],
MediumLevelILOperation.MLIL_UNDEF: [],
MediumLevelILOperation.MLIL_UNIMPL: [],
MediumLevelILOperation.MLIL_UNIMPL_MEM: [("src", "expr")],
MediumLevelILOperation.MLIL_FADD: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSUB: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FMUL: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FDIV: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FSQRT: [("src", "expr")],
MediumLevelILOperation.MLIL_FNEG: [("src", "expr")],
MediumLevelILOperation.MLIL_FABS: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_INT_TO_FLOAT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOAT_CONV: [("src", "expr")],
MediumLevelILOperation.MLIL_ROUND_TO_INT: [("src", "expr")],
MediumLevelILOperation.MLIL_FLOOR: [("src", "expr")],
MediumLevelILOperation.MLIL_CEIL: [("src", "expr")],
MediumLevelILOperation.MLIL_FTRUNC: [("src", "expr")],
MediumLevelILOperation.MLIL_FCMP_E: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_NE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_LE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GE: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_GT: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_O: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_FCMP_UO: [("left", "expr"), ("right", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA: [("dest", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED: [("prev", "var_ssa_dest_and_src"), ("src", "expr")],
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD: [("prev", "var_ssa_dest_and_src"), ("offset", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_SSA: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_SSA_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_ALIASED: [("src", "var_ssa")],
MediumLevelILOperation.MLIL_VAR_ALIASED_FIELD: [("src", "var_ssa"), ("offset", "int")],
MediumLevelILOperation.MLIL_VAR_SPLIT_SSA: [("high", "var_ssa"), ("low", "var_ssa")],
MediumLevelILOperation.MLIL_CALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_SYSCALL_SSA: [("output", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA: [("output", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_TAILCALL_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr_list"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA: [("output", "expr"), ("dest", "expr"), ("params", "expr"), ("stack", "expr")],
MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA: [("dest_memory", "int"), ("dest", "var_ssa_list")],
MediumLevelILOperation.MLIL_CALL_PARAM_SSA: [("src_memory", "int"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_LOAD_SSA: [("src", "expr"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_LOAD_STRUCT_SSA: [("src", "expr"), ("offset", "int"), ("src_memory", "int")],
MediumLevelILOperation.MLIL_STORE_SSA: [("dest", "expr"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_STORE_STRUCT_SSA: [("dest", "expr"), ("offset", "int"), ("dest_memory", "int"), ("src_memory", "int"), ("src", "expr")],
MediumLevelILOperation.MLIL_VAR_PHI: [("dest", "var_ssa"), ("src", "var_ssa_list")],
MediumLevelILOperation.MLIL_MEM_PHI: [("dest_memory", "int"), ("src_memory", "int_list")]
}
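	# Each entry above maps an operation to its operand names and operand kinds:
	# e.g. MLIL_ADD carries two sub-expressions ("left", "right"), while
	# MLIL_SET_VAR writes a variable ("dest") from an expression ("src").
	# __init__ below walks this table to populate the matching attributes.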
def __init__(self, func, expr_index, instr_index=None):
instr = core.BNGetMediumLevelILByIndex(func.handle, expr_index)
self._function = func
self._expr_index = expr_index
if instr_index is None:
self._instr_index = core.BNGetMediumLevelILInstructionForExpr(func.handle, expr_index)
else:
self._instr_index = instr_index
self._operation = MediumLevelILOperation(instr.operation)
self._size = instr.size
self._address = instr.address
self._source_operand = instr.sourceOperand
operands = MediumLevelILInstruction.ILOperations[instr.operation]
self._operands = []
i = 0
for operand in operands:
name, operand_type = operand
if operand_type == "int":
value = instr.operands[i]
value = (value & ((1 << 63) - 1)) - (value & (1 << 63))
elif operand_type == "float":
if instr.size == 4:
value = struct.unpack("f", struct.pack("I", instr.operands[i] & 0xffffffff))[0]
elif instr.size == 8:
value = struct.unpack("d", struct.pack("Q", instr.operands[i]))[0]
else:
value = instr.operands[i]
elif operand_type == "expr":
value = MediumLevelILInstruction(func, instr.operands[i])
elif operand_type == "intrinsic":
value = lowlevelil.ILIntrinsic(func.arch, instr.operands[i])
elif operand_type == "var":
value = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
elif operand_type == "var_ssa":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
version = instr.operands[i + 1]
i += 1
value = SSAVariable(var, version)
elif operand_type == "var_ssa_dest_and_src":
var = function.Variable.from_identifier(self._function.source_function, instr.operands[i])
dest_version = instr.operands[i + 1]
src_version = instr.operands[i + 2]
i += 2
self._operands.append(SSAVariable(var, dest_version))
#TODO: documentation for dest
self.dest = SSAVariable(var, dest_version)
value = SSAVariable(var, src_version)
elif operand_type == "int_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
value = []
for j in range(count.value):
value.append(operand_list[j])
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(function.Variable.from_identifier(self._function.source_function, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "var_ssa_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value // 2):
var_id = operand_list[j * 2]
var_version = operand_list[(j * 2) + 1]
value.append(SSAVariable(function.Variable.from_identifier(self._function.source_function,
var_id), var_version))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "expr_list":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = []
for j in range(count.value):
value.append(MediumLevelILInstruction(func, operand_list[j]))
core.BNMediumLevelILFreeOperandList(operand_list)
elif operand_type == "target_map":
count = ctypes.c_ulonglong()
operand_list = core.BNMediumLevelILGetOperandList(func.handle, self._expr_index, i, count)
i += 1
value = {}
for j in range(count.value // 2):
key = operand_list[j * 2]
target = operand_list[(j * 2) + 1]
value[key] = target
core.BNMediumLevelILFreeOperandList(operand_list)
self._operands.append(value)
self.__dict__[name] = value
i += 1
def __str__(self):
tokens = self.tokens
if tokens is None:
return "invalid"
result = ""
for token in tokens:
result += token.text
return result
def __repr__(self):
return "<il: %s>" % str(self)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self._expr_index == other.expr_index
def __lt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index < other.expr_index
def __le__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index <= other.expr_index
def __gt__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index > other.expr_index
def __ge__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._function == other.function and self.expr_index >= other.expr_index
def __hash__(self):
return hash((self._instr_index, self._function))
@property
def tokens(self):
"""MLIL tokens (read-only)"""
count = ctypes.c_ulonglong()
tokens = ctypes.POINTER(core.BNInstructionTextToken)()
if ((self._instr_index is not None) and (self._function.source_function is not None) and
(self._expr_index == core.BNGetMediumLevelILIndexForInstruction(self._function.handle, self._instr_index))):
if not core.BNGetMediumLevelILInstructionText(self._function.handle, self._function.source_function.handle,
self._function.arch.handle, self._instr_index, tokens, count):
return None
else:
if not core.BNGetMediumLevelILExprText(self._function.handle, self._function.arch.handle,
self._expr_index, tokens, count):
return None
result = binaryninja.function.InstructionTextToken.get_instruction_lines(tokens, count.value)
core.BNFreeInstructionText(tokens, count.value)
return result
@property
def il_basic_block(self):
"""IL basic block object containing this expression (read-only) (only available on finalized functions)"""
return MediumLevelILBasicBlock(self._function.source_function.view, core.BNGetMediumLevelILBasicBlockForInstruction(self._function.handle, self._instr_index), self._function)
@property
def ssa_form(self):
"""SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.ssa_form,
core.BNGetMediumLevelILSSAExprIndex(self._function.handle, self._expr_index))
@property
def non_ssa_form(self):
"""Non-SSA form of expression (read-only)"""
return MediumLevelILInstruction(self._function.non_ssa_form,
core.BNGetMediumLevelILNonSSAExprIndex(self._function.handle, self._expr_index))
@property
def value(self):
"""Value of expression if constant or a known value (read-only)"""
value = core.BNGetMediumLevelILExprValue(self._function.handle, self._expr_index)
result = function.RegisterValue(self._function.arch, value)
return result
@property
def possible_values(self):
"""Possible values of expression using path-sensitive static data flow analysis (read-only)"""
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, None, 0)
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
@property
def branch_dependence(self):
"""Set of branching instructions that must take the true or false path to reach this instruction"""
count = ctypes.c_ulonglong()
deps = core.BNGetAllMediumLevelILBranchDependence(self._function.handle, self._instr_index, count)
result = {}
for i in range(0, count.value):
result[deps[i].branch] = ILBranchDependence(deps[i].dependence)
core.BNFreeILBranchDependenceList(deps)
return result
@property
def low_level_il(self):
"""Low level IL form of this expression"""
expr = self._function.get_low_level_il_expr_index(self._expr_index)
if expr is None:
return None
return lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def llils(self):
exprs = self._function.get_low_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(lowlevelil.LowLevelILInstruction(self._function.low_level_il.ssa_form, expr))
return result
@property
def high_level_il(self):
"""High level IL form of this expression"""
expr = self._function.get_high_level_il_expr_index(self._expr_index)
if expr is None:
return None
return binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr)
@property
def hlil(self):
"""Alias for high_level_il"""
return self.high_level_il
@property
def hlils(self):
exprs = self._function.get_high_level_il_expr_indexes(self.expr_index)
result = []
for expr in exprs:
result.append(binaryninja.highlevelil.HighLevelILInstruction(self._function.high_level_il, expr))
return result
@property
def ssa_memory_version(self):
"""Version of active memory contents in SSA form for this instruction"""
return core.BNGetMediumLevelILSSAMemoryVersionAtILInstruction(self._function.handle, self._instr_index)
@property
def prefix_operands(self):
"""All operands in the expression tree in prefix order"""
result = [MediumLevelILOperationAndSize(self._operation, self._size)]
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.prefix_operands
else:
result.append(operand)
return result
@property
def postfix_operands(self):
"""All operands in the expression tree in postfix order"""
result = []
for operand in self._operands:
if isinstance(operand, MediumLevelILInstruction):
result += operand.postfix_operands
else:
result.append(operand)
result.append(MediumLevelILOperationAndSize(self._operation, self._size))
return result
@property
def vars_written(self):
"""List of variables written by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SSA, MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED, MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD,
MediumLevelILOperation.MLIL_VAR_PHI]:
return [self.dest]
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA]:
return [self.high, self.low]
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL]:
return self.output
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA,
MediumLevelILOperation.MLIL_TAILCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.output.vars_written
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return self.dest
return []
@property
def vars_read(self):
"""List of variables read by instruction"""
if self._operation in [MediumLevelILOperation.MLIL_SET_VAR, MediumLevelILOperation.MLIL_SET_VAR_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT, MediumLevelILOperation.MLIL_SET_VAR_SSA,
MediumLevelILOperation.MLIL_SET_VAR_SPLIT_SSA, MediumLevelILOperation.MLIL_SET_VAR_ALIASED]:
return self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_SET_VAR_SSA_FIELD,
MediumLevelILOperation.MLIL_SET_VAR_ALIASED_FIELD]:
return [self.prev] + self.src.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL, MediumLevelILOperation.MLIL_SYSCALL, MediumLevelILOperation.MLIL_TAILCALL,
MediumLevelILOperation.MLIL_CALL_SSA, MediumLevelILOperation.MLIL_SYSCALL_SSA, MediumLevelILOperation.MLIL_TAILCALL_SSA]:
result = []
for param in self.params:
result += param.vars_read
return result
elif self._operation in [MediumLevelILOperation.MLIL_CALL_UNTYPED, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED,
MediumLevelILOperation.MLIL_CALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_SYSCALL_UNTYPED_SSA, MediumLevelILOperation.MLIL_TAILCALL_UNTYPED_SSA]:
return self.params.vars_read
elif self._operation in [MediumLevelILOperation.MLIL_CALL_PARAM, MediumLevelILOperation.MLIL_CALL_PARAM_SSA,
MediumLevelILOperation.MLIL_VAR_PHI]:
return self.src
elif self._operation in [MediumLevelILOperation.MLIL_CALL_OUTPUT, MediumLevelILOperation.MLIL_CALL_OUTPUT_SSA]:
return []
result = []
for operand in self._operands:
if (isinstance(operand, function.Variable)) or (isinstance(operand, SSAVariable)):
result.append(operand)
elif isinstance(operand, MediumLevelILInstruction):
result += operand.vars_read
return result
@property
def expr_type(self):
"""Type of expression"""
result = core.BNGetMediumLevelILExprType(self._function.handle, self._expr_index)
if result.type:
platform = None
if self._function.source_function:
platform = self._function.source_function.platform
return types.Type(result.type, platform = platform, confidence = result.confidence)
return None
def get_possible_values(self, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleExprValues(self._function.handle, self._expr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_possible_values(self, ssa_var, options = []):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleSSAVarValues(self._function.handle, var_data, ssa_var.version,
self._instr_index, option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_ssa_var_version(self, var):
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
return core.BNGetMediumLevelILSSAVarVersionAtILInstruction(self._function.handle, var_data, self._instr_index)
def get_var_for_reg(self, reg):
reg = self._function.arch.get_reg_index(reg)
result = core.BNGetMediumLevelILVariableForRegisterAtInstruction(self._function.handle, reg, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_flag(self, flag):
flag = self._function.arch.get_flag_index(flag)
result = core.BNGetMediumLevelILVariableForFlagAtInstruction(self._function.handle, flag, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_var_for_stack_location(self, offset):
result = core.BNGetMediumLevelILVariableForStackLocationAtInstruction(self._function.handle, offset, self._instr_index)
return function.Variable(self._function.source_function, result.type, result.index, result.storage)
def get_reg_value(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAtInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_reg_value_after(self, reg):
reg = self._function.arch.get_reg_index(reg)
value = core.BNGetMediumLevelILRegisterValueAfterInstruction(self._function.handle, reg, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_reg_values(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAtInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_reg_values_after(self, reg, options = []):
reg = self._function.arch.get_reg_index(reg)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleRegisterValuesAfterInstruction(self._function.handle, reg, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_flag_value(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAtInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_flag_value_after(self, flag):
flag = self._function.arch.get_flag_index(flag)
value = core.BNGetMediumLevelILFlagValueAfterInstruction(self._function.handle, flag, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_flag_values(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAtInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_flag_values_after(self, flag, options = []):
flag = self._function.arch.get_flag_index(flag)
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleFlagValuesAfterInstruction(self._function.handle, flag, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_stack_contents(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_stack_contents_after(self, offset, size):
value = core.BNGetMediumLevelILStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index)
result = function.RegisterValue(self._function.arch, value)
return result
def get_possible_stack_contents(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAtInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_possible_stack_contents_after(self, offset, size, options = []):
option_array = (ctypes.c_int * len(options))()
idx = 0
for option in options:
option_array[idx] = option
idx += 1
value = core.BNGetMediumLevelILPossibleStackContentsAfterInstruction(self._function.handle, offset, size, self._instr_index,
option_array, len(options))
result = function.PossibleValueSet(self._function.arch, value)
core.BNFreePossibleValueSet(value)
return result
def get_branch_dependence(self, branch_instr):
return ILBranchDependence(core.BNGetMediumLevelILBranchDependence(self._function.handle, self._instr_index, branch_instr))
@property
def function(self):
""" """
return self._function
@property
def expr_index(self):
""" """
return self._expr_index
@property
def instr_index(self):
""" """
return self._instr_index
@property
def operation(self):
""" """
return self._operation
@property
def size(self):
""" """
return self._size
@property
def address(self):
""" """
return self._address
@property
def source_operand(self):
""" """
return self._source_operand
@property
def operands(self):
""" """
return self._operands
class MediumLevelILExpr(object):
"""
	``class MediumLevelILExpr`` holds the index of IL Expressions.
.. note:: This class shouldn't be instantiated directly. Rather the helper members of MediumLevelILFunction should be \
used instead.
"""
def __init__(self, index):
self._index = index
@property
def index(self):
""" """
return self._index
@index.setter
def index(self, value):
self._index = value
class MediumLevelILFunction(object):
"""
	``class MediumLevelILFunction`` contains the list of MediumLevelILExpr objects that make up a function. MediumLevelILExpr
	objects can be added to the MediumLevelILFunction by calling :func:`append` and passing the result of the various class
	methods, which return MediumLevelILExpr objects.
"""
def __init__(self, arch = None, handle = None, source_func = None):
self._arch = arch
self._source_function = source_func
if handle is not None:
self.handle = core.handle_of_type(handle, core.BNMediumLevelILFunction)
if self._source_function is None:
self._source_function = binaryninja.function.Function(handle = core.BNGetMediumLevelILOwnerFunction(self.handle))
if self._arch is None:
self._arch = self._source_function.arch
else:
if self._source_function is None:
self.handle = None
raise ValueError("IL functions must be created with an associated function")
if self._arch is None:
self._arch = self._source_function.arch
func_handle = self._source_function.handle
self.handle = core.BNCreateMediumLevelILFunction(arch.handle, func_handle)
def __del__(self):
if self.handle is not None:
core.BNFreeMediumLevelILFunction(self.handle)
def __repr__(self):
arch = self.source_function.arch
if arch:
return "<mlil func: %s@%#x>" % (arch.name, self.source_function.start)
else:
return "<mlil func: %#x>" % self.source_function.start
def __len__(self):
return int(core.BNGetMediumLevelILInstructionCount(self.handle))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)
def __ne__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return not (self == other)
def __hash__(self):
return hash(('MLIL', self._source_function))
def __getitem__(self, i):
if isinstance(i, slice) or isinstance(i, tuple):
raise IndexError("expected integer instruction index")
if isinstance(i, MediumLevelILExpr):
return MediumLevelILInstruction(self, i.index)
# for backwards compatibility
if isinstance(i, MediumLevelILInstruction):
return i
if i < -len(self) or i >= len(self):
raise IndexError("index out of range")
if i < 0:
i = len(self) + i
return MediumLevelILInstruction(self, core.BNGetMediumLevelILIndexForInstruction(self.handle, i), i)
def __setitem__(self, i, j):
raise IndexError("instruction modification not implemented")
def __iter__(self):
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
view = None
if self._source_function is not None:
view = self._source_function.view
try:
for i in range(0, count.value):
yield MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self)
finally:
core.BNFreeBasicBlockList(blocks, count.value)
@property
def current_address(self):
"""Current IL Address (read/write)"""
return core.BNMediumLevelILGetCurrentAddress(self.handle)
@current_address.setter
def current_address(self, value):
core.BNMediumLevelILSetCurrentAddress(self.handle, self._arch.handle, value)
def set_current_address(self, value, arch = None):
if arch is None:
arch = self._arch
core.BNMediumLevelILSetCurrentAddress(self.handle, arch.handle, value)
@property
def basic_blocks(self):
"""list of MediumLevelILBasicBlock objects (read-only)"""
count = ctypes.c_ulonglong()
blocks = core.BNGetMediumLevelILBasicBlockList(self.handle, count)
result = []
view = None
if self._source_function is not None:
view = self._source_function.view
for i in range(0, count.value):
result.append(MediumLevelILBasicBlock(view, core.BNNewBasicBlockReference(blocks[i]), self))
core.BNFreeBasicBlockList(blocks, count.value)
return result
@property
def instructions(self):
"""A generator of mlil instructions of the current function"""
for block in self.basic_blocks:
for i in block:
yield i
@property
def ssa_form(self):
"""Medium level IL in SSA form (read-only)"""
result = core.BNGetMediumLevelILSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def non_ssa_form(self):
"""Medium level IL in non-SSA (default) form (read-only)"""
result = core.BNGetMediumLevelILNonSSAForm(self.handle)
if not result:
return None
return MediumLevelILFunction(self._arch, result, self._source_function)
@property
def low_level_il(self):
"""Low level IL for this function"""
result = core.BNGetLowLevelILForMediumLevelIL(self.handle)
if not result:
return None
return lowlevelil.LowLevelILFunction(self._arch, result, self._source_function)
@property
def llil(self):
"""Alias for low_level_il"""
return self.low_level_il
@property
def high_level_il(self):
"""High level IL for this medium level IL."""
result = core.BNGetHighLevelILForMediumLevelIL(self.handle)
if not result:
return None
return binaryninja.highlevelil.HighLevelILFunction(self._arch, result, self._source_function)
@property
def hlil(self):
return self.high_level_il
def get_instruction_start(self, addr, arch = None):
if arch is None:
arch = self._arch
result = core.BNMediumLevelILGetInstructionStart(self.handle, arch.handle, addr)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return result
def expr(self, operation, a = 0, b = 0, c = 0, d = 0, e = 0, size = 0):
if isinstance(operation, str):
operation = MediumLevelILOperation[operation]
elif isinstance(operation, MediumLevelILOperation):
operation = operation.value
return MediumLevelILExpr(core.BNMediumLevelILAddExpr(self.handle, operation, size, a, b, c, d, e))
def append(self, expr):
"""
``append`` adds the MediumLevelILExpr ``expr`` to the current MediumLevelILFunction.
:param MediumLevelILExpr expr: the MediumLevelILExpr to add to the current MediumLevelILFunction
:return: number of MediumLevelILExpr in the current function
:rtype: int
"""
return core.BNMediumLevelILAddInstruction(self.handle, expr.index)
def goto(self, label):
"""
``goto`` returns a goto expression which jumps to the provided MediumLevelILLabel.
:param MediumLevelILLabel label: Label to jump to
:return: the MediumLevelILExpr that jumps to the provided label
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILGoto(self.handle, label.handle))
def if_expr(self, operand, t, f):
"""
		``if_expr`` returns an ``if`` expression that evaluates the condition ``operand`` and jumps to the MediumLevelILLabel
		``t`` when the condition is non-zero, or to ``f`` when it is zero.
:param MediumLevelILExpr operand: comparison expression to evaluate.
:param MediumLevelILLabel t: Label for the true branch
:param MediumLevelILLabel f: Label for the false branch
:return: the MediumLevelILExpr for the if expression
:rtype: MediumLevelILExpr
"""
return MediumLevelILExpr(core.BNMediumLevelILIf(self.handle, operand.index, t.handle, f.handle))
def mark_label(self, label):
"""
``mark_label`` assigns a MediumLevelILLabel to the current IL address.
:param MediumLevelILLabel label:
:rtype: None
"""
core.BNMediumLevelILMarkLabel(self.handle, label.handle)
def add_label_list(self, labels):
"""
``add_label_list`` returns a label list expression for the given list of MediumLevelILLabel objects.
:param labels: the list of MediumLevelILLabel to get a label list expression from
:type labels: list(MediumLevelILLabel)
:return: the label list expression
:rtype: MediumLevelILExpr
"""
label_list = (ctypes.POINTER(core.BNMediumLevelILLabel) * len(labels))()
for i in range(len(labels)):
label_list[i] = labels[i].handle
return MediumLevelILExpr(core.BNMediumLevelILAddLabelList(self.handle, label_list, len(labels)))
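	# Hedged sketch of emitting a conditional using the label helpers above
	# (assumes MediumLevelILLabel from this module and an existing condition
	# expression ``cond`` built with :func:`expr`):
	#
	#     t, f = MediumLevelILLabel(), MediumLevelILLabel()
	#     mlil.append(mlil.if_expr(cond, t, f))   # branch on ``cond``
	#     mlil.mark_label(t)                      # true path starts at the current IL address
	#     # ... true-path expressions ...
	#     mlil.mark_label(f)                      # false path / join point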
def add_operand_list(self, operands):
"""
``add_operand_list`` returns an operand list expression for the given list of integer operands.
:param operands: list of operand numbers
:type operands: list(int)
:return: an operand list expression
:rtype: MediumLevelILExpr
"""
operand_list = (ctypes.c_ulonglong * len(operands))()
for i in range(len(operands)):
operand_list[i] = operands[i]
return MediumLevelILExpr(core.BNMediumLevelILAddOperandList(self.handle, operand_list, len(operands)))
def finalize(self):
"""
``finalize`` ends the function and computes the list of basic blocks.
:rtype: None
"""
core.BNFinalizeMediumLevelILFunction(self.handle)
def get_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILSSAInstructionIndex(self.handle, instr)
def get_non_ssa_instruction_index(self, instr):
return core.BNGetMediumLevelILNonSSAInstructionIndex(self.handle, instr)
def get_ssa_var_definition(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
result = core.BNGetMediumLevelILSSAVarDefinition(self.handle, var_data, ssa_var.version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_memory_definition(self, version):
result = core.BNGetMediumLevelILSSAMemoryDefinition(self.handle, version)
if result >= core.BNGetMediumLevelILInstructionCount(self.handle):
return None
return self[result]
def get_ssa_var_uses(self, ssa_var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
instrs = core.BNGetMediumLevelILSSAVarUses(self.handle, var_data, ssa_var.version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_memory_uses(self, version):
count = ctypes.c_ulonglong()
instrs = core.BNGetMediumLevelILSSAMemoryUses(self.handle, version, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def is_ssa_var_live(self, ssa_var):
"""
``is_ssa_var_live`` determines if ``ssa_var`` is live at any point in the function
:param SSAVariable ssa_var: the SSA variable to query
:return: whether the variable is live at any point in the function
:rtype: bool
"""
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
return core.BNIsMediumLevelILSSAVarLive(self.handle, var_data, ssa_var.version)
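	# Typical SSA query pattern (illustrative; ``ssa_var`` is an SSAVariable taken
	# from an instruction of ``self.ssa_form``):
	#
	#     ssa = mlil.ssa_form
	#     definition = ssa.get_ssa_var_definition(ssa_var)   # defining instruction or None
	#     uses = ssa.get_ssa_var_uses(ssa_var)                # list of reading instructions
	#     if ssa.is_ssa_var_live(ssa_var):
	#         pass  # the variable is read somewhere after being written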
def get_var_definitions(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableDefinitions(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_var_uses(self, var):
count = ctypes.c_ulonglong()
var_data = core.BNVariable()
var_data.type = var.source_type
var_data.index = var.index
var_data.storage = var.storage
instrs = core.BNGetMediumLevelILVariableUses(self.handle, var_data, count)
result = []
for i in range(0, count.value):
result.append(self[instrs[i]])
core.BNFreeILInstructionList(instrs)
return result
def get_ssa_var_value(self, ssa_var):
var_data = core.BNVariable()
var_data.type = ssa_var.var.source_type
var_data.index = ssa_var.var.index
var_data.storage = ssa_var.var.storage
value = core.BNGetMediumLevelILSSAVarValue(self.handle, var_data, ssa_var.version)
result = function.RegisterValue(self._arch, value)
return result
def get_low_level_il_instruction_index(self, instr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetLowLevelILInstructionCount(low_il.handle):
return None
return result
def get_low_level_il_expr_index(self, expr):
low_il = self.low_level_il
if low_il is None:
return None
low_il = low_il.ssa_form
if low_il is None:
return None
result = core.BNGetLowLevelILExprIndex(self.handle, expr)
if result >= core.BNGetLowLevelILExprCount(low_il.handle):
return None
return result
def get_low_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetLowLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def get_high_level_il_instruction_index(self, instr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILInstructionIndex(self.handle, instr)
if result >= core.BNGetHighLevelILInstructionCount(high_il.handle):
return None
return result
def get_high_level_il_expr_index(self, expr):
high_il = self.high_level_il
if high_il is None:
return None
result = core.BNGetHighLevelILExprIndex(self.handle, expr)
if result >= core.BNGetHighLevelILExprCount(high_il.handle):
return None
return result
def get_high_level_il_expr_indexes(self, expr):
count = ctypes.c_ulonglong()
exprs = core.BNGetHighLevelILExprIndexes(self.handle, expr, count)
result = []
for i in range(0, count.value):
result.append(exprs[i])
core.BNFreeILInstructionList(exprs)
return result
def create_graph(self, settings = None):
if settings is not None:
settings_obj = settings.handle
else:
settings_obj = None
return binaryninja.flowgraph.CoreFlowGraph(core.BNCreateMediumLevelILFunctionGraph(self.handle, settings_obj))
@property
def arch(self):
""" """
return self._arch
@arch.setter
def arch(self, value):
self._arch = value
@property
def source_function(self):
""" """
return self._source_function
@source_function.setter
def source_function(self, value):
self._source_function = value
class MediumLevelILBasicBlock(basicblock.BasicBlock):
def __init__(self, view, handle, owner):
super(MediumLevelILBasicBlock, self).__init__(handle, view)
self.il_function = owner
def __repr__(self):
arch = self.arch
if arch:
return "<mlil block: %s@%d-%d>" % (arch.name, self.start, self.end)
else:
return "<mlil block: %d-%d>" % (self.start, self.end)
def __iter__(self):
for idx in range(self.start, self.end):
yield self.il_function[idx]
def __getitem__(self, idx):
size = self.end - self.start
		if idx >= size or idx < -size:
raise IndexError("list index is out of range")
if idx >= 0:
return self.il_function[idx + self.start]
else:
return self.il_function[self.end + idx]
def __hash__(self):
return hash((self.start, self.end, self.il_function))
def __contains__(self, instruction):
if type(instruction) != MediumLevelILInstruction or instruction.il_basic_block != self:
return False
if instruction.instr_index >= self.start and instruction.instr_index <= self.end:
return True
else:
return False
def _create_instance(self, handle, view):
"""Internal method by super to instantiate child instances"""
return MediumLevelILBasicBlock(view, handle, self.il_function)
@property
def il_function(self):
""" """
return self._il_function
@il_function.setter
def il_function(self, value):
self._il_function = value
| mit | -3,140,209,989,579,271,000 | 38.9 | 176 | 0.71345 | false |
aimalz/chippr | chippr/log_z_dens.py | 1 | 22517 | import numpy as np
import scipy as sp
import os
import scipy.optimize as op
import cPickle as cpkl
import emcee
import matplotlib as mpl
mpl.use('PS')
import matplotlib.pyplot as plt
import chippr
from chippr import defaults as d
from chippr import plot_utils as pu
from chippr import utils as u
from chippr import stat_utils as s
from chippr import log_z_dens_plots as plots
class log_z_dens(object):
def __init__(self, catalog, hyperprior, truth=None, loc='.', prepend='', vb=True):
"""
An object representing the redshift density function (normalized
redshift distribution function)
Parameters
----------
catalog: chippr.catalog object
dict containing bin endpoints, interim prior bin values, and
interim posterior PDF bin values
hyperprior: chippr.mvn object
multivariate Gaussian distribution for hyperprior distribution
truth: chippr.gmix object, optional
true redshift density function expressed as univariate Gaussian
mixture
loc: string, optional
directory into which to save results and plots made along the way
prepend: str, optional
prepend string to file names
vb: boolean, optional
True to print progress messages to stdout, False to suppress
"""
self.info = {}
self.add_text = prepend + '_'
self.bin_ends = np.array(catalog['bin_ends'])
self.bin_range = self.bin_ends[:-1]-self.bin_ends[0]
self.bin_mids = (self.bin_ends[1:]+self.bin_ends[:-1])/2.
self.bin_difs = self.bin_ends[1:]-self.bin_ends[:-1]
self.log_bin_difs = u.safe_log(self.bin_difs)
self.n_bins = len(self.bin_mids)
self.info['bin_ends'] = self.bin_ends
self.log_int_pr = np.array(catalog['log_interim_prior'])
self.int_pr = np.exp(self.log_int_pr)
self.info['log_interim_prior'] = self.log_int_pr
self.log_pdfs = np.array(catalog['log_interim_posteriors'])
self.pdfs = np.exp(self.log_pdfs)
self.n_pdfs = len(self.log_pdfs)
self.info['log_interim_posteriors'] = self.log_pdfs
if vb:
print(str(self.n_bins) + ' bins, ' + str(len(self.log_pdfs)) + ' interim posterior PDFs')
self.hyper_prior = hyperprior
self.truth = truth
self.info['truth'] = None
if self.truth is not None:
self.info['truth'] = {}
self.tru_nz = np.zeros(self.n_bins)
self.fine_zs = []
self.fine_nz = []
for b in range(self.n_bins):
fine_z = np.linspace(self.bin_ends[b], self.bin_ends[b+1], self.n_bins)
self.fine_zs.extend(fine_z)
fine_dz = (self.bin_ends[b+1] - self.bin_ends[b]) / self.n_bins
fine_n = self.truth.evaluate(fine_z)
self.fine_nz.extend(fine_n)
coarse_nz = np.sum(fine_n) * fine_dz
self.tru_nz[b] += coarse_nz
self.tru_nz /= np.dot(self.tru_nz, self.bin_difs)
self.log_tru_nz = u.safe_log(self.tru_nz)
self.info['log_tru_nz'] = self.log_tru_nz
self.info['truth']['z_grid'] = np.array(self.fine_zs)
self.info['truth']['nz_grid'] = np.array(self.fine_nz)
self.info['estimators'] = {}
self.info['stats'] = {}
self.dir = loc
self.data_dir = os.path.join(loc, 'data')
self.plot_dir = os.path.join(loc, 'plots')
if not os.path.exists(self.plot_dir):
os.makedirs(self.plot_dir)
self.res_dir = os.path.join(loc, 'results')
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
return
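    # Illustrative workflow sketch (hedged; ``catalog`` and ``hyperprior`` are
    # assumed to come from chippr.catalog and chippr.mvn respectively):
    #
    #     nz = log_z_dens(catalog, hyperprior, truth=None, loc='example_dir')
    #     log_stk = nz.calculate_stacked()   # stacked estimator
    #     nz.calculate_mmle(log_stk)         # MMLE, started from the stacked n(z)
    #     nz.compare()                       # goodness-of-fit statistics
    #     nz.write('nz.p')                   # pickle self.info into results/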
#
# def precompute(self):
# """
# Function to precompute values that show up in posterior that are independent of n(z) params
#
# Returns
# -------
# precomputed: float
# log-probability component independent of test params
# """
# integrated_int_pr = np.log(np.dot(self.int_pr, self.bin_difs))
# integrated_int_posts = np.log(np.dot(self.pdfs, axis=0)
# precomputed = integrated_int_posts - integrated_int_pr
# return precomputed
def evaluate_log_hyper_likelihood(self, log_nz):
"""
Function to evaluate log hyperlikelihood
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the hyperlikelihood
Returns
-------
log_hyper_likelihood: float
log likelihood probability associated with parameters in log_nz
"""
nz = np.exp(log_nz)
norm_nz = nz / np.dot(nz, self.bin_difs)
# testing whether the norm step is still necessary
hyper_lfs = np.sum(norm_nz[None,:] * self.pdfs / self.int_pr[None,:] * self.bin_difs, axis=1)
log_hyper_likelihood = np.sum(u.safe_log(hyper_lfs)) - u.safe_log(np.dot(norm_nz, self.bin_difs))
# this used to work...
# log_hyper_likelihood = np.dot(np.exp(log_nz + self.precomputed), self.bin_difs)
return log_hyper_likelihood
def evaluate_log_hyper_prior(self, log_nz):
"""
Function to evaluate log hyperprior
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the hyperprior
Returns
-------
log_hyper_prior: float
log prior probability associated with parameters in log_nz
"""
log_hyper_prior = u.safe_log(self.hyper_prior.evaluate_one(log_nz))
return log_hyper_prior
def evaluate_log_hyper_posterior(self, log_nz):
"""
Function to evaluate log hyperposterior
Parameters
----------
log_nz: numpy.ndarray, float
vector of logged redshift density bin values at which to evaluate
the full posterior
Returns
-------
log_hyper_posterior: float
log hyperposterior probability associated with parameters in log_nz
"""
log_hyper_likelihood = self.evaluate_log_hyper_likelihood(log_nz)
log_hyper_prior = self.evaluate_log_hyper_prior(log_nz)
log_hyper_posterior = log_hyper_likelihood + log_hyper_prior
return log_hyper_posterior
def optimize(self, start, no_data, no_prior, vb=True):
"""
Maximizes the hyperposterior of the redshift density
Parameters
----------
start: numpy.ndarray, float
array of log redshift density function bin values at which to begin
optimization
no_data: boolean
True to exclude data contribution to hyperposterior
no_prior: boolean
True to exclude prior contribution to hyperposterior
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
res.x: numpy.ndarray, float
array of logged redshift density function bin values maximizing
hyperposterior
"""
if no_data:
if vb: print('only optimizing prior')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_prior(log_nz)
elif no_prior:
if vb: print('only optimizing likelihood')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_likelihood(log_nz)
else:
if vb: print('optimizing posterior')
def _objective(log_nz):
return -2. * self.evaluate_log_hyper_posterior(log_nz)
if vb:
print(self.dir + ' starting at ', start, _objective(start))
res = op.minimize(_objective, start, method="Nelder-Mead", options={"maxfev": 1e5, "maxiter":1e5})
if vb:
print(self.dir + ': ' + str(res))
return res.x
def calculate_mmle(self, start, vb=True, no_data=0, no_prior=0):
"""
Calculates the marginalized maximum likelihood estimator of the
redshift density function
Parameters
----------
start: numpy.ndarray, float
array of log redshift density function bin values at which to begin
optimization
vb: boolean, optional
True to print progress messages to stdout, False to suppress
no_data: boolean, optional
True to exclude data contribution to hyperposterior
no_prior: boolean, optional
True to exclude prior contribution to hyperposterior
Returns
-------
log_mle_nz: numpy.ndarray, float
array of logged redshift density function bin values maximizing
hyperposterior
"""
# self.precomputed = self.precompute()
if 'log_mmle_nz' not in self.info['estimators']:
log_mle = self.optimize(start, no_data=no_data, no_prior=no_prior, vb=vb)
mle_nz = np.exp(log_mle)
self.mle_nz = mle_nz / np.dot(mle_nz, self.bin_difs)
self.log_mle_nz = u.safe_log(self.mle_nz)
self.info['estimators']['log_mmle_nz'] = self.log_mle_nz
else:
self.log_mle_nz = self.info['estimators']['log_mmle_nz']
self.mle_nz = np.exp(self.log_mle_nz)
return self.log_mle_nz
def calculate_stacked(self, vb=True):
"""
Calculates the stacked estimator of the redshift density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_stk_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_stacked_nz' not in self.info['estimators']:
self.stk_nz = np.sum(self.pdfs, axis=0)
self.stk_nz /= np.dot(self.stk_nz, self.bin_difs)
self.log_stk_nz = u.safe_log(self.stk_nz)
self.info['estimators']['log_stacked_nz'] = self.log_stk_nz
else:
self.log_stk_nz = self.info['estimators']['log_stacked_nz']
self.stk_nz = np.exp(self.log_stk_nz)
return self.log_stk_nz
def calculate_mmap(self, vb=True):
"""
Calculates the marginalized maximum a posteriori estimator of the
redshift density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_map_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_mmap_nz' not in self.info['estimators']:
self.map_nz = np.zeros(self.n_bins)
mappreps = [np.argmax(l) for l in self.log_pdfs]
for m in mappreps:
self.map_nz[m] += 1.
            self.map_nz /= self.bin_difs * self.n_pdfs
self.log_map_nz = u.safe_log(self.map_nz)
self.info['estimators']['log_mmap_nz'] = self.log_map_nz
else:
self.log_map_nz = self.info['estimators']['log_mmap_nz']
self.map_nz = np.exp(self.log_map_nz)
return self.log_map_nz
def calculate_mexp(self, vb=True):
"""
Calculates the marginalized expected value estimator of the redshift
density function
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
log_exp_nz: ndarray, float
array of logged redshift density function bin values
"""
if 'log_mexp_nz' not in self.info['estimators']:
expprep = [sum(z) for z in self.bin_mids * self.pdfs * self.bin_difs]
self.exp_nz = np.zeros(self.n_bins)
for z in expprep:
for k in range(self.n_bins):
if z > self.bin_ends[k] and z < self.bin_ends[k+1]:
self.exp_nz[k] += 1.
self.exp_nz /= self.bin_difs * self.n_pdfs
self.log_exp_nz = u.safe_log(self.exp_nz)
self.info['estimators']['log_mexp_nz'] = self.log_exp_nz
else:
self.log_exp_nz = self.info['estimators']['log_mexp_nz']
self.exp_nz = np.exp(self.log_exp_nz)
return self.log_exp_nz
def sample(self, ivals, n_samps, vb=True):
"""
Samples the redshift density hyperposterior
Parameters
----------
ivals: numpy.ndarray, float
initial values of the walkers
n_samps: int
number of samples to accept before stopping
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
mcmc_outputs: dict
dictionary containing array of sampled redshift density function
bin values as well as posterior probabilities, acceptance
fractions, and autocorrelation times
"""
self.sampler.reset()
pos, prob, state = self.sampler.run_mcmc(ivals, n_samps)
chains = self.sampler.chain
probs = self.sampler.lnprobability
fracs = self.sampler.acceptance_fraction
acors = s.acors(chains)
mcmc_outputs = {}
mcmc_outputs['chains'] = chains
mcmc_outputs['probs'] = probs
mcmc_outputs['fracs'] = fracs
mcmc_outputs['acors'] = acors
return mcmc_outputs
def calculate_samples(self, ivals, n_accepted=d.n_accepted, n_burned=d.n_burned, vb=True, n_procs=1, no_data=0, no_prior=0, gr_threshold=d.gr_threshold):
"""
Calculates samples estimating the redshift density function
Parameters
----------
ivals: numpy.ndarray, float
initial values of log n(z) for each walker
n_accepted: int, optional
log10 number of samples to accept per walker
n_burned: int, optional
log10 number of samples between tests of burn-in condition
n_procs: int, optional
number of processors to use, defaults to single-thread
vb: boolean, optional
True to print progress messages to stdout, False to suppress
no_data: boolean, optional
True to exclude data contribution to hyperposterior
no_prior: boolean, optional
True to exclude prior contribution to hyperposterior
Returns
-------
log_samples_nz: ndarray, float
array of sampled log redshift density function bin values
"""
# self.precomputed = self.precompute()
if 'log_mean_sampled_nz' not in self.info['estimators']:
self.n_walkers = len(ivals)
if no_data:
def distribution(log_nz):
return self.evaluate_log_hyper_prior(log_nz)
elif no_prior:
def distribution(log_nz):
return self.evaluate_log_hyper_likelihood(log_nz)
else:
def distribution(log_nz):
return self.evaluate_log_hyper_posterior(log_nz)
self.sampler = emcee.EnsembleSampler(self.n_walkers, self.n_bins, distribution, threads=n_procs)
self.burn_ins = 0
if n_burned == 0:
self.burning_in = False
else:
self.burning_in = True
vals = ivals
vals -= u.safe_log(np.sum(np.exp(ivals) * self.bin_difs[np.newaxis, :], axis=1))[:, np.newaxis]
if vb:
plots.plot_ivals(vals, self.info, self.plot_dir, prepend=self.add_text)
canvas = plots.set_up_burn_in_plots(self.n_bins, self.n_walkers)
full_chain = np.array([[vals[w]] for w in range(self.n_walkers)])
while self.burning_in:
if vb:
print('beginning sampling '+str(self.burn_ins))
burn_in_mcmc_outputs = self.sample(vals, 10**n_burned)
chain = burn_in_mcmc_outputs['chains']
burn_in_mcmc_outputs['chains'] -= u.safe_log(np.sum(np.exp(chain) * self.bin_difs[np.newaxis, np.newaxis, :], axis=2))[:, :, np.newaxis]
with open(os.path.join(self.res_dir, 'mcmc'+str(self.burn_ins)+'.p'), 'wb') as file_location:
cpkl.dump(burn_in_mcmc_outputs, file_location)
full_chain = np.concatenate((full_chain, burn_in_mcmc_outputs['chains']), axis=1)
if vb:
canvas = plots.plot_sampler_progress(canvas, burn_in_mcmc_outputs, full_chain, self.burn_ins, self.plot_dir, prepend=self.add_text)
self.burning_in = s.gr_test(full_chain, gr_threshold)
vals = np.array([item[-1] for item in burn_in_mcmc_outputs['chains']])
self.burn_ins += 1
mcmc_outputs = self.sample(vals, 10**n_accepted)
chain = mcmc_outputs['chains']
mcmc_outputs['chains'] -= u.safe_log(np.sum(np.exp(chain) * self.bin_difs[np.newaxis, np.newaxis, :], axis=2))[:, :, np.newaxis]
full_chain = np.concatenate((full_chain, mcmc_outputs['chains']), axis=1)
with open(os.path.join(self.res_dir, 'full_chain.p'), 'wb') as file_location:
cpkl.dump(full_chain, file_location)
self.log_smp_nz = mcmc_outputs['chains']
self.smp_nz = np.exp(self.log_smp_nz)
self.info['log_sampled_nz_meta_data'] = mcmc_outputs
self.log_bfe_nz = s.norm_fit(self.log_smp_nz)[0]
self.bfe_nz = np.exp(self.log_bfe_nz)
self.info['estimators']['log_mean_sampled_nz'] = self.log_bfe_nz
else:
            self.log_smp_nz = self.info['log_sampled_nz_meta_data']['chains']
            self.smp_nz = np.exp(self.log_smp_nz)
            self.log_bfe_nz = self.info['estimators']['log_mean_sampled_nz']
            self.bfe_nz = np.exp(self.log_bfe_nz)
# if vb:
# plots.plot_samples(self.info, self.plot_dir)
return self.log_smp_nz
def compare(self, vb=True):
"""
Calculates all available goodness of fit measures
Parameters
----------
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
out_info: dict
dictionary of all available statistics
"""
self.info['stats']['kld'], self.info['stats']['log_kld'] = {}, {}
self.info['stats']['rms'], self.info['stats']['log_rms'] = {}, {}
if self.truth is not None:
for key in self.info['estimators']:
self.info['stats']['kld'][key] = s.calculate_kld(np.exp(self.info['log_tru_nz']), np.exp(self.info['estimators'][key]))
# self.info['stats']['log_kld'][key] = s.calculate_kld(self.log_tru_nz, self.info['estimators'][key])
self.info['stats']['rms']['true_nz' + '__' + key[4:]] = s.calculate_rms(np.exp(self.info['log_tru_nz']), np.exp(self.info['estimators'][key]))
self.info['stats']['log_rms']['log_true_nz'+ '__' + key] = s.calculate_rms(self.info['log_tru_nz'], self.info['estimators'][key])
for i in range(len(self.info['estimators'].keys())):
key_1 = self.info['estimators'].keys()[i]
for j in range(len(self.info['estimators'].keys()[:i])):
key_2 = self.info['estimators'].keys()[j]
# print(((i,j), (key_1, key_2)))
self.info['stats']['log_rms'][key_1 + '__' + key_2] = s.calculate_rms(self.info['estimators'][key_1], self.info['estimators'][key_2])
self.info['stats']['rms'][key_1[4:] + '__' + key_2[4:]] = s.calculate_rms(np.exp(self.info['estimators'][key_1]), np.exp(self.info['estimators'][key_2]))
out_info = self.info['stats']
if vb:
print(out_info)
return out_info
def plot_estimators(self, log=True, mini=True):
"""
Plots all available estimators of the redshift density function.
"""
if mini:
also = 'mini'
else:
also = ''
if log:
plots.plot_estimators(self.info, self.plot_dir, prepend=self.add_text+also+'log_', mini=mini)
else:
plots.plot_estimators(self.info, self.plot_dir, log=False, prepend=self.add_text+also+'lin_', mini=mini)
return
def read(self, read_loc, style='pickle', vb=True):
"""
Function to load inferred quantities from files.
Parameters
----------
read_loc: string
filepath where inferred redshift density function is stored
style: string, optional
keyword for file format, currently only 'pickle' supported
vb: boolean, optional
True to print progress messages to stdout, False to suppress
Returns
-------
self.info: dict
returns the log_z_dens information dictionary object
"""
with open(os.path.join(self.res_dir, read_loc), 'rb') as file_location:
self.info = cpkl.load(file_location)
if vb:
print('The following quantities were read from '+read_loc+' in the '+style+' format:')
for key in self.info:
print(key)
if 'estimators' in self.info:
print(self.info['estimators'].keys())
return self.info
def write(self, write_loc, style='pickle', vb=True):
"""
Function to write results of inference to files.
Parameters
----------
write_loc: string
filepath where results of inference should be saved.
style: string, optional
keyword for file format, currently only 'pickle' supported
vb: boolean, optional
True to print progress messages to stdout, False to suppress
"""
with open(os.path.join(self.res_dir, write_loc), 'wb') as file_location:
cpkl.dump(self.info, file_location)
if vb:
print('The following quantities were written to '+write_loc+' in the '+style+' format:')
for key in self.info:
print(key)
return
| mit | 7,646,769,710,592,497,000 | 38.782686 | 169 | 0.569481 | false |
chrisxue815/leetcode_python | problems/test_0236.py | 1 | 1582 | import unittest
import utils
from tree import TreeNode
# O(n) time. O(h) space for the recursion stack, where h is the tree height (O(log n) for a balanced tree). In-order DFS.
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
def dfs(curr):
if not curr:
return None, 0
ancestor, num_matches_left = dfs(curr.left)
if num_matches_left == 2:
return ancestor, 2
if curr is p or curr is q:
if num_matches_left == 1:
return curr, 2
else:
num_matches_left = 1
ancestor, num_matches_right = dfs(curr.right)
if num_matches_left + num_matches_right == 2:
return ancestor or curr, 2
return None, num_matches_left + num_matches_right
return dfs(root)[0]
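# dfs returns (lowest common ancestor found so far, number of targets seen in the
# subtree rooted at curr). Once both p and q have been seen (count == 2), the
# ancestor is fixed and is propagated unchanged up to the root.
#
# Hand-traced example on the classic case root = [3,5,1,6,2,0,8], p = 5, q = 1:
# the left subtree reports (None, 1), the right subtree reports (None, 1), so the
# root node 3 is returned as the lowest common ancestor.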
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
root = TreeNode.from_array(case.args.root)
p = self.find_node(root, case.args.p)
q = self.find_node(root, case.args.q)
actual = Solution().lowestCommonAncestor(root, p, q)
self.assertEqual(case.expected, actual.val, msg=args)
def find_node(self, root, val):
if not root:
return None
if root.val == val:
return root
return self.find_node(root.left, val) or self.find_node(root.right, val)
if __name__ == '__main__':
unittest.main()
| unlicense | -8,854,708,304,556,259,000 | 27.25 | 97 | 0.543616 | false |