| repo_name (string, 6–61 chars) | path (string, 4–230 chars) | copies (string, 1–3 chars) | size (string, 4–6 chars) | text (string, 1.01k–850k chars) | license (string, 15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6–96.6) | line_max (int64, 32–939) | alpha_frac (float64, 0.26–0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62–6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cidadania/ecidadania-ng | src/ecidadania/views/invite.py | 2 | 2374 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.shortcuts import render_to_response
from django.template import RequestContext, loader, Context
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from e_cidadania import settings
@login_required
def invite(request):
"""
    Simple view to send invitations to friends via mail. Implementing the
    invitation system as a view guarantees that no invitation is monitored
    or saved to the hard disk.
"""
if request.method == "POST":
mail_addr = request.POST['email_addr']
raw_addr_list = mail_addr.split(',')
addr_list = [x.strip() for x in raw_addr_list]
usr_msg = request.POST['mail_msg']
plain_template = "invite/invite_plain.txt"
html_template = "invite/invite.html"
plain_msg = loader.get_template(plain_template).render(
RequestContext(request,
{'msg': usr_msg}))
html_msg = loader.get_template(html_template).render(
RequestContext(request,
{'msg': usr_msg}))
email = EmailMultiAlternatives(_('Invitation to join e-cidadania'), plain_msg, settings.DEFAULT_FROM_EMAIL, [], addr_list)
email.attach_alternative(html_msg, 'text/html')
email.send(fail_silently=False)
return render_to_response('invite_done.html',
context_instance=RequestContext(request))
uri = request.build_absolute_uri("/")
return render_to_response('invite.html', {"uri": uri}, context_instance=RequestContext(request))
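# Illustrative usage sketch (not part of the original module); assumes the view
# is routed at /invite/ and that a test user with these credentials exists.
def _example_invite_post():  # hypothetical helper, never called by this module
    from django.test import Client
    client = Client()
    client.login(username="demo", password="demo")  # assumed credentials
    return client.post("/invite/", {
        "email_addr": "alice@example.org, bob@example.org",
        "mail_msg": "Join e-cidadania!",
    })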
| apache-2.0 | 8,266,754,755,651,584,000 | 40.649123 | 130 | 0.670598 | false | 4.086059 | false | false | false |
robdennis/sideboard | tests/plugins/different_versions/rdflib3_0_0/env/lib/python2.7/site-packages/rdflib/plugins/parsers/ntriples.py | 7 | 6700 | #!/usr/bin/env python
__doc__="""
N-Triples Parser
License: GPL 2, W3C, BSD, or MIT
Author: Sean B. Palmer, inamidst.com
Documentation: http://inamidst.com/proj/rdf/ntriples-doc
Command line usage::
./ntriples.py <URI> - parses URI as N-Triples
./ntriples.py --help - prints out this help message
# @@ fully empty document?
"""
import re
uriref = r'<([^:]+:[^\s"<>]+)>'
literal = r'"([^"\\]*(?:\\.[^"\\]*)*)"'
litinfo = r'(?:@([a-z]+(?:-[a-z0-9]+)*)|\^\^' + uriref + r')?'
r_line = re.compile(r'([^\r\n]*)(?:\r\n|\r|\n)')
r_wspace = re.compile(r'[ \t]*')
r_wspaces = re.compile(r'[ \t]+')
r_tail = re.compile(r'[ \t]*\.[ \t]*')
r_uriref = re.compile(uriref)
r_nodeid = re.compile(r'_:([A-Za-z][A-Za-z0-9]*)')
r_literal = re.compile(literal + litinfo)
bufsiz = 2048
validate = False
class Node(unicode): pass
# class URI(Node): pass
# class bNode(Node): pass
# class Literal(Node):
# def __new__(cls, lit, lang=None, dtype=None):
# n = str(lang) + ' ' + str(dtype) + ' ' + lit
# return unicode.__new__(cls, n)
from rdflib.term import URIRef as URI
from rdflib.term import BNode as bNode
from rdflib.term import Literal
class Sink(object):
def __init__(self):
self.length = 0
def triple(self, s, p, o):
self.length += 1
print (s, p, o)
class ParseError(Exception): pass
quot = {'t': '\t', 'n': '\n', 'r': '\r', '"': '"', '\\': '\\'}
r_safe = re.compile(r'([\x20\x21\x23-\x5B\x5D-\x7E]+)')
r_quot = re.compile(r'\\(t|n|r|"|\\)')
r_uniquot = re.compile(r'\\u([0-9A-F]{4})|\\U([0-9A-F]{8})')
def unquote(s):
"""Unquote an N-Triples string."""
result = []
while s:
m = r_safe.match(s)
if m:
s = s[m.end():]
result.append(m.group(1))
continue
m = r_quot.match(s)
if m:
s = s[2:]
result.append(quot[m.group(1)])
continue
m = r_uniquot.match(s)
if m:
s = s[m.end():]
u, U = m.groups()
codepoint = int(u or U, 16)
if codepoint > 0x10FFFF:
raise ParseError("Disallowed codepoint: %08X" % codepoint)
result.append(unichr(codepoint))
elif s.startswith('\\'):
raise ParseError("Illegal escape at: %s..." % s[:10])
else: raise ParseError("Illegal literal character: %r" % s[0])
return unicode(''.join(result))
if not validate:
def unquote(s):
return s.decode('unicode-escape')
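# Illustrative example (not in the original module): both unquote variants
# decode N-Triples escape sequences into unicode, e.g.
#   unquote('caf\\u00E9')  # -> u'caf\xe9' (u"café")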
r_hibyte = re.compile(r'([\x80-\xFF])')
def uriquote(uri):
return r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri)
if not validate:
def uriquote(uri):
return uri
class NTriplesParser(object):
"""An N-Triples Parser.
Usage::
p = NTriplesParser(sink=MySink())
sink = p.parse(f) # file; use parsestring for a string
"""
def __init__(self, sink=None):
if sink is not None:
self.sink = sink
else: self.sink = Sink()
def parse(self, f):
"""Parse f as an N-Triples file."""
if not hasattr(f, 'read'):
raise ParseError("Item to parse must be a file-like object.")
self.file = f
self.buffer = ''
while True:
self.line = self.readline()
if self.line is None: break
try: self.parseline()
except ParseError:
raise ParseError("Invalid line: %r" % self.line)
return self.sink
def parsestring(self, s):
"""Parse s as an N-Triples string."""
if not isinstance(s, basestring):
raise ParseError("Item to parse must be a string instance.")
from cStringIO import StringIO
f = StringIO()
f.write(s)
f.seek(0)
self.parse(f)
def readline(self):
"""Read an N-Triples line from buffered input."""
# N-Triples lines end in either CRLF, CR, or LF
# Therefore, we can't just use f.readline()
if not self.buffer:
buffer = self.file.read(bufsiz)
if not buffer: return None
self.buffer = buffer
while True:
m = r_line.match(self.buffer)
if m: # the more likely prospect
self.buffer = self.buffer[m.end():]
return m.group(1)
else:
buffer = self.file.read(bufsiz)
if not buffer and not self.buffer.isspace():
raise ParseError("EOF in line")
elif not buffer:
return None
self.buffer += buffer
def parseline(self):
self.eat(r_wspace)
if (not self.line) or self.line.startswith('#'):
return # The line is empty or a comment
subject = self.subject()
self.eat(r_wspaces)
predicate = self.predicate()
self.eat(r_wspaces)
object = self.object()
self.eat(r_tail)
if self.line:
raise ParseError("Trailing garbage")
self.sink.triple(subject, predicate, object)
def peek(self, token):
return self.line.startswith(token)
def eat(self, pattern):
m = pattern.match(self.line)
if not m: # @@ Why can't we get the original pattern?
raise ParseError("Failed to eat %s" % pattern)
self.line = self.line[m.end():]
return m
def subject(self):
# @@ Consider using dictionary cases
subj = self.uriref() or self.nodeid()
if not subj:
raise ParseError("Subject must be uriref or nodeID")
return subj
def predicate(self):
pred = self.uriref()
if not pred:
raise ParseError("Predicate must be uriref")
return pred
def object(self):
objt = self.uriref() or self.nodeid() or self.literal()
if objt is False:
raise ParseError("Unrecognised object type")
return objt
def uriref(self):
if self.peek('<'):
uri = self.eat(r_uriref).group(1)
uri = unquote(uri)
uri = uriquote(uri)
return URI(uri)
return False
def nodeid(self):
if self.peek('_'):
return bNode(self.eat(r_nodeid).group(1))
return False
def literal(self):
if self.peek('"'):
lit, lang, dtype = self.eat(r_literal).groups()
lang = lang or None
dtype = dtype or None
if lang and dtype:
raise ParseError("Can't have both a language and a datatype")
lit = unquote(lit)
return Literal(lit, lang, dtype)
return False
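# Illustrative usage sketch (expands the class docstring; not in the original):
def _example_parse():  # hypothetical helper for demonstration only
    p = NTriplesParser(sink=Sink())
    p.parsestring('<http://example.org/s> <http://example.org/p> "o" .\n')
    return p.sink.length  # -> 1 triple handed to the sink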
def parseURI(uri):
import urllib
parser = NTriplesParser()
u = urllib.urlopen(uri)
sink = parser.parse(u)
u.close()
# for triple in sink:
# print triple
print 'Length of input:', sink.length
def main():
import sys
if len(sys.argv) == 2:
parseURI(sys.argv[1])
else: print __doc__
if __name__=="__main__":
main()
| bsd-3-clause | -4,974,506,439,557,729,000 | 25.693227 | 73 | 0.564776 | false | 3.298868 | false | false | false |
x4dr/NossiNet | NossiPack/krypta.py | 1 | 4355 | import ctypes
import os
import random
import sqlite3
import threading
from contextlib import closing
from decimal import ROUND_HALF_UP, Decimal
from pathlib import Path
import numexpr
import Data
class DescriptiveError(Exception):
pass
def tuple_overlap(a: tuple, b: tuple) -> bool:
"""
    Checks whether the intervals spanned by the first two elements of each
    tuple overlap on the number line (or any other total ordering).
"""
a, b = sorted(a), sorted(b)
return (
b[0] <= a[0] <= b[1]
or b[0] <= a[1] <= b[1]
or a[0] <= b[0] <= a[1]
or a[0] <= b[1] <= a[1]
)
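# Illustrative examples (not in the original module):
#   tuple_overlap((1, 5), (4, 9))  # -> True, [1,5] and [4,9] share [4,5]
#   tuple_overlap((1, 2), (3, 4))  # -> False, disjoint intervals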
def terminate_thread(thread: threading.Thread):
"""Terminates a python thread from another thread.
:param thread: a threading.Thread instance
"""
if not thread.is_alive():
return
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)
if res == 0:
raise ValueError("nonexistent thread id")
if res > 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
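# Usage sketch (illustrative, not in the original module):
#   worker = threading.Thread(target=some_long_job)  # hypothetical target
#   worker.start()
#   terminate_thread(worker)  # raises SystemExit inside the worker thread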
def init_db():
print("initializing DB")
with closing(connect_db("initialization")) as db:
db.cursor().executescript(Data.getschema())
db.commit()
def calculate(calc, par=None):
loose_par = [0] # last pop ends the loop
if par is None:
par = {}
else:
loose_par += [x for x in par.split(",") if ":" not in x]
par = {
x.upper(): y
for x, y in [pair.split(":") for pair in par.split(",") if ":" in pair]
}
for k, v in par.items():
calc = calc.replace(k, v)
calc = calc.strip()
missing = None
res = 0
while len(loose_par) > 0:
try:
res = numexpr.evaluate(calc, local_dict=par, truediv=True).item()
missing = None # success
break
except KeyError as e:
missing = e
par[e.args[0]] = float(loose_par.pop()) # try autofilling
if missing:
raise DescriptiveError("Parameter " + missing.args[0] + " is missing!")
return Decimal(res).quantize(1, ROUND_HALF_UP)
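# Illustrative example (not in the original module): "NAME:value" pairs bind
# named parameters, bare values autofill missing names in order, and 0 is the
# final fallback. E.g. calculate("STR + D", par="STR:3,4") substitutes STR=3,
# autofills D=4 on the KeyError, and returns Decimal('7').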
g = {} # module level caching
def close_db():
db = g.get("db", None)
if db:
db.close()
g["db"] = None
def connect_db(source) -> sqlite3.Connection:
"""db connection singleton"""
db = g.get("db", None)
if db:
return db
dbpath = Data.DATABASE
if source != "before request":
print("connecting to", dbpath, "from", source)
if not Path(dbpath).exists():
Path(dbpath).touch()
init_db()
g["db"] = sqlite3.connect(dbpath)
return g["db"]
def write_nonblocking(path, data):
path = Path(path)
if path.is_dir():
path = path / "_"
i = 0
while (path.with_suffix(f".{i}")).exists():
i += 1
with path.with_suffix(f".{i}").open(mode="x") as x:
x.write(data + "\n")
x.write("DONE") # mark file as ready
def read_nonblocking(path):
path = Path(path)
if path.is_dir():
path = path / "_"
result = []
file: Path
for file in sorted(path.parent.glob(str(path.stem) + "*")):
with file.open(mode="r") as f:
lines = f.readlines()
if lines[-1] != "DONE":
break # file not read yet or fragmented
result += lines[:-1]
os.remove(str(file.absolute()))
return result
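# Roundtrip sketch (illustrative, not in the original module):
#   write_nonblocking("/tmp/inbox", "hello")  # writes /tmp/inbox.0 ending in "DONE"
#   read_nonblocking("/tmp/inbox")            # -> ["hello\n"]; the file is removed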
def is_int(s: str) -> bool:
try:
int(s)
return True
except ValueError:
return False
def sumdict(inp):
result = 0
try:
for e in inp.keys():
result += int(inp[e])
except Exception:
result = sum(inp)
return result
def d10(amt, diff, ones=True): # faster than the Dice
succ = 0
anti = 0
for _ in range(amt):
x = random.randint(1, 10)
if x >= diff:
succ += 1
if ones and x == 1:
anti += 1
if anti > 0:
if succ > anti:
return succ - anti
if succ > 0:
return 0
return 0 - anti
return succ
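# Illustrative example (not in the original module): d10(5, 6) rolls five dice
# against difficulty 6; rolls of [7, 9, 1, 3, 6] give 3 successes minus 1 one -> 2.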
def split_at(a, x):
return a[:x], a[x:]
| gpl-2.0 | 6,972,973,575,226,754,000 | 23.744318 | 91 | 0.554994 | false | 3.50362 | false | false | false |
stanta/darfchain | ICO/ico/cmd/rawinvestments.py | 1 | 2095 | """Extract crowdsale raw investment data."""
import csv
import datetime
import click
from eth_utils import from_wei
from populus import Project
@click.command()
@click.option('--chain', nargs=1, default="mainnet", help='On which chain to deploy - see populus.json')
@click.option('--address', nargs=1, help='CrowdsaleContract address to scan', required=True)
@click.option('--csv-file', nargs=1, help='CSV file to write', default=None, required=True)
def main(chain, address, csv_file):
"""Extract crowdsale invested events.
This is useful for RelaunchCrowdsale to rebuild the data.
"""
project = Project()
with project.get_chain(chain) as c:
web3 = c.web3
print("Web3 provider is", web3.currentProvider)
# Sanity check
print("Block number is", web3.eth.blockNumber)
Crowdsale = c.provider.get_base_contract_factory('MintedTokenCappedCrowdsale')
crowdsale = Crowdsale(address=address)
print("Total amount raised is", from_wei(crowdsale.call().weiRaised(), "ether"), "ether")
print("Getting events")
events = crowdsale.pastEvents("Invested").get(only_changes=False)
print("Writing results to", csv_file)
with open(csv_file, 'w', newline='') as out:
writer = csv.writer(out)
writer.writerow(["Address", "Payment at", "Tx hash", "Tx index", "Invested ETH", "Received tokens"])
for e in events:
timestamp = web3.eth.getBlock(e["blockNumber"])["timestamp"]
dt = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc)
writer.writerow([
e["args"]["investor"],
dt.isoformat(),
e["transactionHash"],
e["transactionIndex"],
from_wei(e["args"]["weiAmount"], "ether"),
e["args"]["tokenAmount"],
])
print("Total", len(events), "invest events")
print("All done! Enjoy your decentralized future.")
if __name__ == "__main__":
main()
| gpl-3.0 | -3,446,575,058,190,066,000 | 33.916667 | 112 | 0.603819 | false | 3.865314 | false | false | false |
ssyuzev/django-mailbox | django_mailbox/migrations/0009_remove_references_table.py | 3 | 2544 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing M2M table for field references on 'Message'
db.delete_table('django_mailbox_message_references')
def backwards(self, orm):
# Adding M2M table for field references on 'Message'
db.create_table('django_mailbox_message_references', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_message', models.ForeignKey(orm['django_mailbox.message'], null=False)),
('to_message', models.ForeignKey(orm['django_mailbox.message'], null=False))
))
db.create_unique('django_mailbox_message_references', ['from_message_id', 'to_message_id'])
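    # Applying/reverting (illustrative): south migrations are run through
    # Django's manage.py, e.g. ./manage.py migrate django_mailbox 0009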
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'from_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replies'", 'null': 'True', 'to': "orm['django_mailbox.Message']"}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_header': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['django_mailbox']
| mit | -4,273,775,299,072,861,000 | 52 | 181 | 0.580582 | false | 3.763314 | false | false | false |
helixyte/TheLMA | thelma/tools/iso/libcreation/jobcreator.py | 1 | 5642 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Creator for library creation ISO jobs.
"""
from thelma.tools.semiconstants import \
get_rack_specs_from_reservoir_specs
from thelma.tools.semiconstants import get_item_status_future
from thelma.tools.semiconstants import get_reservoir_specs_standard_384
from thelma.tools.semiconstants import get_reservoir_specs_standard_96
from thelma.tools.iso.libcreation.base import LibraryLayout
from thelma.tools.iso.libcreation.base import NUMBER_SECTORS
from thelma.tools.iso.libcreation.base import \
DEFAULT_ALIQUOT_PLATE_CONCENTRATION
from thelma.tools.iso.libcreation.base import \
DEFAULT_PREPARATION_PLATE_CONCENTRATION
from thelma.tools.iso.libcreation.base import \
LibraryBaseLayoutConverter
from thelma.tools.iso.poolcreation.base import \
StockSampleCreationPosition
from thelma.tools.iso.poolcreation.jobcreator import \
StockSampleCreationIsoJobCreator
from thelma.tools.iso.poolcreation.jobcreator import \
StockSampleCreationIsoPopulator
from thelma.tools.utils.racksector import QuadrantIterator
from thelma.tools.utils.racksector import \
get_sector_layouts_for_384_layout
__docformat__ = 'reStructuredText en'
__all__ = ['LibraryCreationIsoJobCreator',
'LibraryCreationIsoPopulator',
]
class LibraryCreationIsoPopulator(StockSampleCreationIsoPopulator):
#: The label pattern for preparation plates.
PREP_PLATE_LABEL_PATTERN = '%s-%i-%inM-Q%i'
#: The label pattern for aliquot plates.
ALIQUOT_PLATE_LABEL_PATTERN = '%s-%i-%inM-%i'
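    # Pattern fields (illustrative): library name, ISO layout number,
    # concentration in nM, then sector (Q%i) or aliquot index, giving
    # preparation plate labels of the form 'mylib-2-<conc>nM-Q1'.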
def __init__(self, iso_request, number_isos, **kw):
StockSampleCreationIsoPopulator.__init__(self, iso_request,
number_isos, **kw)
#: The library base layout.
self.__base_layout = None
#: Maps sector indices -> positions.
self.__sector_positions = None
def reset(self):
StockSampleCreationIsoPopulator.reset(self)
self.__base_layout = None
self.__sector_positions = None
@property
def _base_layout(self):
if self.__base_layout is None:
lib = self.iso_request.molecule_design_library
converter = LibraryBaseLayoutConverter(lib.rack_layout,
parent=self)
self.__base_layout = converter.get_result()
return self.__base_layout
@property
def _sector_positions(self):
if self.__sector_positions is None:
self.__sector_positions = \
QuadrantIterator.sort_into_sectors(self._base_layout,
NUMBER_SECTORS)
return self.__sector_positions
def _create_iso_layout(self):
layout = LibraryLayout.from_base_layout(self._base_layout)
for positions in self._sector_positions.values():
if not self._have_candidates:
break
for base_pos in positions:
if not self._have_candidates:
break
lib_cand = self._pool_candidates.pop(0)
lib_pos = \
StockSampleCreationPosition(base_pos.rack_position,
lib_cand.pool,
lib_cand.get_tube_barcodes())
layout.add_position(lib_pos)
return layout
def _populate_iso(self, iso, layout):
StockSampleCreationIsoPopulator._populate_iso(self, iso, layout)
# Create sector preparation plates.
library_name = self.iso_request.label
ir_specs_96 = get_reservoir_specs_standard_96()
plate_specs_96 = get_rack_specs_from_reservoir_specs(ir_specs_96)
ir_specs_384 = get_reservoir_specs_standard_384()
plate_specs_384 = get_rack_specs_from_reservoir_specs(ir_specs_384)
future_status = get_item_status_future()
sec_layout_map = get_sector_layouts_for_384_layout(layout)
# Create preparation plates.
for sec_idx in range(NUMBER_SECTORS):
if not sec_idx in sec_layout_map:
continue
# TODO: Move label creation to LABELS class.
prep_label = self.PREP_PLATE_LABEL_PATTERN \
% (library_name,
iso.layout_number,
DEFAULT_PREPARATION_PLATE_CONCENTRATION,
sec_idx + 1)
prep_plate = plate_specs_96.create_rack(prep_label, future_status)
sec_layout = sec_layout_map[sec_idx]
iso.add_sector_preparation_plate(prep_plate, sec_idx,
sec_layout.create_rack_layout())
# Create aliquot plates.
for i in range(self.iso_request.number_aliquots):
# TODO: Move label creation to LABELS class.
aliquot_label = self.ALIQUOT_PLATE_LABEL_PATTERN \
% (library_name,
iso.layout_number,
DEFAULT_ALIQUOT_PLATE_CONCENTRATION,
i + 1)
aliquot_plate = plate_specs_384.create_rack(aliquot_label,
future_status)
iso.add_aliquot_plate(aliquot_plate)
class LibraryCreationIsoJobCreator(StockSampleCreationIsoJobCreator):
_ISO_POPULATOR_CLASS = LibraryCreationIsoPopulator
| mit | -9,135,050,248,493,287,000 | 42.736434 | 80 | 0.610599 | false | 4.004258 | false | false | false |
pydcs/dcs | dcs/vehicles.py | 1 | 60786 | # This file is generated from pydcs_export.lua
import dcs.unittype as unittype
class Artillery:
class _2B11_mortar(unittype.VehicleType):
id = "2B11 mortar"
name = "Mortar 2B11 120mm"
detection_range = 0
threat_range = 7000
air_weapon_dist = 7000
class SAU_Gvozdika(unittype.VehicleType):
id = "SAU Gvozdika"
name = "SPH 2S1 Gvozdika 122mm"
detection_range = 0
threat_range = 15000
air_weapon_dist = 15000
class SAU_Msta(unittype.VehicleType):
id = "SAU Msta"
name = "SPH 2S19 Msta 152mm"
detection_range = 0
threat_range = 23500
air_weapon_dist = 23500
class SAU_Akatsia(unittype.VehicleType):
id = "SAU Akatsia"
name = "SPH 2S3 Akatsia 152mm"
detection_range = 0
threat_range = 17000
air_weapon_dist = 17000
class SAU_2_C9(unittype.VehicleType):
id = "SAU 2-C9"
name = "SPM 2S9 Nona 120mm M"
detection_range = 0
threat_range = 7000
air_weapon_dist = 7000
class M_109(unittype.VehicleType):
id = "M-109"
name = "SPH M109 Paladin 155mm"
detection_range = 0
threat_range = 22000
air_weapon_dist = 22000
eplrs = True
class SpGH_Dana(unittype.VehicleType):
id = "SpGH_Dana"
name = "SPH Dana vz77 152mm"
detection_range = 0
threat_range = 18700
air_weapon_dist = 18700
class Grad_FDDM(unittype.VehicleType):
id = "Grad_FDDM"
name = "Grad MRL FDDM (FC)"
detection_range = 0
threat_range = 1000
air_weapon_dist = 1000
class MLRS_FDDM(unittype.VehicleType):
id = "MLRS FDDM"
name = "MRLS FDDM (FC)"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
eplrs = True
class Grad_URAL(unittype.VehicleType):
id = "Grad-URAL"
name = "MLRS BM-21 Grad 122mm"
detection_range = 0
threat_range = 19000
air_weapon_dist = 19000
class Uragan_BM_27(unittype.VehicleType):
id = "Uragan_BM-27"
name = "MLRS 9K57 Uragan BM-27 220mm"
detection_range = 0
threat_range = 35800
air_weapon_dist = 35800
class Smerch(unittype.VehicleType):
id = "Smerch"
name = "MLRS 9A52 Smerch CM 300mm"
detection_range = 0
threat_range = 70000
air_weapon_dist = 70000
class Smerch_HE(unittype.VehicleType):
id = "Smerch_HE"
name = "MLRS 9A52 Smerch HE 300mm"
detection_range = 0
threat_range = 70000
air_weapon_dist = 70000
class MLRS(unittype.VehicleType):
id = "MLRS"
name = "MLRS M270 227mm"
detection_range = 0
threat_range = 32000
air_weapon_dist = 32000
eplrs = True
class T155_Firtina(unittype.VehicleType):
id = "T155_Firtina"
name = "SPH T155 Firtina 155mm"
detection_range = 0
threat_range = 41000
air_weapon_dist = 41000
class PLZ05(unittype.VehicleType):
id = "PLZ05"
name = "PLZ-05"
detection_range = 0
threat_range = 23500
air_weapon_dist = 23500
eplrs = True
class M12_GMC(unittype.VehicleType):
id = "M12_GMC"
name = "SPG M12 GMC 155mm"
detection_range = 0
threat_range = 18300
air_weapon_dist = 0
class Infantry:
class Paratrooper_RPG_16(unittype.VehicleType):
id = "Paratrooper RPG-16"
name = "Paratrooper RPG-16"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Paratrooper_AKS_74(unittype.VehicleType):
id = "Paratrooper AKS-74"
name = "Paratrooper AKS"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Infantry_AK_Ins(unittype.VehicleType):
id = "Infantry AK Ins"
name = "Insurgent AK-74"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_AK(unittype.VehicleType):
id = "Soldier AK"
name = "Infantry AK-74"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Infantry_AK(unittype.VehicleType):
id = "Infantry AK"
name = "Infantry AK-74 Rus"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_M249(unittype.VehicleType):
id = "Soldier M249"
name = "Infantry M249"
detection_range = 0
threat_range = 700
air_weapon_dist = 700
class Soldier_M4(unittype.VehicleType):
id = "Soldier M4"
name = "Infantry M4"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_M4_GRG(unittype.VehicleType):
id = "Soldier M4 GRG"
name = "Infantry M4 Georgia"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_RPG(unittype.VehicleType):
id = "Soldier RPG"
name = "Infantry RPG"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_mauser98(unittype.VehicleType):
id = "soldier_mauser98"
name = "Infantry Mauser 98"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_wwii_br_01(unittype.VehicleType):
id = "soldier_wwii_br_01"
name = "Infantry SMLE No.4 Mk-1"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class Soldier_wwii_us(unittype.VehicleType):
id = "soldier_wwii_us"
name = "Infantry M1 Garand"
detection_range = 0
threat_range = 500
air_weapon_dist = 500
class AirDefence:
class _2S6_Tunguska(unittype.VehicleType):
id = "2S6 Tunguska"
name = "SAM SA-19 Tunguska \"Grison\" "
detection_range = 18000
threat_range = 8000
air_weapon_dist = 8000
class Kub_2P25_ln(unittype.VehicleType):
id = "Kub 2P25 ln"
name = "SAM SA-6 Kub \"Gainful\" TEL"
detection_range = 0
threat_range = 25000
air_weapon_dist = 25000
class _5p73_s_125_ln(unittype.VehicleType):
id = "5p73 s-125 ln"
name = "SAM SA-3 S-125 \"Goa\" LN"
detection_range = 0
threat_range = 18000
air_weapon_dist = 18000
class S_300PS_5P85C_ln(unittype.VehicleType):
id = "S-300PS 5P85C ln"
name = "SAM SA-10 S-300 \"Grumble\" TEL D"
detection_range = 0
threat_range = 120000
air_weapon_dist = 120000
class S_300PS_5P85D_ln(unittype.VehicleType):
id = "S-300PS 5P85D ln"
name = "SAM SA-10 S-300 \"Grumble\" TEL C"
detection_range = 0
threat_range = 120000
air_weapon_dist = 120000
class SA_11_Buk_LN_9A310M1(unittype.VehicleType):
id = "SA-11 Buk LN 9A310M1"
name = "SAM SA-11 Buk \"Gadfly\" Fire Dome TEL"
detection_range = 50000
threat_range = 35000
air_weapon_dist = 35000
class Osa_9A33_ln(unittype.VehicleType):
id = "Osa 9A33 ln"
name = "SAM SA-8 Osa \"Gecko\" TEL"
detection_range = 30000
threat_range = 10300
air_weapon_dist = 10300
class Tor_9A331(unittype.VehicleType):
id = "Tor 9A331"
name = "SAM SA-15 Tor \"Gauntlet\""
detection_range = 25000
threat_range = 12000
air_weapon_dist = 12000
class Strela_10M3(unittype.VehicleType):
id = "Strela-10M3"
name = "SAM SA-13 Strela 10M3 \"Gopher\" TEL"
detection_range = 8000
threat_range = 5000
air_weapon_dist = 5000
class Strela_1_9P31(unittype.VehicleType):
id = "Strela-1 9P31"
name = "SAM SA-9 Strela 1 \"Gaskin\" TEL"
detection_range = 5000
threat_range = 4200
air_weapon_dist = 4200
class SA_11_Buk_CC_9S470M1(unittype.VehicleType):
id = "SA-11 Buk CC 9S470M1"
name = "SAM SA-11 Buk \"Gadfly\" C2 "
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class SA_8_Osa_LD_9T217(unittype.VehicleType):
id = "SA-8 Osa LD 9T217"
name = "SAM SA-8 Osa LD 9T217"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Patriot_AMG(unittype.VehicleType):
id = "Patriot AMG"
name = "SAM Patriot CR (AMG AN/MRC-137)"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Patriot_ECS(unittype.VehicleType):
id = "Patriot ECS"
name = "SAM Patriot ECS"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
eplrs = True
class Gepard(unittype.VehicleType):
id = "Gepard"
name = "SPAAA Gepard"
detection_range = 15000
threat_range = 4000
air_weapon_dist = 4000
class Hawk_pcp(unittype.VehicleType):
id = "Hawk pcp"
name = "SAM Hawk Platoon Command Post (PCP)"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Vulcan(unittype.VehicleType):
id = "Vulcan"
name = "SPAAA Vulcan M163"
detection_range = 5000
threat_range = 2000
air_weapon_dist = 2000
eplrs = True
class Hawk_ln(unittype.VehicleType):
id = "Hawk ln"
name = "SAM Hawk LN M192"
detection_range = 0
threat_range = 45000
air_weapon_dist = 45000
class M48_Chaparral(unittype.VehicleType):
id = "M48 Chaparral"
name = "SAM Chaparral M48"
detection_range = 10000
threat_range = 8500
air_weapon_dist = 8500
eplrs = True
class M6_Linebacker(unittype.VehicleType):
id = "M6 Linebacker"
name = "SAM Linebacker - Bradley M6"
detection_range = 8000
threat_range = 4500
air_weapon_dist = 4500
eplrs = True
class Patriot_ln(unittype.VehicleType):
id = "Patriot ln"
name = "SAM Patriot LN"
detection_range = 0
threat_range = 100000
air_weapon_dist = 100000
class M1097_Avenger(unittype.VehicleType):
id = "M1097 Avenger"
name = "SAM Avenger (Stinger)"
detection_range = 5200
threat_range = 4500
air_weapon_dist = 4500
eplrs = True
class Patriot_EPP(unittype.VehicleType):
id = "Patriot EPP"
name = "SAM Patriot EPP-III"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Patriot_cp(unittype.VehicleType):
id = "Patriot cp"
name = "SAM Patriot C2 ICC"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Roland_ADS(unittype.VehicleType):
id = "Roland ADS"
name = "SAM Roland ADS"
detection_range = 12000
threat_range = 8000
air_weapon_dist = 8000
class S_300PS_54K6_cp(unittype.VehicleType):
id = "S-300PS 54K6 cp"
name = "SAM SA-10 S-300 \"Grumble\" C2 "
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Soldier_stinger(unittype.VehicleType):
id = "Soldier stinger"
name = "MANPADS Stinger"
detection_range = 5000
threat_range = 4500
air_weapon_dist = 4500
class Stinger_comm_dsr(unittype.VehicleType):
id = "Stinger comm dsr"
name = "MANPADS Stinger C2 Desert"
detection_range = 5000
threat_range = 0
air_weapon_dist = 0
class Stinger_comm(unittype.VehicleType):
id = "Stinger comm"
name = "MANPADS Stinger C2"
detection_range = 5000
threat_range = 0
air_weapon_dist = 0
class ZSU_23_4_Shilka(unittype.VehicleType):
id = "ZSU-23-4 Shilka"
name = "SPAAA ZSU-23-4 Shilka \"Gun Dish\""
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class ZU_23_Emplacement_Closed(unittype.VehicleType):
id = "ZU-23 Emplacement Closed"
name = "AAA ZU-23 Closed Emplacement"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class ZU_23_Emplacement(unittype.VehicleType):
id = "ZU-23 Emplacement"
name = "AAA ZU-23 Emplacement"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class Ural_375_ZU_23(unittype.VehicleType):
id = "Ural-375 ZU-23"
name = "SPAAA ZU-23-2 Mounted Ural 375"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class ZU_23_Closed_Insurgent(unittype.VehicleType):
id = "ZU-23 Closed Insurgent"
name = "AAA ZU-23 Insurgent Closed Emplacement"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class Ural_375_ZU_23_Insurgent(unittype.VehicleType):
id = "Ural-375 ZU-23 Insurgent"
name = "SPAAA ZU-23-2 Insurgent Mounted Ural-375"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class ZU_23_Insurgent(unittype.VehicleType):
id = "ZU-23 Insurgent"
name = "AAA ZU-23 Insurgent Emplacement"
detection_range = 5000
threat_range = 2500
air_weapon_dist = 2500
class SA_18_Igla_manpad(unittype.VehicleType):
id = "SA-18 Igla manpad"
name = "MANPADS SA-18 Igla \"Grouse\""
detection_range = 5000
threat_range = 5200
air_weapon_dist = 5200
class SA_18_Igla_comm(unittype.VehicleType):
id = "SA-18 Igla comm"
name = "MANPADS SA-18 Igla \"Grouse\" C2"
detection_range = 5000
threat_range = 0
air_weapon_dist = 0
class SA_18_Igla_S_manpad(unittype.VehicleType):
id = "SA-18 Igla-S manpad"
name = "MANPADS SA-18 Igla-S \"Grouse\""
detection_range = 5000
threat_range = 5200
air_weapon_dist = 5200
class SA_18_Igla_S_comm(unittype.VehicleType):
id = "SA-18 Igla-S comm"
name = "MANPADS SA-18 Igla-S \"Grouse\" C2"
detection_range = 5000
threat_range = 0
air_weapon_dist = 0
class Igla_manpad_INS(unittype.VehicleType):
id = "Igla manpad INS"
name = "MANPADS SA-18 Igla \"Grouse\" Ins"
detection_range = 5000
threat_range = 5200
air_weapon_dist = 5200
class _1L13_EWR(unittype.VehicleType):
id = "1L13 EWR"
name = "EWR 1L13"
detection_range = 120000
threat_range = 0
air_weapon_dist = 0
class Kub_1S91_str(unittype.VehicleType):
id = "Kub 1S91 str"
name = "SAM SA-6 Kub \"Straight Flush\" STR"
detection_range = 70000
threat_range = 0
air_weapon_dist = 0
class S_300PS_40B6M_tr(unittype.VehicleType):
id = "S-300PS 40B6M tr"
name = "SAM SA-10 S-300 \"Grumble\" Flap Lid TR "
detection_range = 160000
threat_range = 0
air_weapon_dist = 0
class S_300PS_40B6MD_sr(unittype.VehicleType):
id = "S-300PS 40B6MD sr"
name = "SAM SA-10 S-300 \"Grumble\" Clam Shell SR"
detection_range = 60000
threat_range = 0
air_weapon_dist = 0
class _55G6_EWR(unittype.VehicleType):
id = "55G6 EWR"
name = "EWR 55G6"
detection_range = 120000
threat_range = 0
air_weapon_dist = 0
class S_300PS_64H6E_sr(unittype.VehicleType):
id = "S-300PS 64H6E sr"
name = "SAM SA-10 S-300 \"Grumble\" Big Bird SR "
detection_range = 160000
threat_range = 0
air_weapon_dist = 0
class SA_11_Buk_SR_9S18M1(unittype.VehicleType):
id = "SA-11 Buk SR 9S18M1"
name = "SAM SA-11 Buk \"Gadfly\" Snow Drift SR"
detection_range = 100000
threat_range = 0
air_weapon_dist = 0
class Dog_Ear_radar(unittype.VehicleType):
id = "Dog Ear radar"
name = "MCC-SR Sborka \"Dog Ear\" SR"
detection_range = 35000
threat_range = 0
air_weapon_dist = 0
class Hawk_tr(unittype.VehicleType):
id = "Hawk tr"
name = "SAM Hawk TR (AN/MPQ-46)"
detection_range = 90000
threat_range = 0
air_weapon_dist = 0
class Hawk_sr(unittype.VehicleType):
id = "Hawk sr"
name = "SAM Hawk SR (AN/MPQ-50)"
detection_range = 90000
threat_range = 0
air_weapon_dist = 0
eplrs = True
class Patriot_str(unittype.VehicleType):
id = "Patriot str"
name = "SAM Patriot STR"
detection_range = 160000
threat_range = 0
air_weapon_dist = 0
class Hawk_cwar(unittype.VehicleType):
id = "Hawk cwar"
name = "SAM Hawk CWAR AN/MPQ-55"
detection_range = 70000
threat_range = 0
air_weapon_dist = 0
eplrs = True
class P_19_s_125_sr(unittype.VehicleType):
id = "p-19 s-125 sr"
name = "SAM P19 \"Flat Face\" SR (SA-2/3)"
detection_range = 160000
threat_range = 0
air_weapon_dist = 0
class Roland_Radar(unittype.VehicleType):
id = "Roland Radar"
name = "SAM Roland EWR"
detection_range = 35000
threat_range = 0
air_weapon_dist = 0
class Snr_s_125_tr(unittype.VehicleType):
id = "snr s-125 tr"
name = "SAM SA-3 S-125 \"Low Blow\" TR"
detection_range = 100000
threat_range = 0
air_weapon_dist = 0
class S_75M_Volhov(unittype.VehicleType):
id = "S_75M_Volhov"
name = "SAM SA-2 S-75 \"Guideline\" LN"
detection_range = 0
threat_range = 43000
air_weapon_dist = 43000
class SNR_75V(unittype.VehicleType):
id = "SNR_75V"
name = "SAM SA-2 S-75 \"Fan Song\" TR"
detection_range = 100000
threat_range = 0
air_weapon_dist = 0
class RLS_19J6(unittype.VehicleType):
id = "RLS_19J6"
name = "SR 19J6"
detection_range = 150000
threat_range = 0
air_weapon_dist = 0
class ZSU_57_2(unittype.VehicleType):
id = "ZSU_57_2"
name = "SPAAA ZSU-57-2"
detection_range = 5000
threat_range = 7000
air_weapon_dist = 7000
class S_60_Type59_Artillery(unittype.VehicleType):
id = "S-60_Type59_Artillery"
name = "AAA S-60 57mm"
detection_range = 5000
threat_range = 7000
air_weapon_dist = 7000
class Generator_5i57(unittype.VehicleType):
id = "generator_5i57"
name = "Disel Power Station 5I57A"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Bofors40(unittype.VehicleType):
id = "bofors40"
name = "AAA Bofors 40mm"
detection_range = 0
threat_range = 4000
air_weapon_dist = 4000
class Rapier_fsa_launcher(unittype.VehicleType):
id = "rapier_fsa_launcher"
name = "SAM Rapier LN"
detection_range = 30000
threat_range = 6800
air_weapon_dist = 6800
class Rapier_fsa_optical_tracker_unit(unittype.VehicleType):
id = "rapier_fsa_optical_tracker_unit"
name = "SAM Rapier Tracker"
detection_range = 20000
threat_range = 0
air_weapon_dist = 0
class Rapier_fsa_blindfire_radar(unittype.VehicleType):
id = "rapier_fsa_blindfire_radar"
name = "SAM Rapier Blindfire TR"
detection_range = 30000
threat_range = 0
air_weapon_dist = 0
class Flak18(unittype.VehicleType):
id = "flak18"
name = "AAA 8,8cm Flak 18"
detection_range = 0
threat_range = 11000
air_weapon_dist = 11000
class HQ_7_LN_SP(unittype.VehicleType):
id = "HQ-7_LN_SP"
name = "HQ-7 Self-Propelled LN"
detection_range = 20000
threat_range = 12000
air_weapon_dist = 12000
class HQ_7_STR_SP(unittype.VehicleType):
id = "HQ-7_STR_SP"
name = "HQ-7 Self-Propelled STR"
detection_range = 30000
threat_range = 0
air_weapon_dist = 0
class Flak30(unittype.VehicleType):
id = "flak30"
name = "AAA Flak 38 20mm"
detection_range = 0
threat_range = 2500
air_weapon_dist = 2500
class Flak36(unittype.VehicleType):
id = "flak36"
name = "AAA 8,8cm Flak 36"
detection_range = 0
threat_range = 11000
air_weapon_dist = 11000
class Flak37(unittype.VehicleType):
id = "flak37"
name = "AAA 8,8cm Flak 37"
detection_range = 0
threat_range = 11000
air_weapon_dist = 11000
class Flak38(unittype.VehicleType):
id = "flak38"
name = "AAA Flak-Vierling 38 Quad 20mm"
detection_range = 0
threat_range = 2500
air_weapon_dist = 2500
class KDO_Mod40(unittype.VehicleType):
id = "KDO_Mod40"
name = "AAA SP Kdo.G.40"
detection_range = 30000
threat_range = 0
air_weapon_dist = 0
class Flakscheinwerfer_37(unittype.VehicleType):
id = "Flakscheinwerfer_37"
name = "SL Flakscheinwerfer 37"
detection_range = 15000
threat_range = 15000
air_weapon_dist = 0
class Maschinensatz_33(unittype.VehicleType):
id = "Maschinensatz_33"
name = "PU Maschinensatz_33"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Flak41(unittype.VehicleType):
id = "flak41"
name = "AAA 8,8cm Flak 41"
detection_range = 0
threat_range = 12500
air_weapon_dist = 12500
class FuMG_401(unittype.VehicleType):
id = "FuMG-401"
name = "EWR FuMG-401 Freya LZ"
detection_range = 160000
threat_range = 0
air_weapon_dist = 0
class FuSe_65(unittype.VehicleType):
id = "FuSe-65"
name = "EWR FuSe-65 Würzburg-Riese"
detection_range = 60000
threat_range = 0
air_weapon_dist = 0
class QF_37_AA(unittype.VehicleType):
id = "QF_37_AA"
name = "AAA QF 3,7\""
detection_range = 0
threat_range = 9000
air_weapon_dist = 9000
class M45_Quadmount(unittype.VehicleType):
id = "M45_Quadmount"
name = "AAA M45 Quadmount HB 12.7mm"
detection_range = 0
threat_range = 1500
air_weapon_dist = 1500
class M1_37mm(unittype.VehicleType):
id = "M1_37mm"
name = "AAA M1 37mm"
detection_range = 0
threat_range = 5700
air_weapon_dist = 5700
class Fortification:
class Bunker(unittype.VehicleType):
id = "Bunker"
name = "Bunker 2"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class Sandbox(unittype.VehicleType):
id = "Sandbox"
name = "Bunker 1"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class House1arm(unittype.VehicleType):
id = "house1arm"
name = "Barracks armed"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class House2arm(unittype.VehicleType):
id = "house2arm"
name = "Watch tower armed"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class Outpost_road(unittype.VehicleType):
id = "outpost_road"
name = "Road outpost"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class Outpost(unittype.VehicleType):
id = "outpost"
name = "Outpost"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class HouseA_arm(unittype.VehicleType):
id = "houseA_arm"
name = "Building armed"
detection_range = 0
threat_range = 800
air_weapon_dist = 800
class TACAN_beacon(unittype.VehicleType):
id = "TACAN_beacon"
name = "Beacon TACAN Portable TTS 3030"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class SK_C_28_naval_gun(unittype.VehicleType):
id = "SK_C_28_naval_gun"
name = "Gun 15cm SK C/28 Naval in Bunker"
detection_range = 0
threat_range = 20000
air_weapon_dist = 0
class Fire_control(unittype.VehicleType):
id = "fire_control"
name = "Bunker with Fire Control Center"
detection_range = 0
threat_range = 1100
air_weapon_dist = 1100
class Unarmed:
class Ural_4320_APA_5D(unittype.VehicleType):
id = "Ural-4320 APA-5D"
name = "GPU APA-5D on Ural 4320"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ATMZ_5(unittype.VehicleType):
id = "ATMZ-5"
name = "Refueler ATMZ-5"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ATZ_10(unittype.VehicleType):
id = "ATZ-10"
name = "Refueler ATZ-10"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class GAZ_3307(unittype.VehicleType):
id = "GAZ-3307"
name = "Truck GAZ-3307"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class GAZ_3308(unittype.VehicleType):
id = "GAZ-3308"
name = "Truck GAZ-3308"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class GAZ_66(unittype.VehicleType):
id = "GAZ-66"
name = "Truck GAZ-66"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class M978_HEMTT_Tanker(unittype.VehicleType):
id = "M978 HEMTT Tanker"
name = "Refueler M978 HEMTT"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class HEMTT_TFFT(unittype.VehicleType):
id = "HEMTT TFFT"
name = "Firefighter HEMMT TFFT"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class IKARUS_Bus(unittype.VehicleType):
id = "IKARUS Bus"
name = "Bus IKARUS-280"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class KAMAZ_Truck(unittype.VehicleType):
id = "KAMAZ Truck"
name = "Truck KAMAZ 43101"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class LAZ_Bus(unittype.VehicleType):
id = "LAZ Bus"
name = "Bus LAZ-695"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class LiAZ_Bus(unittype.VehicleType):
id = "LiAZ Bus"
name = "Bus LiAZ-677"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Hummer(unittype.VehicleType):
id = "Hummer"
name = "LUV HMMWV Jeep"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
eplrs = True
class M_818(unittype.VehicleType):
id = "M 818"
name = "Truck M939 Heavy"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class MAZ_6303(unittype.VehicleType):
id = "MAZ-6303"
name = "Truck MAZ-6303"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Predator_GCS(unittype.VehicleType):
id = "Predator GCS"
name = "MCC Predator UAV CP & GCS"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Predator_TrojanSpirit(unittype.VehicleType):
id = "Predator TrojanSpirit"
name = "MCC-COMM Predator UAV CL"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Suidae(unittype.VehicleType):
id = "Suidae"
name = "Suidae"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Tigr_233036(unittype.VehicleType):
id = "Tigr_233036"
name = "LUV Tigr"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Trolley_bus(unittype.VehicleType):
id = "Trolley bus"
name = "Bus ZIU-9 Trolley"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class UAZ_469(unittype.VehicleType):
id = "UAZ-469"
name = "LUV UAZ-469 Jeep"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Ural_ATsP_6(unittype.VehicleType):
id = "Ural ATsP-6"
name = "Firefighter Ural ATsP-6"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Ural_4320_31(unittype.VehicleType):
id = "Ural-4320-31"
name = "Truck Ural-4320-31 Arm'd"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Ural_4320T(unittype.VehicleType):
id = "Ural-4320T"
name = "Truck Ural-4320T"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Ural_375_PBU(unittype.VehicleType):
id = "Ural-375 PBU"
name = "Truck Ural-375 Mobile C2"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Ural_375(unittype.VehicleType):
id = "Ural-375"
name = "Truck Ural-375"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class VAZ_Car(unittype.VehicleType):
id = "VAZ Car"
name = "Car VAZ-2109"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ZiL_131_APA_80(unittype.VehicleType):
id = "ZiL-131 APA-80"
name = "GPU APA-80 on ZIL-131"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class SKP_11(unittype.VehicleType):
id = "SKP-11"
name = "Truck SKP-11 Mobile ATC"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ZIL_131_KUNG(unittype.VehicleType):
id = "ZIL-131 KUNG"
name = "Truck ZIL-131 (C2)"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ZIL_4331(unittype.VehicleType):
id = "ZIL-4331"
name = "Truck ZIL-4331"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class KrAZ6322(unittype.VehicleType):
id = "KrAZ6322"
name = "Truck KrAZ-6322 6x6"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ATZ_5(unittype.VehicleType):
id = "ATZ-5"
name = "Refueler ATZ-5"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class AA8(unittype.VehicleType):
id = "AA8"
name = "Fire Fight Vehicle AA-7.2/60"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ATZ_60_Maz(unittype.VehicleType):
id = "ATZ-60_Maz"
name = "Refueler ATZ-60 Tractor"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ZIL_135(unittype.VehicleType):
id = "ZIL-135"
name = "Truck ZIL-135"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class TZ_22_KrAZ(unittype.VehicleType):
id = "TZ-22_KrAZ"
name = "Refueler TZ-22 Tractor"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Bedford_MWD(unittype.VehicleType):
id = "Bedford_MWD"
name = "Truck Bedford"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Land_Rover_101_FC(unittype.VehicleType):
id = "Land_Rover_101_FC"
name = "Truck Land Rover 101 FC"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Land_Rover_109_S3(unittype.VehicleType):
id = "Land_Rover_109_S3"
name = "LUV Land Rover 109"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Blitz_36_6700A(unittype.VehicleType):
id = "Blitz_36-6700A"
name = "Truck Opel Blitz"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Kubelwagen_82(unittype.VehicleType):
id = "Kubelwagen_82"
name = "LUV Kubelwagen 82"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Sd_Kfz_2(unittype.VehicleType):
id = "Sd_Kfz_2"
name = "LUV Kettenrad"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Sd_Kfz_7(unittype.VehicleType):
id = "Sd_Kfz_7"
name = "Carrier Sd.Kfz.7 Tractor"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Horch_901_typ_40_kfz_21(unittype.VehicleType):
id = "Horch_901_typ_40_kfz_21"
name = "LUV Horch 901 Staff Car"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class CCKW_353(unittype.VehicleType):
id = "CCKW_353"
name = "Truck GMC \"Jimmy\" 6x6 Truck"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Willys_MB(unittype.VehicleType):
id = "Willys_MB"
name = "Car Willys Jeep"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class M30_CC(unittype.VehicleType):
id = "M30_CC"
name = "Carrier M30 Cargo"
detection_range = 0
threat_range = 1200
air_weapon_dist = 0
class M4_Tractor(unittype.VehicleType):
id = "M4_Tractor"
name = "Tractor M4 Hi-Speed"
detection_range = 0
threat_range = 1200
air_weapon_dist = 0
class Armor:
class AAV7(unittype.VehicleType):
id = "AAV7"
name = "APC AAV-7 Amphibious"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
class BMD_1(unittype.VehicleType):
id = "BMD-1"
name = "IFV BMD-1"
detection_range = 0
threat_range = 3000
air_weapon_dist = 1000
class BMP_1(unittype.VehicleType):
id = "BMP-1"
name = "IFV BMP-1"
detection_range = 0
threat_range = 3000
air_weapon_dist = 1000
class BMP_2(unittype.VehicleType):
id = "BMP-2"
name = "IFV BMP-2"
detection_range = 0
threat_range = 3000
air_weapon_dist = 2000
class BMP_3(unittype.VehicleType):
id = "BMP-3"
name = "IFV BMP-3"
detection_range = 0
threat_range = 4000
air_weapon_dist = 2000
class BRDM_2(unittype.VehicleType):
id = "BRDM-2"
name = "Scout BRDM-2"
detection_range = 0
threat_range = 1600
air_weapon_dist = 1600
class BTR_80(unittype.VehicleType):
id = "BTR-80"
name = "APC BTR-80"
detection_range = 0
threat_range = 1600
air_weapon_dist = 1600
class BTR_D(unittype.VehicleType):
id = "BTR_D"
name = "APC BTR-RD"
detection_range = 0
threat_range = 3000
air_weapon_dist = 1000
class Cobra(unittype.VehicleType):
id = "Cobra"
name = "Scout Cobra"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
class LAV_25(unittype.VehicleType):
id = "LAV-25"
name = "IFV LAV-25"
detection_range = 0
threat_range = 2500
air_weapon_dist = 2500
class M1043_HMMWV_Armament(unittype.VehicleType):
id = "M1043 HMMWV Armament"
name = "Scout HMMWV"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
eplrs = True
class M1045_HMMWV_TOW(unittype.VehicleType):
id = "M1045 HMMWV TOW"
name = "ATGM HMMWV"
detection_range = 0
threat_range = 3800
air_weapon_dist = 0
eplrs = True
class M1126_Stryker_ICV(unittype.VehicleType):
id = "M1126 Stryker ICV"
name = "IFV M1126 Stryker ICV"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
eplrs = True
class M_113(unittype.VehicleType):
id = "M-113"
name = "APC M113"
detection_range = 0
threat_range = 1200
air_weapon_dist = 1200
eplrs = True
class M1134_Stryker_ATGM(unittype.VehicleType):
id = "M1134 Stryker ATGM"
name = "ATGM Stryker"
detection_range = 0
threat_range = 3800
air_weapon_dist = 1000
eplrs = True
class M_2_Bradley(unittype.VehicleType):
id = "M-2 Bradley"
name = "IFV M2A2 Bradley"
detection_range = 0
threat_range = 3800
air_weapon_dist = 2500
eplrs = True
class MCV_80(unittype.VehicleType):
id = "MCV-80"
name = "IFV Warrior "
detection_range = 0
threat_range = 2500
air_weapon_dist = 2500
class MTLB(unittype.VehicleType):
id = "MTLB"
name = "APC MTLB"
detection_range = 0
threat_range = 1000
air_weapon_dist = 1000
class Marder(unittype.VehicleType):
id = "Marder"
name = "IFV Marder"
detection_range = 0
threat_range = 1500
air_weapon_dist = 1500
class TPZ(unittype.VehicleType):
id = "TPZ"
name = "APC TPz Fuchs "
detection_range = 0
threat_range = 1000
air_weapon_dist = 1000
class Challenger2(unittype.VehicleType):
id = "Challenger2"
name = "MBT Challenger II"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class Leclerc(unittype.VehicleType):
id = "Leclerc"
name = "MBT Leclerc"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class M_60(unittype.VehicleType):
id = "M-60"
name = "MBT M60A3 Patton"
detection_range = 0
threat_range = 8000
air_weapon_dist = 1500
class M1128_Stryker_MGS(unittype.VehicleType):
id = "M1128 Stryker MGS"
name = "SPG Stryker MGS"
detection_range = 0
threat_range = 4000
air_weapon_dist = 1200
eplrs = True
class M_1_Abrams(unittype.VehicleType):
id = "M-1 Abrams"
name = "MBT M1A2 Abrams"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1200
eplrs = True
class T_55(unittype.VehicleType):
id = "T-55"
name = "MBT T-55"
detection_range = 0
threat_range = 2500
air_weapon_dist = 1200
class T_72B(unittype.VehicleType):
id = "T-72B"
name = "MBT T-72B"
detection_range = 0
threat_range = 4000
air_weapon_dist = 3500
class T_80UD(unittype.VehicleType):
id = "T-80UD"
name = "MBT T-80U"
detection_range = 0
threat_range = 5000
air_weapon_dist = 3500
class T_90(unittype.VehicleType):
id = "T-90"
name = "MBT T-90"
detection_range = 0
threat_range = 5000
air_weapon_dist = 3500
class Leopard1A3(unittype.VehicleType):
id = "Leopard1A3"
name = "MBT Leopard 1A3"
detection_range = 0
threat_range = 2500
air_weapon_dist = 1500
class Merkava_Mk4(unittype.VehicleType):
id = "Merkava_Mk4"
name = "MBT Merkava IV"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1200
class M4_Sherman(unittype.VehicleType):
id = "M4_Sherman"
name = "Tk M4 Sherman"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class M2A1_halftrack(unittype.VehicleType):
id = "M2A1_halftrack"
name = "APC M2A1 Halftrack"
detection_range = 0
threat_range = 1200
air_weapon_dist = 0
class T_72B3(unittype.VehicleType):
id = "T-72B3"
name = "MBT T-72B3"
detection_range = 0
threat_range = 4000
air_weapon_dist = 3500
class BTR_82A(unittype.VehicleType):
id = "BTR-82A"
name = "IFV BTR-82A"
detection_range = 0
threat_range = 2000
air_weapon_dist = 2000
class PT_76(unittype.VehicleType):
id = "PT_76"
name = "LT PT-76"
detection_range = 0
threat_range = 2000
air_weapon_dist = 1000
class Chieftain_mk3(unittype.VehicleType):
id = "Chieftain_mk3"
name = "MBT Chieftain Mk.3"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class Pz_IV_H(unittype.VehicleType):
id = "Pz_IV_H"
name = "Tk PzIV H"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Sd_Kfz_251(unittype.VehicleType):
id = "Sd_Kfz_251"
name = "APC Sd.Kfz.251 Halftrack"
detection_range = 0
threat_range = 1100
air_weapon_dist = 0
class Leopard_2A5(unittype.VehicleType):
id = "Leopard-2A5"
name = "MBT Leopard-2A5"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class Leopard_2(unittype.VehicleType):
id = "Leopard-2"
name = "MBT Leopard-2A6M"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class Leopard_2A4(unittype.VehicleType):
id = "leopard-2A4"
name = "MBT Leopard-2A4"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class Leopard_2A4_trs(unittype.VehicleType):
id = "leopard-2A4_trs"
name = "MBT Leopard-2A4 Trs"
detection_range = 0
threat_range = 3500
air_weapon_dist = 1500
class VAB_Mephisto(unittype.VehicleType):
id = "VAB_Mephisto"
name = "ATGM VAB Mephisto"
detection_range = 0
threat_range = 3800
air_weapon_dist = 3800
eplrs = True
class ZTZ96B(unittype.VehicleType):
id = "ZTZ96B"
name = "ZTZ-96B"
detection_range = 0
threat_range = 5000
air_weapon_dist = 3500
eplrs = True
class ZBD04A(unittype.VehicleType):
id = "ZBD04A"
name = "ZBD-04A"
detection_range = 0
threat_range = 4800
air_weapon_dist = 0
eplrs = True
class Tiger_I(unittype.VehicleType):
id = "Tiger_I"
name = "HT Pz.Kpfw.VI Tiger I"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Tiger_II_H(unittype.VehicleType):
id = "Tiger_II_H"
name = "HT Pz.Kpfw.VI Ausf. B Tiger II"
detection_range = 0
threat_range = 6000
air_weapon_dist = 0
class Pz_V_Panther_G(unittype.VehicleType):
id = "Pz_V_Panther_G"
name = "MT Pz.Kpfw.V Panther Ausf.G"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Jagdpanther_G1(unittype.VehicleType):
id = "Jagdpanther_G1"
name = "SPG Jagdpanther G1"
detection_range = 0
threat_range = 5000
air_weapon_dist = 0
class JagdPz_IV(unittype.VehicleType):
id = "JagdPz_IV"
name = "SPG Jagdpanzer IV"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Stug_IV(unittype.VehicleType):
id = "Stug_IV"
name = "SPG StuG IV"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class SturmPzIV(unittype.VehicleType):
id = "SturmPzIV"
name = "SPG Sturmpanzer IV Brummbar"
detection_range = 0
threat_range = 4500
air_weapon_dist = 2500
class Sd_Kfz_234_2_Puma(unittype.VehicleType):
id = "Sd_Kfz_234_2_Puma"
name = "IFV Sd.Kfz.234/2 Puma"
detection_range = 0
threat_range = 2000
air_weapon_dist = 0
class Stug_III(unittype.VehicleType):
id = "Stug_III"
name = "SPG StuG III Ausf. G"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Elefant_SdKfz_184(unittype.VehicleType):
id = "Elefant_SdKfz_184"
name = "SPG Sd.Kfz.184 Elefant"
detection_range = 0
threat_range = 6000
air_weapon_dist = 0
class Cromwell_IV(unittype.VehicleType):
id = "Cromwell_IV"
name = "CT Cromwell IV"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class M4A4_Sherman_FF(unittype.VehicleType):
id = "M4A4_Sherman_FF"
name = "MT M4A4 Sherman Firefly"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Centaur_IV(unittype.VehicleType):
id = "Centaur_IV"
name = "CT Centaur IV"
detection_range = 0
threat_range = 6000
air_weapon_dist = 0
class Churchill_VII(unittype.VehicleType):
id = "Churchill_VII"
name = "HIT Churchill VII"
detection_range = 0
threat_range = 3000
air_weapon_dist = 0
class Daimler_AC(unittype.VehicleType):
id = "Daimler_AC"
name = "Car Daimler Armored"
detection_range = 0
threat_range = 2000
air_weapon_dist = 0
class Tetrarch(unittype.VehicleType):
id = "Tetrarch"
name = "LT Mk VII Tetrarch"
detection_range = 0
threat_range = 2000
air_weapon_dist = 0
class M10_GMC(unittype.VehicleType):
id = "M10_GMC"
name = "SPG M10 GMC"
detection_range = 0
threat_range = 6000
air_weapon_dist = 0
class M8_Greyhound(unittype.VehicleType):
id = "M8_Greyhound"
name = "Car M8 Greyhound Armored"
detection_range = 0
threat_range = 2000
air_weapon_dist = 0
class MissilesSS:
class Scud_B(unittype.VehicleType):
id = "Scud_B"
name = "SSM SS-1C Scud-B"
detection_range = 0
threat_range = 320000
air_weapon_dist = 320000
class Hy_launcher(unittype.VehicleType):
id = "hy_launcher"
name = "AShM SS-N-2 Silkworm"
detection_range = 100000
threat_range = 100000
air_weapon_dist = 100000
class Silkworm_SR(unittype.VehicleType):
id = "Silkworm_SR"
name = "AShM Silkworm SR"
detection_range = 200000
threat_range = 0
air_weapon_dist = 0
class V1_launcher(unittype.VehicleType):
id = "v1_launcher"
name = "SSM V-1 Launcher"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Locomotive:
class Electric_locomotive(unittype.VehicleType):
id = "Electric locomotive"
name = "Loco VL80 Electric"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Locomotive(unittype.VehicleType):
id = "Locomotive"
name = "Loco CHME3T"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class ES44AH(unittype.VehicleType):
id = "ES44AH"
name = "Loco ES44AH"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class DRG_Class_86(unittype.VehicleType):
id = "DRG_Class_86"
name = "Loco DRG Class 86"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Carriage:
class Coach_cargo(unittype.VehicleType):
id = "Coach cargo"
name = "Freight Van"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Coach_cargo_open(unittype.VehicleType):
id = "Coach cargo open"
name = "Open Wagon"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Coach_a_tank_blue(unittype.VehicleType):
id = "Coach a tank blue"
name = "Tank Car blue"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Coach_a_tank_yellow(unittype.VehicleType):
id = "Coach a tank yellow"
name = "Tank Car yellow"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Coach_a_passenger(unittype.VehicleType):
id = "Coach a passenger"
name = "Passenger Car"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Coach_a_platform(unittype.VehicleType):
id = "Coach a platform"
name = "Coach Platform"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Boxcartrinity(unittype.VehicleType):
id = "Boxcartrinity"
name = "Flatcar"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Tankcartrinity(unittype.VehicleType):
id = "Tankcartrinity"
name = "Tank Cartrinity"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class Wellcarnsc(unittype.VehicleType):
id = "Wellcarnsc"
name = "Well Car"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class DR_50Ton_Flat_Wagon(unittype.VehicleType):
id = "DR_50Ton_Flat_Wagon"
name = "DR 50-ton flat wagon"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class German_covered_wagon_G10(unittype.VehicleType):
id = "German_covered_wagon_G10"
name = "Wagon G10 (Germany)"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
class German_tank_wagon(unittype.VehicleType):
id = "German_tank_wagon"
name = "Tank Car (Germany)"
detection_range = 0
threat_range = 0
air_weapon_dist = 0
vehicle_map = {
"2B11 mortar": Artillery._2B11_mortar,
"SAU Gvozdika": Artillery.SAU_Gvozdika,
"SAU Msta": Artillery.SAU_Msta,
"SAU Akatsia": Artillery.SAU_Akatsia,
"SAU 2-C9": Artillery.SAU_2_C9,
"M-109": Artillery.M_109,
"SpGH_Dana": Artillery.SpGH_Dana,
"AAV7": Armor.AAV7,
"BMD-1": Armor.BMD_1,
"BMP-1": Armor.BMP_1,
"BMP-2": Armor.BMP_2,
"BMP-3": Armor.BMP_3,
"BRDM-2": Armor.BRDM_2,
"BTR-80": Armor.BTR_80,
"BTR_D": Armor.BTR_D,
"Cobra": Armor.Cobra,
"LAV-25": Armor.LAV_25,
"M1043 HMMWV Armament": Armor.M1043_HMMWV_Armament,
"M1045 HMMWV TOW": Armor.M1045_HMMWV_TOW,
"M1126 Stryker ICV": Armor.M1126_Stryker_ICV,
"M-113": Armor.M_113,
"M1134 Stryker ATGM": Armor.M1134_Stryker_ATGM,
"M-2 Bradley": Armor.M_2_Bradley,
"MCV-80": Armor.MCV_80,
"MTLB": Armor.MTLB,
"Marder": Armor.Marder,
"TPZ": Armor.TPZ,
"Grad_FDDM": Artillery.Grad_FDDM,
"Bunker": Fortification.Bunker,
"Paratrooper RPG-16": Infantry.Paratrooper_RPG_16,
"Paratrooper AKS-74": Infantry.Paratrooper_AKS_74,
"Infantry AK Ins": Infantry.Infantry_AK_Ins,
"Sandbox": Fortification.Sandbox,
"Soldier AK": Infantry.Soldier_AK,
"Infantry AK": Infantry.Infantry_AK,
"Soldier M249": Infantry.Soldier_M249,
"Soldier M4": Infantry.Soldier_M4,
"Soldier M4 GRG": Infantry.Soldier_M4_GRG,
"Soldier RPG": Infantry.Soldier_RPG,
"MLRS FDDM": Artillery.MLRS_FDDM,
"Grad-URAL": Artillery.Grad_URAL,
"Uragan_BM-27": Artillery.Uragan_BM_27,
"Smerch": Artillery.Smerch,
"Smerch_HE": Artillery.Smerch_HE,
"MLRS": Artillery.MLRS,
"2S6 Tunguska": AirDefence._2S6_Tunguska,
"Kub 2P25 ln": AirDefence.Kub_2P25_ln,
"5p73 s-125 ln": AirDefence._5p73_s_125_ln,
"S-300PS 5P85C ln": AirDefence.S_300PS_5P85C_ln,
"S-300PS 5P85D ln": AirDefence.S_300PS_5P85D_ln,
"SA-11 Buk LN 9A310M1": AirDefence.SA_11_Buk_LN_9A310M1,
"Osa 9A33 ln": AirDefence.Osa_9A33_ln,
"Tor 9A331": AirDefence.Tor_9A331,
"Strela-10M3": AirDefence.Strela_10M3,
"Strela-1 9P31": AirDefence.Strela_1_9P31,
"SA-11 Buk CC 9S470M1": AirDefence.SA_11_Buk_CC_9S470M1,
"SA-8 Osa LD 9T217": AirDefence.SA_8_Osa_LD_9T217,
"Patriot AMG": AirDefence.Patriot_AMG,
"Patriot ECS": AirDefence.Patriot_ECS,
"Gepard": AirDefence.Gepard,
"Hawk pcp": AirDefence.Hawk_pcp,
"Vulcan": AirDefence.Vulcan,
"Hawk ln": AirDefence.Hawk_ln,
"M48 Chaparral": AirDefence.M48_Chaparral,
"M6 Linebacker": AirDefence.M6_Linebacker,
"Patriot ln": AirDefence.Patriot_ln,
"M1097 Avenger": AirDefence.M1097_Avenger,
"Patriot EPP": AirDefence.Patriot_EPP,
"Patriot cp": AirDefence.Patriot_cp,
"Roland ADS": AirDefence.Roland_ADS,
"S-300PS 54K6 cp": AirDefence.S_300PS_54K6_cp,
"Soldier stinger": AirDefence.Soldier_stinger,
"Stinger comm dsr": AirDefence.Stinger_comm_dsr,
"Stinger comm": AirDefence.Stinger_comm,
"ZSU-23-4 Shilka": AirDefence.ZSU_23_4_Shilka,
"ZU-23 Emplacement Closed": AirDefence.ZU_23_Emplacement_Closed,
"ZU-23 Emplacement": AirDefence.ZU_23_Emplacement,
"Ural-375 ZU-23": AirDefence.Ural_375_ZU_23,
"ZU-23 Closed Insurgent": AirDefence.ZU_23_Closed_Insurgent,
"Ural-375 ZU-23 Insurgent": AirDefence.Ural_375_ZU_23_Insurgent,
"ZU-23 Insurgent": AirDefence.ZU_23_Insurgent,
"SA-18 Igla manpad": AirDefence.SA_18_Igla_manpad,
"SA-18 Igla comm": AirDefence.SA_18_Igla_comm,
"SA-18 Igla-S manpad": AirDefence.SA_18_Igla_S_manpad,
"SA-18 Igla-S comm": AirDefence.SA_18_Igla_S_comm,
"Igla manpad INS": AirDefence.Igla_manpad_INS,
"1L13 EWR": AirDefence._1L13_EWR,
"Kub 1S91 str": AirDefence.Kub_1S91_str,
"S-300PS 40B6M tr": AirDefence.S_300PS_40B6M_tr,
"S-300PS 40B6MD sr": AirDefence.S_300PS_40B6MD_sr,
"55G6 EWR": AirDefence._55G6_EWR,
"S-300PS 64H6E sr": AirDefence.S_300PS_64H6E_sr,
"SA-11 Buk SR 9S18M1": AirDefence.SA_11_Buk_SR_9S18M1,
"Dog Ear radar": AirDefence.Dog_Ear_radar,
"Hawk tr": AirDefence.Hawk_tr,
"Hawk sr": AirDefence.Hawk_sr,
"Patriot str": AirDefence.Patriot_str,
"Hawk cwar": AirDefence.Hawk_cwar,
"p-19 s-125 sr": AirDefence.P_19_s_125_sr,
"Roland Radar": AirDefence.Roland_Radar,
"snr s-125 tr": AirDefence.Snr_s_125_tr,
"house1arm": Fortification.House1arm,
"house2arm": Fortification.House2arm,
"outpost_road": Fortification.Outpost_road,
"outpost": Fortification.Outpost,
"houseA_arm": Fortification.HouseA_arm,
"TACAN_beacon": Fortification.TACAN_beacon,
"Challenger2": Armor.Challenger2,
"Leclerc": Armor.Leclerc,
"M-60": Armor.M_60,
"M1128 Stryker MGS": Armor.M1128_Stryker_MGS,
"M-1 Abrams": Armor.M_1_Abrams,
"T-55": Armor.T_55,
"T-72B": Armor.T_72B,
"T-80UD": Armor.T_80UD,
"T-90": Armor.T_90,
"Leopard1A3": Armor.Leopard1A3,
"Merkava_Mk4": Armor.Merkava_Mk4,
"Ural-4320 APA-5D": Unarmed.Ural_4320_APA_5D,
"ATMZ-5": Unarmed.ATMZ_5,
"ATZ-10": Unarmed.ATZ_10,
"GAZ-3307": Unarmed.GAZ_3307,
"GAZ-3308": Unarmed.GAZ_3308,
"GAZ-66": Unarmed.GAZ_66,
"M978 HEMTT Tanker": Unarmed.M978_HEMTT_Tanker,
"HEMTT TFFT": Unarmed.HEMTT_TFFT,
"IKARUS Bus": Unarmed.IKARUS_Bus,
"KAMAZ Truck": Unarmed.KAMAZ_Truck,
"LAZ Bus": Unarmed.LAZ_Bus,
"LiAZ Bus": Unarmed.LiAZ_Bus,
"Hummer": Unarmed.Hummer,
"M 818": Unarmed.M_818,
"MAZ-6303": Unarmed.MAZ_6303,
"Predator GCS": Unarmed.Predator_GCS,
"Predator TrojanSpirit": Unarmed.Predator_TrojanSpirit,
"Suidae": Unarmed.Suidae,
"Tigr_233036": Unarmed.Tigr_233036,
"Trolley bus": Unarmed.Trolley_bus,
"UAZ-469": Unarmed.UAZ_469,
"Ural ATsP-6": Unarmed.Ural_ATsP_6,
"Ural-4320-31": Unarmed.Ural_4320_31,
"Ural-4320T": Unarmed.Ural_4320T,
"Ural-375 PBU": Unarmed.Ural_375_PBU,
"Ural-375": Unarmed.Ural_375,
"VAZ Car": Unarmed.VAZ_Car,
"ZiL-131 APA-80": Unarmed.ZiL_131_APA_80,
"SKP-11": Unarmed.SKP_11,
"ZIL-131 KUNG": Unarmed.ZIL_131_KUNG,
"ZIL-4331": Unarmed.ZIL_4331,
"KrAZ6322": Unarmed.KrAZ6322,
"Electric locomotive": Locomotive.Electric_locomotive,
"Locomotive": Locomotive.Locomotive,
"Coach cargo": Carriage.Coach_cargo,
"Coach cargo open": Carriage.Coach_cargo_open,
"Coach a tank blue": Carriage.Coach_a_tank_blue,
"Coach a tank yellow": Carriage.Coach_a_tank_yellow,
"Coach a passenger": Carriage.Coach_a_passenger,
"Coach a platform": Carriage.Coach_a_platform,
"Scud_B": MissilesSS.Scud_B,
"M4_Sherman": Armor.M4_Sherman,
"M2A1_halftrack": Armor.M2A1_halftrack,
"S_75M_Volhov": AirDefence.S_75M_Volhov,
"SNR_75V": AirDefence.SNR_75V,
"RLS_19J6": AirDefence.RLS_19J6,
"ZSU_57_2": AirDefence.ZSU_57_2,
"T-72B3": Armor.T_72B3,
"BTR-82A": Armor.BTR_82A,
"S-60_Type59_Artillery": AirDefence.S_60_Type59_Artillery,
"generator_5i57": AirDefence.Generator_5i57,
"ATZ-5": Unarmed.ATZ_5,
"AA8": Unarmed.AA8,
"PT_76": Armor.PT_76,
"ATZ-60_Maz": Unarmed.ATZ_60_Maz,
"ZIL-135": Unarmed.ZIL_135,
"TZ-22_KrAZ": Unarmed.TZ_22_KrAZ,
"Bedford_MWD": Unarmed.Bedford_MWD,
"bofors40": AirDefence.Bofors40,
"rapier_fsa_launcher": AirDefence.Rapier_fsa_launcher,
"rapier_fsa_optical_tracker_unit": AirDefence.Rapier_fsa_optical_tracker_unit,
"rapier_fsa_blindfire_radar": AirDefence.Rapier_fsa_blindfire_radar,
"Land_Rover_101_FC": Unarmed.Land_Rover_101_FC,
"Land_Rover_109_S3": Unarmed.Land_Rover_109_S3,
"Chieftain_mk3": Armor.Chieftain_mk3,
"hy_launcher": MissilesSS.Hy_launcher,
"Silkworm_SR": MissilesSS.Silkworm_SR,
"ES44AH": Locomotive.ES44AH,
"Boxcartrinity": Carriage.Boxcartrinity,
"Tankcartrinity": Carriage.Tankcartrinity,
"Wellcarnsc": Carriage.Wellcarnsc,
"Pz_IV_H": Armor.Pz_IV_H,
"Sd_Kfz_251": Armor.Sd_Kfz_251,
"flak18": AirDefence.Flak18,
"Blitz_36-6700A": Unarmed.Blitz_36_6700A,
"Leopard-2A5": Armor.Leopard_2A5,
"Leopard-2": Armor.Leopard_2,
"leopard-2A4": Armor.Leopard_2A4,
"leopard-2A4_trs": Armor.Leopard_2A4_trs,
"T155_Firtina": Artillery.T155_Firtina,
"VAB_Mephisto": Armor.VAB_Mephisto,
"ZTZ96B": Armor.ZTZ96B,
"ZBD04A": Armor.ZBD04A,
"HQ-7_LN_SP": AirDefence.HQ_7_LN_SP,
"HQ-7_STR_SP": AirDefence.HQ_7_STR_SP,
"PLZ05": Artillery.PLZ05,
"Kubelwagen_82": Unarmed.Kubelwagen_82,
"Sd_Kfz_2": Unarmed.Sd_Kfz_2,
"Sd_Kfz_7": Unarmed.Sd_Kfz_7,
"Horch_901_typ_40_kfz_21": Unarmed.Horch_901_typ_40_kfz_21,
"Tiger_I": Armor.Tiger_I,
"Tiger_II_H": Armor.Tiger_II_H,
"Pz_V_Panther_G": Armor.Pz_V_Panther_G,
"Jagdpanther_G1": Armor.Jagdpanther_G1,
"JagdPz_IV": Armor.JagdPz_IV,
"Stug_IV": Armor.Stug_IV,
"SturmPzIV": Armor.SturmPzIV,
"Sd_Kfz_234_2_Puma": Armor.Sd_Kfz_234_2_Puma,
"flak30": AirDefence.Flak30,
"flak36": AirDefence.Flak36,
"flak37": AirDefence.Flak37,
"flak38": AirDefence.Flak38,
"KDO_Mod40": AirDefence.KDO_Mod40,
"Flakscheinwerfer_37": AirDefence.Flakscheinwerfer_37,
"Maschinensatz_33": AirDefence.Maschinensatz_33,
"soldier_mauser98": Infantry.Soldier_mauser98,
"SK_C_28_naval_gun": Fortification.SK_C_28_naval_gun,
"fire_control": Fortification.Fire_control,
"Stug_III": Armor.Stug_III,
"Elefant_SdKfz_184": Armor.Elefant_SdKfz_184,
"flak41": AirDefence.Flak41,
"v1_launcher": MissilesSS.V1_launcher,
"FuMG-401": AirDefence.FuMG_401,
"FuSe-65": AirDefence.FuSe_65,
"Cromwell_IV": Armor.Cromwell_IV,
"M4A4_Sherman_FF": Armor.M4A4_Sherman_FF,
"soldier_wwii_br_01": Infantry.Soldier_wwii_br_01,
"Centaur_IV": Armor.Centaur_IV,
"Churchill_VII": Armor.Churchill_VII,
"Daimler_AC": Armor.Daimler_AC,
"Tetrarch": Armor.Tetrarch,
"QF_37_AA": AirDefence.QF_37_AA,
"CCKW_353": Unarmed.CCKW_353,
"Willys_MB": Unarmed.Willys_MB,
"M12_GMC": Artillery.M12_GMC,
"M30_CC": Unarmed.M30_CC,
"soldier_wwii_us": Infantry.Soldier_wwii_us,
"M10_GMC": Armor.M10_GMC,
"M8_Greyhound": Armor.M8_Greyhound,
"M4_Tractor": Unarmed.M4_Tractor,
"M45_Quadmount": AirDefence.M45_Quadmount,
"M1_37mm": AirDefence.M1_37mm,
"DR_50Ton_Flat_Wagon": Carriage.DR_50Ton_Flat_Wagon,
"DRG_Class_86": Locomotive.DRG_Class_86,
"German_covered_wagon_G10": Carriage.German_covered_wagon_G10,
"German_tank_wagon": Carriage.German_tank_wagon,
}
| lgpl-3.0 | -7,687,351,654,465,233,000 | 28.11159 | 82 | 0.579633 | false | 2.859798 | false | false | false |
umkcdcrg01/ryu_openflow | ryu/app/host_tracker_rest.py | 1 | 3378 | # Copyright (C) 2014 SDN Hub
#
# Licensed under the GNU GENERAL PUBLIC LICENSE, Version 3.
# You may not use this file except in compliance with this License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-3.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# REST API
#
############# Host tracker ##############
#
# get all hosts
# GET /hosts
#
# get all hosts associated with a switch
# GET /hosts/{dpid}
#
#
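# example requests (hypothetical controller address, assuming Ryu's default
# wsgi port 8080)
# curl http://localhost:8080/v1.0/hosts
# curl http://localhost:8080/v1.0/hosts/1
#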
import logging
import json
from webob import Response
import time
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import dpset
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ipv4
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_0, ofproto_v1_3
from ryu.app.sdnhub_apps import host_tracker
from ryu.lib import dpid as dpid_lib
class HostTrackerController(ControllerBase):
def __init__(self, req, link, data, **config):
super(HostTrackerController, self).__init__(req, link, data, **config)
self.host_tracker = data['host_tracker']
self.dpset = data['dpset']
@route('hosts', '/v1.0/hosts', methods=['GET'])
def get_all_hosts(self, req, **kwargs):
return Response(status=200, content_type='application/json',
body=json.dumps(self.host_tracker.hosts))
@route('hosts', '/v1.0/hosts/{dpid}', methods=['GET'])
# requirements={'dpid': dpid_lib.DPID_PATTERN})
def get_hosts(self, req, dpid, **_kwargs):
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
switch_hosts = {}
for key, val in self.host_tracker.hosts.iteritems():
if val['dpid'] == dpid_lib.dpid_to_str(dp.id):
switch_hosts[key] = val
return Response(status=200, content_type='application/json',
body=json.dumps(switch_hosts))
class HostTrackerRestApi(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
'wsgi': WSGIApplication,
'host_tracker': host_tracker.HostTracker
}
def __init__(self, *args, **kwargs):
super(HostTrackerRestApi, self).__init__(*args, **kwargs)
dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
host_tracker = kwargs['host_tracker']
self.data = {}
self.data['dpset'] = dpset
self.data['waiters'] = {}
self.data['host_tracker'] = host_tracker
wsgi.register(HostTrackerController, self.data)
#mapper = wsgi.mapper
# mapper.connect('hosts', '/v1.0/hosts', controller=HostTrackerController, action='get_all_hosts',
# conditions=dict(method=['GET']))
# mapper.connect('hosts', '/v1.0/hosts/{dpid}', controller=HostTrackerController, action='get_hosts',
# conditions=dict(method=['GET']), requirements={'dpid': dpid_lib.DPID_PATTERN})
| apache-2.0 | 2,639,174,920,022,244,400 | 32.117647 | 109 | 0.647128 | false | 3.636168 | false | false | false |
mikeywaites/flask-arrested | tests/test_handlers.py | 1 | 3543 | import pytest
import json
from mock import patch
from werkzeug.exceptions import BadRequest
from arrested import (
Handler, Endpoint, ResponseHandler,
RequestHandler, JSONRequestMixin, JSONResponseMixin)
def test_handler_params_set():
endpoint = Endpoint()
handler = Handler(endpoint, payload_key='foo', **{'test': 'foo'})
assert handler.endpoint == endpoint
assert handler.payload_key == 'foo'
assert handler.params == {'test': 'foo'}
def test_handler_handle_method_basic():
"""By default the hanlde method simply returns the data passed to it.
"""
endpoint = Endpoint()
handler = Handler(endpoint)
resp = handler.handle({'foo': 'bar'})
assert resp == {'foo': 'bar'}
def test_handler_process_method_calls_handle():
endpoint = Endpoint()
handler = Handler(endpoint)
with patch.object(Handler, 'handle') as _mock:
handler.process({'foo': 'bar'})
_mock.assert_called_once_with({'foo': 'bar'})
def test_handler_process_method_response():
endpoint = Endpoint()
handler = Handler(endpoint)
resp = handler.process({'foo': 'bar'})
assert resp == handler
assert resp.data == {'foo': 'bar'}
def test_response_handler_handle_method(app):
endpoint = Endpoint()
handler = ResponseHandler(endpoint)
with app.test_request_context('/test', method='GET'):
resp = handler.process({'foo': 'bar'})
assert resp == handler
assert resp.data == {'foo': 'bar'}
def test_response_handler_get_response_data(app):
endpoint = Endpoint()
handler = ResponseHandler(endpoint)
with app.test_request_context('/test', method='GET'):
resp = handler.process({'foo': 'bar'})
assert resp == handler
assert resp.data == {'foo': 'bar'}
def test_request_handler_handle_method():
endpoint = Endpoint()
handler = RequestHandler(endpoint)
    # Since we pass data directly to process(), get_request_data() is never
    # called, so the incoming data does not need to be JSON.
resp = handler.process({'foo': 'bar'})
assert resp == handler
assert resp.data == {'foo': 'bar'}
def test_request_handler_handle_method_request_data(app):
endpoint = Endpoint()
handler = RequestHandler(endpoint)
with app.test_request_context(
'/test',
data=json.dumps({'foo': 'bar'}),
headers={'content-type': 'application/json'},
method='POST'):
resp = handler.process()
assert resp == handler
assert resp.data == {'foo': 'bar'}
def test_json_request_mixin_valid_json_request(app):
mixin = JSONRequestMixin()
with app.test_request_context(
'/test',
data=json.dumps({'foo': 'bar'}),
headers={'content-type': 'application/json'},
method='POST'):
resp = mixin.get_request_data()
assert resp == {'foo': 'bar'}
def test_json_request_mixin_invalid_json(app):
endpoint = Endpoint()
mixin = JSONRequestMixin()
mixin.endpoint = endpoint
with app.test_request_context(
'/test',
data=b'not valid',
headers={'content-type': 'application/json'},
method='POST'):
with pytest.raises(BadRequest):
mixin.get_request_data()
def test_json_response_mixin(app):
mixin = JSONResponseMixin()
mixin.payload_key = 'data'
mixin.data = {'foo': 'bar'}
assert mixin.get_response_data() == json.dumps({"data": {"foo": "bar"}})
| mit | -4,878,512,467,761,809,000 | 26.679688 | 88 | 0.620943 | false | 3.945434 | true | false | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Helpviewer/Passwords/PasswordsDialog.py | 2 | 3538 | # -*- coding: utf-8 -*-
# Copyright (c) 2009 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to show all saved logins.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, QSortFilterProxyModel
from PyQt5.QtGui import QFont, QFontMetrics
from PyQt5.QtWidgets import QDialog
from E5Gui import E5MessageBox
from .Ui_PasswordsDialog import Ui_PasswordsDialog
class PasswordsDialog(QDialog, Ui_PasswordsDialog):
"""
Class implementing a dialog to show all saved logins.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget (QWidget)
"""
super(PasswordsDialog, self).__init__(parent)
self.setupUi(self)
self.__showPasswordsText = self.tr("Show Passwords")
self.__hidePasswordsText = self.tr("Hide Passwords")
self.passwordsButton.setText(self.__showPasswordsText)
self.removeButton.clicked.connect(
self.passwordsTable.removeSelected)
self.removeAllButton.clicked.connect(self.passwordsTable.removeAll)
import Helpviewer.HelpWindow
from .PasswordModel import PasswordModel
self.passwordsTable.verticalHeader().hide()
self.__passwordModel = PasswordModel(
Helpviewer.HelpWindow.HelpWindow.passwordManager(), self)
self.__proxyModel = QSortFilterProxyModel(self)
self.__proxyModel.setSourceModel(self.__passwordModel)
self.searchEdit.textChanged.connect(
self.__proxyModel.setFilterFixedString)
self.passwordsTable.setModel(self.__proxyModel)
fm = QFontMetrics(QFont())
height = fm.height() + fm.height() // 3
self.passwordsTable.verticalHeader().setDefaultSectionSize(height)
self.passwordsTable.verticalHeader().setMinimumSectionSize(-1)
self.__calculateHeaderSizes()
def __calculateHeaderSizes(self):
"""
Private method to calculate the section sizes of the horizontal header.
"""
fm = QFontMetrics(QFont())
for section in range(self.__passwordModel.columnCount()):
header = self.passwordsTable.horizontalHeader()\
.sectionSizeHint(section)
if section == 0:
header = fm.width("averagebiglongsitename")
elif section == 1:
header = fm.width("averagelongusername")
elif section == 2:
header = fm.width("averagelongpassword")
buffer = fm.width("mm")
header += buffer
self.passwordsTable.horizontalHeader()\
.resizeSection(section, header)
self.passwordsTable.horizontalHeader().setStretchLastSection(True)
@pyqtSlot()
def on_passwordsButton_clicked(self):
"""
Private slot to switch the password display mode.
"""
if self.__passwordModel.showPasswords():
self.__passwordModel.setShowPasswords(False)
self.passwordsButton.setText(self.__showPasswordsText)
else:
res = E5MessageBox.yesNo(
self,
self.tr("Saved Passwords"),
self.tr("""Do you really want to show passwords?"""))
if res:
self.__passwordModel.setShowPasswords(True)
self.passwordsButton.setText(self.__hidePasswordsText)
self.__calculateHeaderSizes()
| gpl-3.0 | 4,459,828,073,297,611,000 | 35.474227 | 79 | 0.628321 | false | 4.478481 | false | false | false |
gtesei/fast-furious | competitions/microsoft-malware-prediction/base.py | 1 | 8792 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 23:05:32 2019
@author: gtesei
"""
import pandas as pd
import numpy as np
import lightgbm as lgb
#import xgboost as xgb
from scipy.sparse import vstack, csr_matrix, save_npz, load_npz
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.model_selection import StratifiedKFold
#from sklearn.metrics import roc_auc_score
import gc
gc.enable()
dtypes = {
'MachineIdentifier': 'category',
'ProductName': 'category',
'EngineVersion': 'category',
'AppVersion': 'category',
'AvSigVersion': 'category',
'IsBeta': 'int8',
'RtpStateBitfield': 'float16',
'IsSxsPassiveMode': 'int8',
'DefaultBrowsersIdentifier': 'float16',
'AVProductStatesIdentifier': 'float32',
'AVProductsInstalled': 'float16',
'AVProductsEnabled': 'float16',
'HasTpm': 'int8',
'CountryIdentifier': 'int16',
'CityIdentifier': 'float32',
'OrganizationIdentifier': 'float16',
'GeoNameIdentifier': 'float16',
'LocaleEnglishNameIdentifier': 'int8',
'Platform': 'category',
'Processor': 'category',
'OsVer': 'category',
'OsBuild': 'int16',
'OsSuite': 'int16',
'OsPlatformSubRelease': 'category',
'OsBuildLab': 'category',
'SkuEdition': 'category',
'IsProtected': 'float16',
'AutoSampleOptIn': 'int8',
'PuaMode': 'category',
'SMode': 'float16',
'IeVerIdentifier': 'float16',
'SmartScreen': 'category',
'Firewall': 'float16',
'UacLuaenable': 'float32',
'Census_MDC2FormFactor': 'category',
'Census_DeviceFamily': 'category',
'Census_OEMNameIdentifier': 'float16',
'Census_OEMModelIdentifier': 'float32',
'Census_ProcessorCoreCount': 'float16',
'Census_ProcessorManufacturerIdentifier': 'float16',
'Census_ProcessorModelIdentifier': 'float16',
'Census_ProcessorClass': 'category',
'Census_PrimaryDiskTotalCapacity': 'float32',
'Census_PrimaryDiskTypeName': 'category',
'Census_SystemVolumeTotalCapacity': 'float32',
'Census_HasOpticalDiskDrive': 'int8',
'Census_TotalPhysicalRAM': 'float32',
'Census_ChassisTypeName': 'category',
'Census_InternalPrimaryDiagonalDisplaySizeInInches': 'float16',
'Census_InternalPrimaryDisplayResolutionHorizontal': 'float16',
'Census_InternalPrimaryDisplayResolutionVertical': 'float16',
'Census_PowerPlatformRoleName': 'category',
'Census_InternalBatteryType': 'category',
'Census_InternalBatteryNumberOfCharges': 'float32',
'Census_OSVersion': 'category',
'Census_OSArchitecture': 'category',
'Census_OSBranch': 'category',
'Census_OSBuildNumber': 'int16',
'Census_OSBuildRevision': 'int32',
'Census_OSEdition': 'category',
'Census_OSSkuName': 'category',
'Census_OSInstallTypeName': 'category',
'Census_OSInstallLanguageIdentifier': 'float16',
'Census_OSUILocaleIdentifier': 'int16',
'Census_OSWUAutoUpdateOptionsName': 'category',
'Census_IsPortableOperatingSystem': 'int8',
'Census_GenuineStateName': 'category',
'Census_ActivationChannel': 'category',
'Census_IsFlightingInternal': 'float16',
'Census_IsFlightsDisabled': 'float16',
'Census_FlightRing': 'category',
'Census_ThresholdOptIn': 'float16',
'Census_FirmwareManufacturerIdentifier': 'float16',
'Census_FirmwareVersionIdentifier': 'float32',
'Census_IsSecureBootEnabled': 'int8',
'Census_IsWIMBootEnabled': 'float16',
'Census_IsVirtualDevice': 'float16',
'Census_IsTouchEnabled': 'int8',
'Census_IsPenCapable': 'int8',
'Census_IsAlwaysOnAlwaysConnectedCapable': 'float16',
'Wdft_IsGamer': 'float16',
'Wdft_RegionIdentifier': 'float16',
'HasDetections': 'int8'
}
print('Download Train and Test Data.\n')
train = pd.read_csv('data/train.csv', dtype=dtypes, low_memory=True)
train['MachineIdentifier'] = train.index.astype('uint32')
test = pd.read_csv('data/test.csv', dtype=dtypes, low_memory=True)
test['MachineIdentifier'] = test.index.astype('uint32')
gc.collect()
print('Transform all features to category.\n')
for usecol in train.columns.tolist()[1:-1]:
train[usecol] = train[usecol].astype('str')
test[usecol] = test[usecol].astype('str')
#Fit LabelEncoder
le = LabelEncoder().fit(
np.unique(train[usecol].unique().tolist()+
test[usecol].unique().tolist()))
#At the end 0 will be used for dropped values
train[usecol] = le.transform(train[usecol])+1
test[usecol] = le.transform(test[usecol])+1
agg_tr = (train
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Train'}, axis=1))
agg_te = (test
.groupby([usecol])
.aggregate({'MachineIdentifier':'count'})
.reset_index()
.rename({'MachineIdentifier':'Test'}, axis=1))
agg = pd.merge(agg_tr, agg_te, on=usecol, how='outer').replace(np.nan, 0)
#Select values with more than 1000 observations
agg = agg[(agg['Train'] > 1000)].reset_index(drop=True)
agg['Total'] = agg['Train'] + agg['Test']
#Drop unbalanced values
agg = agg[(agg['Train'] / agg['Total'] > 0.2) & (agg['Train'] / agg['Total'] < 0.8)]
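    # e.g. a value seen 900 times in train and 100 in test gives
    # Train/Total = 0.9, so it is dropped as too train-skewed.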
agg[usecol+'Copy'] = agg[usecol]
train[usecol] = (pd.merge(train[[usecol]], agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy'].replace(np.nan, 0).astype('int').astype('category'))
test[usecol] = (pd.merge(test[[usecol]],
agg[[usecol, usecol+'Copy']],
on=usecol, how='left')[usecol+'Copy']
.replace(np.nan, 0).astype('int').astype('category'))
del le, agg_tr, agg_te, agg, usecol
gc.collect()
| mit | 2,492,126,143,849,511,400 | 51.963855 | 99 | 0.433576 | false | 4.639578 | true | false | false |
CyBHFal/plugin.video.freplay | resources/lib/channels/rtbf.py | 2 | 1735 | # -*- coding: utf-8 -*-
from resources.lib import utils
import re
title = ['RTBF Auvio']
img = ['rtbf']
readyForUse = True
url_root = 'http://www.rtbf.be/auvio'
categories = {
'/categorie/series?id=35': 'Séries',
'/categorie/sport?id=9': 'Sport',
'/categorie/divertissement?id=29': 'Divertissement',
'/categorie/culture?id=18': 'Culture',
'/categorie/films?id=36': 'Films',
'/categorie/sport/football?id=11': 'Football',
'/categorie/vie-quotidienne?id=44': 'Vie quotidienne',
'/categorie/musique?id=23': 'Musique',
'/categorie/info?id=1': 'Info',
'/categorie/humour?id=40': 'Humour',
'/categorie/documentaires?id=31': 'Documentaires',
'/categorie/enfants?id=32': 'Enfants'
}
def list_shows(channel, param):
shows = []
if param == 'none':
for url, title in categories.iteritems():
shows.append([channel,url,title,'','shows'])
return shows
def list_videos(channel, cat_url):
videos = []
cat=cat_url[2:]
filePath=utils.downloadCatalog(url_root + cat_url ,'rtbf' + cat + '.html',False,{})
html=open(filePath).read().replace('\xe9', 'e').replace('\xe0', 'a').replace('\n', ' ').replace('\r', '')
match = re.compile(r'<h3 class="rtbf-media-item__title "><a href="(.*?)" title="(.*?)">',re.DOTALL).findall(html)
for url,title in match:
title=utils.formatName(title)
infoLabels={ "Title": title}
videos.append( [channel, url , title , '',infoLabels,'play'] )
return videos
def getVideoURL(channel, url_video):
html = utils.get_webcontent(url_video).replace('\xe9', 'e').replace('\xe0', 'a').replace('\n', ' ').replace('\r', '')
url=re.findall(r'<meta property="og:video" content="(.*?).mp4"', html)[0]
return url+'.mp4'
| gpl-2.0 | -7,676,071,606,240,000,000 | 33 | 121 | 0.622261 | false | 2.851974 | false | false | false |
CWTGMBH/ERPNext2Alfresco | erpnext2alfresco/actions/cmislibalf/extension.py | 1 | 13272 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ..cmislib import model
from ..cmislib.exceptions import InvalidArgumentException
import datetime
ALFRESCO_NS = 'http://www.alfresco.org'
ALFRESCO_NSALIAS = 'alf'
ALFRESCO_NSALIAS_DECL = 'xmlns:' + ALFRESCO_NSALIAS
ALFRESCO_NSPREFIX = ALFRESCO_NSALIAS + ':'
LOCALNAME_ASPECTS = 'aspects'
LOCALNAME_PROPERTIES = 'properties'
LOCALNAME_APPLIED_ASPECTS = 'appliedAspects'
LOCALNAME_SET_ASPECTS = 'setAspects'
LOCALNAME_ASPECTS_TO_ADD = 'aspectsToAdd'
LOCALNAME_ASPECTS_TO_REMOVE = 'aspectsToRemove'
TAGNAME_ALFRESCO_PROPERTIES = ALFRESCO_NSPREFIX + LOCALNAME_PROPERTIES
TAGNAME_SET_ASPECTS = ALFRESCO_NSPREFIX + LOCALNAME_SET_ASPECTS
TAGNAME_ASPECTS_TO_ADD = ALFRESCO_NSPREFIX + LOCALNAME_ASPECTS_TO_ADD
TAGNAME_ASPECTS_TO_REMOVE = ALFRESCO_NSPREFIX + LOCALNAME_ASPECTS_TO_REMOVE
OBJECT_TYPE_ID = 'cmis:objectTypeId'
CHANGE_TOKEN = 'cmis:changeToken'
def addSetAspectsToXMLDocument(xmldoc):
entryElements = xmldoc.getElementsByTagNameNS(model.ATOM_NS, 'entry')
entryElements[0].setAttribute(ALFRESCO_NSALIAS_DECL, ALFRESCO_NS)
propertiesElements = xmldoc.getElementsByTagNameNS(model.CMIS_NS, LOCALNAME_PROPERTIES)
if len(propertiesElements) == 0:
objectElement = xmldoc.getElementsByTagNameNS(model.CMISRA_NS, 'object')
propertiesElement = xmldoc.createElementNS(model.CMIS_NS, 'cmis:properties')
objectElement[0].appendChild(propertiesElement)
else:
propertiesElement = propertiesElements[0]
aspectsElement = xmldoc.createElementNS(ALFRESCO_NS, TAGNAME_SET_ASPECTS)
propertiesElement.appendChild(aspectsElement)
return aspectsElement
def addPropertiesToXMLElement(xmldoc, element, properties):
for propName, propValue in properties.items():
"""
the name of the element here is significant: it includes the
data type. I should be able to figure out the right type based
on the actual type of the object passed in.
I could do a lookup to the type definition, but that doesn't
seem worth the performance hit
"""
propType = type(propValue)
isList = False
if (propType == list):
propType = type(propValue[0])
isList = True
if (propType == model.CmisId):
propElementName = 'cmis:propertyId'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(val)
else:
propValueStrList = [propValue]
elif (propType == str):
propElementName = 'cmis:propertyString'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(val)
else:
propValueStrList = [propValue]
elif (propType == datetime.datetime):
propElementName = 'cmis:propertyDateTime'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(val.isoformat())
else:
propValueStrList = [propValue.isoformat()]
elif (propType == bool):
propElementName = 'cmis:propertyBoolean'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(unicode(val).lower())
else:
propValueStrList = [unicode(propValue).lower()]
elif (propType == int):
propElementName = 'cmis:propertyInteger'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(unicode(val))
else:
propValueStrList = [unicode(propValue)]
elif (propType == float):
propElementName = 'cmis:propertyDecimal'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(unicode(val))
else:
propValueStrList = [unicode(propValue)]
else:
propElementName = 'cmis:propertyString'
if isList:
propValueStrList = []
for val in propValue:
propValueStrList.append(unicode(val))
else:
propValueStrList = [unicode(propValue)]
propElement = xmldoc.createElementNS(model.CMIS_NS, propElementName)
propElement.setAttribute('propertyDefinitionId', propName)
for val in propValueStrList:
valElement = xmldoc.createElementNS(model.CMIS_NS, 'cmis:value')
valText = xmldoc.createTextNode(val)
valElement.appendChild(valText)
propElement.appendChild(valElement)
element.appendChild(propElement)
def initData(self):
model.CmisObject._initData(self)
self._aspects = {}
self._alfproperties = {}
def findAlfrescoExtensions(self):
if not hasattr(self, '_aspects'):
self._aspects = {}
if self._aspects == {}:
if self.xmlDoc == None:
self.reload()
appliedAspects = self.xmlDoc.getElementsByTagNameNS(ALFRESCO_NS, LOCALNAME_APPLIED_ASPECTS)
for node in appliedAspects:
aspectType = self._repository.getTypeDefinition(node.childNodes[0].data)
self._aspects[node.childNodes[0].data] = aspectType
def hasAspect(self, arg):
result = False
if arg is not None:
self._findAlfrescoExtensions()
if isinstance(arg, model.ObjectType):
result = arg.getTypeId() in self._aspects
else:
result = arg in self._aspects
return result
def getAspects(self):
self._findAlfrescoExtensions()
return self._aspects.values()
def findAspect(self, propertyId):
self._findAlfrescoExtensions()
if (propertyId is not None) and (len(self._aspects) > 0):
        for aspect_id, aspect in self._aspects.iteritems():
props = aspect.getProperties()
if propertyId in props:
return aspect
return None
def updateAspects(self, addAspects=None, removeAspects=None):
if addAspects or removeAspects:
selfUrl = self._getSelfLink()
xmlEntryDoc = getEntryXmlDoc(self._repository)
# Patch xmlEntryDoc
setAspectsElement = addSetAspectsToXMLDocument(xmlEntryDoc)
if addAspects:
addAspectElement = xmlEntryDoc.createElementNS(ALFRESCO_NS, TAGNAME_ASPECTS_TO_ADD)
valText = xmlEntryDoc.createTextNode(addAspects)
addAspectElement.appendChild(valText)
setAspectsElement.appendChild(addAspectElement)
if removeAspects:
removeAspectElement = xmlEntryDoc.createElementNS(ALFRESCO_NS, TAGNAME_ASPECTS_TO_REMOVE)
valText = xmlEntryDoc.createTextNode(removeAspects)
removeAspectElement.appendChild(valText)
setAspectsElement.appendChild(removeAspectElement)
updatedXmlDoc = self._cmisClient.put(selfUrl.encode('utf-8'),
xmlEntryDoc.toxml(encoding='utf-8'),
model.ATOM_XML_TYPE)
self.xmlDoc = updatedXmlDoc
self._initData()
def getProperties(self):
result = model.CmisObject.getProperties(self)
if not hasattr(self, '_alfproperties'):
self._alfproperties = {}
if self._alfproperties == {}:
alfpropertiesElements = self.xmlDoc.getElementsByTagNameNS(ALFRESCO_NS, LOCALNAME_PROPERTIES)
if len(alfpropertiesElements) > 0:
for alfpropertiesElement in alfpropertiesElements:
for node in [e for e in alfpropertiesElement.childNodes if e.nodeType == e.ELEMENT_NODE and e.namespaceURI == model.CMIS_NS]:
#propertyId, propertyString, propertyDateTime
#propertyType = cpattern.search(node.localName).groups()[0]
propertyName = node.attributes['propertyDefinitionId'].value
if node.childNodes and \
node.getElementsByTagNameNS(model.CMIS_NS, 'value')[0] and \
node.getElementsByTagNameNS(model.CMIS_NS, 'value')[0].childNodes:
valNodeList = node.getElementsByTagNameNS(model.CMIS_NS, 'value')
if (len(valNodeList) == 1):
propertyValue = model.parsePropValue(valNodeList[0].
childNodes[0].data,
node.localName)
else:
propertyValue = []
for valNode in valNodeList:
propertyValue.append(model.parsePropValue(valNode.
childNodes[0].data,
node.localName))
else:
propertyValue = None
self._alfproperties[propertyName] = propertyValue
result.update(self._alfproperties)
return result
def updateProperties(self, properties):
selfUrl = self._getSelfLink()
cmisproperties = {}
alfproperties = {}
# if we have a change token, we must pass it back, per the spec
args = {}
if (self.properties.has_key(CHANGE_TOKEN) and
self.properties[CHANGE_TOKEN] != None):
self.logger.debug('Change token present, adding it to args')
args = {"changeToken": self.properties[CHANGE_TOKEN]}
objectTypeId = properties.get(OBJECT_TYPE_ID)
if (objectTypeId is None):
objectTypeId = self.properties.get(OBJECT_TYPE_ID)
objectType = self._repository.getTypeDefinition(objectTypeId)
objectTypePropsDef = objectType.getProperties()
for propertyName, propertyValue in properties.items():
if (propertyName == OBJECT_TYPE_ID) or (propertyName in objectTypePropsDef.keys()):
cmisproperties[propertyName] = propertyValue
else:
if self.findAspect(propertyName) is None:
raise InvalidArgumentException
else:
alfproperties[propertyName] = propertyValue
xmlEntryDoc = getEntryXmlDoc(self._repository, properties=cmisproperties)
# Patch xmlEntryDoc
# add alfresco properties
if len(alfproperties) > 0:
aspectsElement = addSetAspectsToXMLDocument(xmlEntryDoc)
alfpropertiesElement = xmlEntryDoc.createElementNS(ALFRESCO_NS, TAGNAME_ALFRESCO_PROPERTIES)
aspectsElement.appendChild(alfpropertiesElement)
# Like regular properties
addPropertiesToXMLElement(xmlEntryDoc, alfpropertiesElement, alfproperties)
updatedXmlDoc = self._cmisClient.put(selfUrl.encode('utf-8'),
xmlEntryDoc.toxml(encoding='utf-8'),
model.ATOM_XML_TYPE,
**args)
self.xmlDoc = updatedXmlDoc
self._initData()
return self
def addAspect(self, arg):
if arg is not None:
aspect_id = arg
if isinstance(arg, model.ObjectType):
aspect_id = arg.getTypeId()
if self._repository.getTypeDefinition(aspect_id) is None:
raise InvalidArgumentException
self._updateAspects(addAspects=aspect_id)
def removeAspect(self, arg):
if arg is not None:
aspect_id = arg
if isinstance(arg, model.ObjectType):
aspect_id = arg.getTypeId()
if self._repository.getTypeDefinition(aspect_id) is None:
raise InvalidArgumentException
self._updateAspects(removeAspects=aspect_id)
def getEntryXmlDoc(repo=None, objectTypeId=None, properties=None, contentFile=None,
contentType=None, contentEncoding=None):
return model.getEntryXmlDoc(repo, objectTypeId, properties, contentFile, contentType, contentEncoding) | mit | -2,067,185,787,698,772,500 | 41.379085 | 141 | 0.606088 | false | 4.228098 | false | false | false |
nrkumar93/bnr_workspace | label_training_detection/scripts/visualize_classifier.py | 1 | 4111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# visualize_classifier.py
#
# Copyright 2016 Ramkumar Natarajan <ram@ramkumar-ubuntu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os
import cv2
import argparse
import numpy as np
import xml.etree.ElementTree as ET
feats_per_stage = []
feats = []
tilted_per_feat = []
featID_in_stage = []
feat_img = []
scale_factor = 5
def improperXML():
raise Exception('The classifier XML is not properly formatted. Please verify whether you have given the correct classifier.')
def main():
parser = argparse.ArgumentParser(description="Visualize the HAAR features obtained from Opencv Cascade Classifier")
parser.add_argument("classifier", help="The full path to the classifier ")
parser.add_argument("overlay", help="Image on which the features should be overlaid.")
parser.add_argument("dst", help="The path to save the visualization.")
args = parser.parse_args()
if os.path.splitext(args.classifier)[1] != '.xml':
        raise Exception('A non-XML classifier was provided and cannot be parsed. Aborting...')
# Create classifier XML object for parsing through the XML
obj = ET.parse(args.classifier).getroot()
if obj.tag != 'opencv_storage':
improperXML()
'''
for i in range(len(obj[0])):
if obj[i].tag == 'cascade':
for j in range(len(obj[i])):
if obj[i][j].tag == 'stages':
for k in range(len(obj[i][j])):
if obj[i][j][k].tag == '_':
for l in range(len(obj[i][j][k])):
if obj[i][j][k][l].tag == 'maxWeakCount':
feats_per_stage.append(int(obj[i][j][k][l].text))
'''
for i in obj.iter('width'):
width = int(i.text)
for i in obj.iter('height'):
height = int(i.text)
# Parse XML to collect weak classifiers per stage
for i in obj.iter('stages'):
for j in i.iter('maxWeakCount'):
feats_per_stage.append(int(j.text))
for i in obj.iter('stageNum'):
assert(len(feats_per_stage) == int(i.text))
# Parse XML to collect all the features in the classifier.
for i in obj.iter('rects'):
rect_in_feat=[]
for j in i.iter('_'):
rect_in_feat.append(j.text.split())
feats.append(rect_in_feat)
# Parse XML to collect 'tilted' flag per feature
for i in obj.iter('tilted'):
tilted_per_feat.append(int(i.text))
assert(sum(feats_per_stage) == len(feats))
assert(sum(feats_per_stage) == len(tilted_per_feat))
# Converting all the feature rectangle values into numpy images.
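    # Each rect is [x, y, width, height, weight]; negatively weighted regions
    # are drawn white (255), the rest black (0), on a grey (127) background.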
for i in feats:
haar = np.ones((height, width), dtype = 'u1')*127
for j in i:
if float(j[-1]) < 0:
haar[int(j[1]):(int(j[1])+int(j[3])), int(j[0]):(int(j[0])+int(j[2]))] = 255
else:
haar[int(j[1]):(int(j[1])+int(j[3])), int(j[0]):(int(j[0])+int(j[2]))] = 0
feat_img.append(haar)
overlay = cv2.resize(cv2.imread(args.overlay, 0), (width*scale_factor, height*scale_factor), fx=0, fy=0, interpolation=cv2.INTER_LINEAR)
    kk = 0
    for i in feat_img:
        res = cv2.resize(i, None, fx=scale_factor, fy=scale_factor, interpolation=cv2.INTER_LINEAR)
        blend = cv2.addWeighted(overlay, 0.3, res, 0.7, 0)
        cv2.imshow('img', blend)
        # Persist each visualization so the dst argument is actually used.
        cv2.imwrite(os.path.join(args.dst, 'feature_%04d.png' % kk), blend)
        kk += 1
        cv2.waitKey(0)
return 0
if __name__ == '__main__':
main()
| gpl-3.0 | -4,139,936,041,857,228,000 | 33.838983 | 140 | 0.622476 | false | 3.312651 | false | false | false |
Bojak4616/Mobile_Phone_Tracking | utils/deauth.py | 1 | 2188 | #!/usr/bin/python
import argparse
import subprocess
import time
from sys import exit
from scapy.all import *
# Credit: https://gist.githubusercontent.com/jordan-wright/4576966/raw/5f17c9bfb747d6b2b702df3630028a097be8f399/perform_deauth.py
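# Example usage (hypothetical MACs; the interface must already be in monitor mode):
#   ./deauth.py -i wlan0mon -a AA:BB:CC:DD:EE:FF -c FF:FF:FF:FF:FF:FF -ch 6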
def deauth(iface, ap, client, count, channel):
subprocess.call(['iwconfig', iface, 'channel', str(channel)])
pckt = Dot11(addr1=client, addr2=ap, addr3=ap) / Dot11Deauth()
cli_to_ap_pckt = None
if client != 'FF:FF:FF:FF:FF:FF':
cli_to_ap_pckt = Dot11(addr1=ap, addr2=client, addr3=ap) / Dot11Deauth()
print 'Sending Deauth to ' + client + ' from ' + ap
if count == -1: print 'Press CTRL+C to quit'
# We will do like aireplay does and send the packets in bursts of 64, then sleep for half a sec or so
while count != 0:
try:
for i in range(64):
# Send out deauth from the AP
send(pckt, iface=iface, verbose=0)
print 'Sent deauth to ' + client
# If we're targeting a client, we will also spoof deauth from the client to the AP
if client != 'FF:FF:FF:FF:FF:FF': send(cli_to_ap_pckt, iface=iface, verbose=0)
                # If count was -1, this will be an infinite loop
                count -= 1
            # Pause between bursts of 64, as promised in the comment above.
            time.sleep(0.5)
except KeyboardInterrupt:
break
def main():
    parser = argparse.ArgumentParser(description='deauth.py - Deauthenticate clients from a network')
parser.add_argument('-i', '--interface', dest='iface', type=str, required=True, help='Interface to use for deauth')
parser.add_argument('-a', '--ap', dest='ap', type=str, required=True, help='BSSID of the access point')
    parser.add_argument('-c', '--client', dest='client', type=str, required=True, help='MAC address of the client being deauthenticated')
parser.add_argument('-n', '--packets', dest='count', type=int, required=False, default=-1,help='Number of DeAuthentication packets to send')
parser.add_argument('-ch', '--channel', dest='channel', type=int, required=True, help='Channel which AP and client are on')
args = parser.parse_args()
deauth(args.iface, args.ap, args.client, args.count, args.channel)
exit(0)
if __name__ == '__main__':
main()
| mit | -4,704,566,407,926,638,000 | 45.553191 | 143 | 0.653565 | false | 3.290226 | false | false | false |
astocko/statsmodels | statsmodels/stats/tests/results/results_panelrobust.py | 37 | 3264 | import numpy as np
cov_clu_stata = np.array([ .00025262993207,
-.00065043385106,
.20961897960949,
-.00065043385106,
.00721940994738,
-1.2171040967615,
.20961897960949,
-1.2171040967615,
417.18890043724]).reshape(3,3)
cov_pnw0_stata = np.array([ .00004638910396,
-.00006781406833,
-.00501232990882,
-.00006781406833,
.00238784043122,
-.49683062350622,
-.00501232990882,
-.49683062350622,
133.97367476797]).reshape(3,3)
cov_pnw1_stata = np.array([ .00007381482253,
-.00009936717692,
-.00613513582975,
-.00009936717692,
.00341979122583,
-.70768252183061,
-.00613513582975,
-.70768252183061,
197.31345000598]).reshape(3,3)
cov_pnw4_stata = np.array([ .0001305958131,
-.00022910455176,
.00889686530849,
-.00022910455176,
.00468152667913,
-.88403667445531,
.00889686530849,
-.88403667445531,
261.76140136858]).reshape(3,3)
cov_dk0_stata = np.array([ .00005883478135,
-.00011241470772,
-.01670183921469,
-.00011241470772,
.00140649264687,
-.29263014921586,
-.01670183921469,
-.29263014921586,
99.248049966902]).reshape(3,3)
cov_dk1_stata = np.array([ .00009855800275,
-.00018443722054,
-.03257408922788,
-.00018443722054,
.00205106413403,
-.3943459697384,
-.03257408922788,
-.3943459697384,
140.50692606398]).reshape(3,3)
cov_dk4_stata = np.array([ .00018052657317,
-.00035661054613,
-.06728261073866,
-.00035661054613,
.0024312795189,
-.32394785247278,
-.06728261073866,
-.32394785247278,
148.60456447156]).reshape(3,3)
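# Bunch: a dict whose keys double as attributes (e.g. results.cov_clu_stata).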
class Bunch(dict):
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
results = Bunch(cov_clu_stata=cov_clu_stata, cov_pnw0_stata=cov_pnw0_stata, cov_pnw1_stata=cov_pnw1_stata, cov_pnw4_stata=cov_pnw4_stata, cov_dk0_stata=cov_dk0_stata, cov_dk1_stata=cov_dk1_stata, cov_dk4_stata=cov_dk4_stata, )
| bsd-3-clause | -4,662,529,930,910,888,000 | 40.316456 | 226 | 0.401348 | false | 3.853601 | false | false | false |
InverseLina/python-practice | DataBase/Postgres/PostgresTest.py | 1 | 2205 | import psycopg2
# encoding=utf-8
__author__ = 'Hinsteny'
def get_conn():
conn = psycopg2.connect(database="hello_db", user="hinsteny", password="welcome", host="127.0.0.1", port="5432")
return conn
def create_table(conn):
cur = conn.cursor()
cur.execute('''CREATE TABLE if not exists COMPANY
(ID INT PRIMARY KEY NOT NULL,
NAME TEXT NOT NULL,
AGE INT NOT NULL,
ADDRESS CHAR(50),
SALARY REAL);''')
conn.commit()
conn.close()
def insert_data(conn):
    cur = conn.cursor()
    cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
          VALUES (1, 'Paul', 32, 'California', 20000.00 )")

    cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
          VALUES (2, 'Allen', 25, 'Texas', 15000.00 )")

    cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
          VALUES (3, 'Teddy', 23, 'Norway', 20000.00 )")

    cur.execute("INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY) \
          VALUES (4, 'Mark', 25, 'Rich-Mond ', 65000.00 )")
    conn.commit()
    print("Records created successfully")
conn.close()
def select_data(conn):
'''
:param conn:
:return:
'''
cur = conn.cursor()
cur.execute("SELECT id, name, address, salary from COMPANY ORDER BY id ASC;")
rows = cur.fetchall()
for row in rows:
print("ID = ", row[0])
print("NAME = ", row[1])
print("ADDRESS = ", row[2])
print("SALARY = ", row[3], "\n")
print("Operation done successfully")
conn.close()
pass
def update_data(conn):
cur = conn.cursor()
cur.execute("UPDATE COMPANY set SALARY = 50000.00 where ID=1;")
conn.commit()
conn.close()
select_data(get_conn())
pass
def delete_data(conn):
cur = conn.cursor()
cur.execute("DELETE from COMPANY where ID=4;")
conn.commit()
conn.close()
select_data(get_conn())
pass
# Do test
if __name__ == "__main__":
create_table(get_conn())
insert_data(get_conn())
select_data(get_conn())
update_data(get_conn())
delete_data(get_conn())
pass
| apache-2.0 | -7,581,728,928,613,393,000 | 23.5 | 116 | 0.564626 | false | 3.233138 | false | false | false |
jamilatta/opac_ssm | grpc_ssm/grpc_server.py | 1 | 9839 | #!/usr/bin/env python
import time
import logging
import json
from concurrent import futures
import grpc
from grpc_ssm import opac_pb2
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from celery.result import AsyncResult
from assets_manager import tasks
from assets_manager import models
MAX_RECEIVE_MESSAGE_LENGTH = 90 * 1024 * 1024
MAX_SEND_MESSAGE_LENGTH = 90 * 1024 * 1024
class Asset(opac_pb2.AssetServiceServicer):
def add_asset(self, request, context):
"""
Return a task id
"""
task_result = tasks.add_asset.delay(request.file, request.filename,
request.type, request.metadata,
request.bucket)
return opac_pb2.TaskId(id=task_result.id)
def get_asset(self, request, context):
"""
Return an Asset or message erro when asset doesnt exists
"""
try:
asset = models.Asset.objects.get(uuid=request.id)
except models.Asset.DoesNotExist as e:
logging.error(str(e))
context.set_details(str(e))
raise
else:
try:
fp = open(asset.file.path, 'rb')
except IOError as e:
                logging.error(str(e))
                context.set_details(str(e))
raise
return opac_pb2.Asset(file=fp.read(),
filename=asset.filename,
type=asset.type,
metadata=json.dumps(asset.metadata),
uuid=asset.uuid.hex,
bucket=asset.bucket.name,
checksum=asset.checksum,
absolute_url=asset.get_absolute_url,
full_absolute_url=asset.get_full_absolute_url,
created_at=asset.created_at.isoformat(),
updated_at=asset.updated_at.isoformat())
def update_asset(self, request, context):
"""
Return a task id
"""
task_result = tasks.update_asset.delay(request.uuid, request.file,
request.filename, request.type,
request.metadata, request.bucket)
return opac_pb2.TaskId(id=task_result.id)
def remove_asset(self, request, context):
"""
Return a AssetExists
"""
result = tasks.remove_asset(asset_uuid=request.id)
return opac_pb2.AssetRemoved(exist=result)
def exists_asset(self, request, context):
"""
Return a AssetExists
"""
result = tasks.exists_asset(asset_uuid=request.id)
return opac_pb2.AssetExists(exist=result)
def get_task_state(self, request, context):
"""
Return an Asset state
"""
res = AsyncResult(request.id)
return opac_pb2.TaskState(state=res.state)
def get_asset_info(self, request, context):
"""
Return an Asset info
"""
try:
asset = models.Asset.objects.get(uuid=request.id)
except models.Asset.DoesNotExist as e:
logging.error(str(e))
context.set_details(str(e))
raise
else:
return opac_pb2.AssetInfo(url=asset.get_full_absolute_url,
url_path=asset.get_absolute_url)
def get_bucket(self, request, context):
"""
Return a bucket of any asset
"""
try:
asset = models.Asset.objects.get(uuid=request.id)
except models.Asset.DoesNotExist as e:
logging.error(str(e))
context.set_details(str(e))
raise
else:
return opac_pb2.Bucket(name=asset.bucket.name)
def query(self, request, context):
"""
Return a list of assets if it exists
"""
asset_list = []
assets = opac_pb2.Assets()
filters = {}
if request.checksum:
filters['checksum'] = request.checksum
if request.filename:
filters['filename'] = request.filename
if request.type:
filters['type'] = request.type
if request.uuid:
filters['uuid'] = request.uuid
if request.bucket:
filters['bucket'] = request.bucket
result = tasks.query(filters, metadata=request.metadata)
for ret in result:
asset = opac_pb2.Asset()
asset.filename = ret.filename
asset.type = ret.type
asset.metadata = json.dumps(ret.metadata)
asset.uuid = ret.uuid.hex
asset.checksum = ret.checksum
asset.bucket = ret.bucket.name
asset.absolute_url = ret.get_absolute_url
asset.full_absolute_url = ret.get_full_absolute_url
asset.created_at = ret.created_at.isoformat()
asset.updated_at = ret.updated_at.isoformat()
asset_list.append(asset)
assets.assets.extend(asset_list)
return assets
class AssetBucket(opac_pb2.BucketServiceServicer):
def add_bucket(self, request, context):
"""
Return a task id
"""
task_result = tasks.add_bucket.delay(bucket_name=request.name)
return opac_pb2.TaskId(id=task_result.id)
def update_bucket(self, request, context):
"""
Return a task id
"""
task_result = tasks.update_bucket.delay(bucket_name=request.name,
new_name=request.new_name)
return opac_pb2.TaskId(id=task_result.id)
def remove_bucket(self, request, context):
"""
Return a BucketRemoved
"""
result = tasks.remove_bucket(bucket_name=request.name)
return opac_pb2.BucketRemoved(exist=result)
def exists_bucket(self, request, context):
"""
Return a AssetExists
"""
result = tasks.exists_bucket(bucket_name=request.name)
return opac_pb2.BucketExists(exist=result)
def get_task_state(self, request, context):
"""
Return an Asset state
"""
res = AsyncResult(request.id)
return opac_pb2.TaskState(state=res.state)
def get_assets(self, request, context):
"""
Return a list of assets
"""
asset_list = []
        # We need to return an object of type Assets here
assets = opac_pb2.Assets()
result = models.Asset.objects.filter(bucket__name=request.name)
for ret in result:
asset = opac_pb2.Asset()
asset.file = ret.file.read()
asset.filename = ret.filename
asset.type = ret.type
asset.metadata = json.dumps(ret.metadata)
asset.uuid = ret.uuid.hex
asset.checksum = ret.checksum
asset.bucket = ret.bucket.name
asset.absolute_url = ret.get_absolute_url
asset.full_absolute_url = ret.get_full_absolute_url
asset.created_at = ret.created_at.isoformat()
asset.updated_at = ret.updated_at.isoformat()
asset_list.append(asset)
assets.assets.extend(asset_list)
return assets
def serve(host='[::]', port=5000, max_workers=4,
max_receive_message_length=MAX_RECEIVE_MESSAGE_LENGTH,
max_send_message_length=MAX_SEND_MESSAGE_LENGTH):
servicer = health.HealthServicer()
servicer.set('', health_pb2.HealthCheckResponse.SERVING)
# Asset
servicer.set('get_asset', health_pb2.HealthCheckResponse.SERVING)
servicer.set('add_asset', health_pb2.HealthCheckResponse.SERVING)
servicer.set('update_asset', health_pb2.HealthCheckResponse.SERVING)
servicer.set('remove_asset', health_pb2.HealthCheckResponse.SERVING)
servicer.set('exists_asset', health_pb2.HealthCheckResponse.SERVING)
servicer.set('get_asset_info', health_pb2.HealthCheckResponse.SERVING)
servicer.set('get_task_state', health_pb2.HealthCheckResponse.SERVING)
servicer.set('get_bucket', health_pb2.HealthCheckResponse.SERVING)
servicer.set('query', health_pb2.HealthCheckResponse.SERVING)
# Bucket
servicer.set('add_bucket', health_pb2.HealthCheckResponse.SERVING)
servicer.set('update_bucket', health_pb2.HealthCheckResponse.SERVING)
servicer.set('remove_bucket', health_pb2.HealthCheckResponse.SERVING)
servicer.set('exists_bucket', health_pb2.HealthCheckResponse.SERVING)
servicer.set('get_assets', health_pb2.HealthCheckResponse.SERVING)
options = [('grpc.max_receive_message_length', max_receive_message_length),
('grpc.max_send_message_length', max_send_message_length)]
server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers),
options=options)
opac_pb2.add_AssetServiceServicer_to_server(Asset(), server)
opac_pb2.add_BucketServiceServicer_to_server(AssetBucket(), server)
# Health service
health_pb2.add_HealthServicer_to_server(servicer, server)
# Set port and Start Server
server.add_insecure_port('{0}:{1}'.format(host, port))
server.start()
    logging.info('Started GRPC server on {0}:{1}, accepting connections!'.format(host, port))
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
logging.info('User stopping server...')
server.stop(0)
logging.info('Server stopped; exiting.')
except Exception as e:
logging.info('Caught exception "%s"; stopping server...', e)
server.stop(0)
logging.info('Server stopped; exiting.')
if __name__ == '__main__':
serve()
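# Example client sketch (stub class name is assumed from the generated opac_pb2):
#   channel = grpc.insecure_channel('localhost:5000')
#   stub = opac_pb2.AssetServiceStub(channel)  # hypothetical generated stub
#   state = stub.get_task_state(opac_pb2.TaskId(id=task_id))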
| bsd-3-clause | -7,292,785,968,008,391,000 | 30.838188 | 97 | 0.584875 | false | 4.01551 | false | false | false |
colloquium/spacewalk | backend/server/taskomatic.py | 1 | 2010 |
"""
Module for taskomatic-related functions (inserting into queues, etc.)
"""
from spacewalk.server import rhnSQL
class RepodataQueueEntry(object):
def __init__(self, channel, client, reason, force=False,
bypass_filters=False):
self.channel = channel
self.client = client
self.reason = reason
self.force = force
self.bypass_filters = bypass_filters
class RepodataQueue(object):
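    # the queue table stores booleans as 'Y'/'N' characters, hence this helper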
    @staticmethod
    def _boolean_as_char(boolean):
        if boolean:
            return 'Y'
        else:
            return 'N'
def add(self, entry):
h = rhnSQL.prepare("""
insert into rhnRepoRegenQueue
(id, channel_label, client, reason, force, bypass_filters,
next_action, created, modified)
values (
sequence_nextval('rhn_repo_regen_queue_id_seq'),
:channel, :client, :reason, :force, :bypass_filters,
current_timestamp, current_timestamp, current_timestamp
)
""")
h.execute(channel=entry.channel, client=entry.client,
reason=entry.reason, force=self._boolean_as_char(entry.force),
bypass_filters=self._boolean_as_char(entry.bypass_filters))
def add_to_repodata_queue(channel, client, reason, force=False,
bypass_filters=False):
if reason == '':
reason = None
entry = RepodataQueueEntry(channel, client, reason, force, bypass_filters)
queue = RepodataQueue()
queue.add(entry)
# XXX not the best place for this...
def add_to_repodata_queue_for_channel_package_subscription(affected_channels,
batch, caller):
tmpreason = []
for package in batch:
tmpreason.append(package.short_str())
reason = " ".join(tmpreason)
for channel in affected_channels:
        # truncate the reason to 128 chars so the insert fits the db column
add_to_repodata_queue(channel, caller, reason[:128])
| gpl-2.0 | -5,680,111,563,538,335,000 | 29.454545 | 78 | 0.610448 | false | 3.996024 | false | false | false |
geobricks/pgeo | pgeo/gis/raster_scatter_workers.py | 1 | 14596 | import numpy as np
from osgeo import gdal
from pgeo.gis.raster import get_nodata_value
import time
# from pylab import hexbin,show
# from scipy.ndimage import measurements
# from scipy.stats import itemfreq
# import rasterio
from pysal.esda import mapclassify
import brewer2mpl
from threading import Thread
# import Queue
from pgeo.utils.log import logger
from pgeo.error.custom_exceptions import PGeoException
from scipy.optimize import curve_fit
from itertools import izip
from multiprocessing import Process, Manager, Lock, Queue, Pool
import multiprocessing
import threading
from scipy.stats import linregress
from os import kill
log = logger("pgeo.gis.raster_scatter")
def create_scatter(raster_path1, raster_path2, band1=1, band2=1, buckets=200, intervals=6, workers=3, forced_min1=0, forced_min2=0, color='Reds', color_type='Sequential', reverse=False):
    log.info("create_scatter called with %s workers" % workers)
ds1 = gdal.Open(raster_path1)
ds2 = gdal.Open(raster_path2)
rows1 = ds1.RasterYSize
cols1 = ds1.RasterXSize
rows2 = ds2.RasterYSize
cols2 = ds2.RasterXSize
log.info("Scatter Processing")
if cols1 != cols2 or rows1 != rows2:
log.error("The rasters cannot be processed because they have different dimensions")
log.error("%sx%s %sx%s" % (rows1, cols1, rows2, cols2))
raise PGeoException("The rasters cannot be processed because they have different dimensions", status_code=404)
band1 = ds1.GetRasterBand(band1)
array1 = np.array(band1.ReadAsArray()).flatten()
#array1 = np.array(band1.ReadAsArray())
nodata1 = band1.GetNoDataValue()
band2 = ds2.GetRasterBand(band2)
array2 = np.array(band2.ReadAsArray()).flatten()
#array2 = np.array(band2.ReadAsArray())
nodata2 = band2.GetNoDataValue()
# min/max calulation
(min1, max1) = band1.ComputeRasterMinMax(0)
step1 = (max1 - min1) / buckets
(min2, max2) = band2.ComputeRasterMinMax(0)
step2 = (max2 - min2) / buckets
# Calculation of the frequencies
#freqs = couples_with_freq(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1, nodata2)
#freqs = couples_with_freq_split(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1, nodata2)
statistics = couples_with_freq_multiprocess(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1, nodata2, workers)
#print len(freqs)
series = get_series(statistics["scatter"].values(), intervals, color, color_type, reverse)
#print series
result = dict()
# probably not useful for the chart itself
# result['min1'] = min1,
# result['min2'] = min2,
# result['max2'] = max2,
# result['step1'] = step1,
# result['step2'] = step2
result["series"] = series
result["stats"] = statistics["stats"]
# is it useful to remove them fro the memory?
del ds1
del ds2
del array1
del array2
return result
# def worker(arr1, arr2, step1, step2, out_q):
# d = dict()
# for item_a, item_b in izip(arr1, arr2):
# value1 = round(item_a / step1, 0)
# value2 = round(item_b / step2, 0)
# # print step1, step2
# # print value1, value2
# # key = str(value1) + "_" + str(value2)
# # print key
# # print item_a, item_b
# #
# #break
# key = str(value1) + "_" + str(value2)
# try:
# d[key]["freq"] += 1
# except:
# d[key] = {
# "data": [item_a, item_b],
# "freq": 1
# }
# print "worker end"
# out_q.put(d)
# out_q.close()
def worker(arr1, arr2, step1, step2, out_q):
d = dict()
try:
# TODO: move it from here: calculation of the regression coeffient
# TODO: add a boolean to check if it's need the computation of the coeffifcients
slope, intercept, r_value, p_value, std_err = linregress(arr1, arr2)
d["stats"] = {
"slope": slope,
"intercept": intercept,
"r_value": r_value,
"p_value": p_value,
"std_err": std_err
}
d["scatter"] = {}
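        # bin both rasters into a 200x200 2D histogram and keep only the
        # non-empty cells so the payload sent back through the queue stays small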
heatmap, xedges, yedges = np.histogram2d(arr1, arr2, bins=200)
for x in range(0, len(xedges)-1):
for y in range(0, len(yedges)-1):
if heatmap[x][y] > 0:
d["scatter"][str(xedges[x]) + "_" + str(yedges[y])] = {
"data": [xedges[x], yedges[y]],
"freq": heatmap[x][y]
}
log.info("worker end")
out_q.put(d)
out_q.close()
except PGeoException, e:
log.error(e.get_message())
raise PGeoException(e.get_message(), e.get_status_code())
def couples_with_freq_multiprocess(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1=None, nodata2=None, workers=3, rounding=0):
    log.debug("couples_with_freq_multiprocess")
start_time = time.time()
index1 = (array1 > forced_min1) & (array1 <= max1) & (array1 != nodata1)
index2 = (array2 > forced_min2) & (array2 <= max2) & (array2 != nodata2)
# merge array indexes
compound_index = index1 & index2
del index1
del index2
# it creates two arrays from the two original arrays
arr1 = array1[compound_index]
arr2 = array2[compound_index]
    log.debug("created two filtered arrays from the two original arrays")
del array1
del array2
    length_interval = len(arr1) / workers
    out_q = Queue()
    procs = []
    # exactly one chunk per worker, with the last worker taking any
    # remainder (the old stepped range could spawn an extra process
    # whose result was never collected from the queue)
    for w in range(workers):
        length_start = w * length_interval
        length_end = None if w == workers - 1 else length_start + length_interval
        a1 = arr1[length_start:length_end]
        a2 = arr2[length_start:length_end]
        p = multiprocessing.Process(target=worker, args=(a1, a2, step1, step2, out_q))
        procs.append(p)
        p.start()
# is it useful?
del arr1
del arr2
resultdict = []
for i in range(workers):
resultdict.append(out_q.get())
# check if the process was mono core
log.info("Workers %s ", workers)
if workers <= 1:
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
for p in procs:
p.join()
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
return resultdict[0]
else:
        log.info("Merging dictionaries")
        # merge the per-worker results: keep one copy of the stats and sum
        # the frequencies of scatter cells reported by several workers (the
        # old version overwrote the whole "scatter" dict on every pass)
        final_dict = {"stats": resultdict[0]["stats"], "scatter": dict()}
        for d in resultdict:
            for key, value in d["scatter"].iteritems():
                try:
                    final_dict["scatter"][key]["freq"] += value["freq"]
                except KeyError:
                    final_dict["scatter"][key] = value
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
for p in procs:
            p.terminate()
            try:
                # TODO: check the side effects of that workaround
                kill(p.pid, 9)
            except:
                pass
            log.debug("%s alive: %s" % (p, p.is_alive()))
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
return final_dict
class SummingThread(threading.Thread):
def __init__(self, array1, array2, step1, step2):
super(SummingThread, self).__init__()
self.array1=array1
self.array2=array2
self.step1=step1
self.step2=step2
def run(self):
self.d = dict()
        log.info("length of: %s", len(self.array1))
for item_a, item_b in izip(self.array1, self.array2):
value1 = round(item_a / self.step1, 2)
value2 = round(item_b / self.step2, 2)
key = str(value1) + "_" + str(value2)
try:
self.d[key]["freq"] += 1
except:
self.d[key] = {
"data": [item_a, item_b],
"freq": 1
}
def couples_with_freq_slow(array1, array2, step1, step2, min1, min2, max1, max2, rows, cols, buckets, nodata=None):
d = dict()
    log.debug("couples_with_freq_slow")
for i in range(0, len(array1)):
if array1[i] > min1 and array2[i] > min2:
value1 = str(int(array1[i] / step1))
value2 = str(int(array2[i] / step2))
# key value
key = str(value1 + "_" + value2)
# TODO this should be a rounding, otherwise the last one wins
value = [array1[i], array2[i]]
freq = 1
if key in d:
freq = d[key]["freq"] + 1
d[key] = {
"data": value,
"freq": freq
}
return d
def couples_with_freq_split(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1=None, nodata2=None, rounding=0):
# TODO: the rounding should be calculated by the step interval probably
log.info("Calculating frequencies")
start_time = time.time()
d = dict()
index1 = (array1 > forced_min1) & (array1 <= max1) & (array1 != nodata1)
index2 = (array2 > forced_min2) & (array2 <= max2) & (array2 != nodata2)
# merge array indexes
compound_index = index1 & index2
# it creates two arrays from the two original arrays
arr1 = array1[compound_index]
arr2 = array2[compound_index]
for item_a, item_b in izip(arr1, arr2):
value1 = round(item_a / step1, 0)
value2 = round(item_b / step2, 0)
key = str(value1) + "_" + str(value2)
try:
d[key]["freq"] += 1
except:
d[key] = {
"data": [item_a, item_b],
"freq": 1
}
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
    log.debug("%s couples" % len(d))
return d
def couples_with_freq(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1=None, nodata2=None, rounding=0):
'''
    Unlike couples_with_freq_old below, this uses a boolean index filter,
    which is slightly faster than the numpy.where approach.
:param array1:
:param array2:
:param step1:
:param step2:
:param min1:
:param min2:
:param max1:
:param max2:
:param forced_min1:
:param forced_min2:
:param nodata1:
:param nodata2:
:param rounding:
:return:
'''
# TODO: the rounding should be calculated by the step interval probably
log.info("Calculating frequencies")
start_time = time.time()
d = dict()
index1 = (array1 > forced_min1) & (array1 <= max1) & (array1 != nodata1)
index2 = (array2 > forced_min2) & (array2 <= max2) & (array2 != nodata2)
# merge array indexes
compound_index = index1 & index2
# it creates two arrays from the two original arrays
arr1 = array1[compound_index]
arr2 = array2[compound_index]
for item_a, item_b in izip(arr1, arr2):
value1 = round(item_a / step1, 0)
value2 = round(item_b / step2, 0)
key = str(value1) + "_" + str(value2)
try:
d[key]["freq"] += 1
except:
d[key] = {
"data": [item_a, item_b],
"freq": 1
}
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
    log.debug("%s couples" % len(d))
return d
def couples_with_freq_old(array1, array2, step1, step2, min1, min2, max1, max2, forced_min1, forced_min2, nodata1=None, nodata2=None, rounding=0):
# TODO: the rounding should be calculated by the step interval probably
log.info("Calculating frequencies")
start_time = time.time()
d = dict()
for i in np.where((array1 > forced_min1) & (array1 <= max1) & (array1 != nodata1)):
for j in np.where((array2 > forced_min2) & (array2 <= max2) & (array2 != nodata2)):
#print len(numpy.intersect1d(i, j))
for index in np.intersect1d(i, j):
val1 = array1[index]
val2 = array2[index]
#value1 = str(round(float(array1[index] / step1), 0))
#value2 = str(round(float(array2[index] / step2), 0))
value1 = int(val1 / step1)
value2 = int(val2 / step2)
key = str(value1) + "_" + str(value2)
# if key in d:
# d[key]["freq"] += 1
# else:
# d[key] = {
# "data": [value1, value2],
# "freq": 1
# }
try:
d[key]["freq"] += 1
except:
d[key] = {
"data": [val1, val2],
"freq": 1
}
# for v in d.values():
# print v
log.info("Computation done in %s seconds ---" % str(time.time() - start_time))
print len(d)
return d
# TODO: move it
def classify_values(values, k=5, classification_type="Jenks_Caspall"):
    # TODO use a "switch" between the various classification types (move them
    # to a separate classification module instead of keeping them here)
start_time = time.time()
#result = mapclassify.quantile(values, k)
#print values
#start_time = time.time()
array = np.array(values)
result = mapclassify.Jenks_Caspall_Forced(array, k)
log.info("Classification done in %s seconds ---" % str(time.time() - start_time))
#return result
return result.bins
def get_series(values, intervals, color, color_type, reverse=False):
classification_values = []
for v in values:
classification_values.append(float(v['freq']))
classes = classify_values(classification_values, intervals)
#bmap = brewer2mpl.get_map('RdYlGn', 'Diverging', 9, reverse=True)
bmap = brewer2mpl.get_map(color, color_type, intervals+1, reverse=reverse)
colors = bmap.hex_colors
# creating series
series = []
for color in colors:
#print color
series.append({
"color": color,
"data" : []
})
#print classes
for v in values:
freq = v['freq']
for i in range(len(classes)):
if freq <= classes[i]:
series[i]['data'].append([float(v['data'][0]), float(v['data'][1])])
break
return series | gpl-2.0 | -3,450,618,132,551,992,300 | 31.7287 | 187 | 0.564744 | false | 3.369344 | false | false | false |
andrei4ka/fuel-web-redhat | fuel_agent/fuel_agent/drivers/nailgun.py | 1 | 17344 | # Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from fuel_agent.drivers import ks_spaces_validator
from fuel_agent import errors
from fuel_agent import objects
from fuel_agent.openstack.common import log as logging
from fuel_agent.utils import hardware_utils as hu
LOG = logging.getLogger(__name__)
def match_device(hu_disk, ks_disk):
"""Tries to figure out if hu_disk got from hu.list_block_devices
    and the given ks_spaces disk correspond to the same disk device. This
    is a simplified version of hu.match_device.
    :param hu_disk: A dict representing a disk device as given
    by the list_block_devices method.
:param ks_disk: A dict representing disk device according to
ks_spaces format.
:returns: True if hu_disk matches ks_spaces_disk else False.
"""
uspec = hu_disk['uspec']
# True if at least one by-id link matches ks_disk
if ('DEVLINKS' in uspec and len(ks_disk.get('extra', [])) > 0
and any(x.startswith('/dev/disk/by-id') for x in
set(uspec['DEVLINKS']) &
set(['/dev/%s' % l for l in ks_disk['extra']]))):
return True
# True if one of DEVLINKS matches ks_disk id
if (len(ks_disk.get('extra', [])) == 0
and 'DEVLINKS' in uspec and 'id' in ks_disk
and '/dev/%s' % ks_disk['id'] in uspec['DEVLINKS']):
return True
return False
class Nailgun(object):
def __init__(self, data):
# Here data is expected to be raw provisioning data
# how it is given by nailgun
self.data = data
def partition_data(self):
return self.data['ks_meta']['pm_data']['ks_spaces']
@property
def ks_disks(self):
disk_filter = lambda x: x['type'] == 'disk' and x['size'] > 0
return filter(disk_filter, self.partition_data())
@property
def ks_vgs(self):
vg_filter = lambda x: x['type'] == 'vg'
return filter(vg_filter, self.partition_data())
@property
def hu_disks(self):
"""Actual disks which are available on this node
it is a list of dicts which are formatted other way than
ks_spaces disks. To match both of those formats use
_match_device method.
"""
if not getattr(self, '_hu_disks', None):
self._hu_disks = hu.list_block_devices(disks=True)
return self._hu_disks
def _disk_dev(self, ks_disk):
# first we try to find a device that matches ks_disk
# comparing by-id and by-path links
matched = [hu_disk['device'] for hu_disk in self.hu_disks
if match_device(hu_disk, ks_disk)]
# if we can not find a device by its by-id and by-path links
# we try to find a device by its name
fallback = [hu_disk['device'] for hu_disk in self.hu_disks
if '/dev/%s' % ks_disk['name'] == hu_disk['device']]
found = matched or fallback
if not found or len(found) > 1:
raise errors.DiskNotFoundError(
'Disk not found: %s' % ks_disk['name'])
return found[0]
def _getlabel(self, label):
if not label:
return ''
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return ' -L {0} '.format(label[:12])
def _get_partition_count(self, name):
count = 0
for disk in self.ks_disks:
count += len([v for v in disk["volumes"]
if v.get('name') == name and v['size'] > 0])
return count
def _num_ceph_journals(self):
return self._get_partition_count('cephjournal')
def _num_ceph_osds(self):
return self._get_partition_count('ceph')
def partition_scheme(self):
LOG.debug('--- Preparing partition scheme ---')
data = self.partition_data()
ks_spaces_validator.validate(data)
partition_scheme = objects.PartitionScheme()
ceph_osds = self._num_ceph_osds()
journals_left = ceph_osds
ceph_journals = self._num_ceph_journals()
LOG.debug('Looping over all disks in provision data')
for disk in self.ks_disks:
LOG.debug('Processing disk %s' % disk['name'])
LOG.debug('Adding gpt table on disk %s' % disk['name'])
parted = partition_scheme.add_parted(
name=self._disk_dev(disk), label='gpt')
# we install bootloader on every disk
LOG.debug('Adding bootloader stage0 on disk %s' % disk['name'])
parted.install_bootloader = True
# legacy boot partition
LOG.debug('Adding bios_grub partition on disk %s: size=24' %
disk['name'])
parted.add_partition(size=24, flags=['bios_grub'])
# uefi partition (for future use)
LOG.debug('Adding UEFI partition on disk %s: size=200' %
disk['name'])
parted.add_partition(size=200)
LOG.debug('Looping over all volumes on disk %s' % disk['name'])
for volume in disk['volumes']:
LOG.debug('Processing volume: '
'name=%s type=%s size=%s mount=%s vg=%s' %
(volume.get('name'), volume.get('type'),
volume.get('size'), volume.get('mount'),
volume.get('vg')))
if volume['size'] <= 0:
LOG.debug('Volume size is zero. Skipping.')
continue
if volume.get('name') == 'cephjournal':
LOG.debug('Volume seems to be a CEPH journal volume. '
'Special procedure is supposed to be applied.')
# We need to allocate a journal partition for each ceph OSD
# Determine the number of journal partitions we need on
# each device
ratio = math.ceil(float(ceph_osds) / ceph_journals)
# No more than 10GB will be allocated to a single journal
# partition
size = volume["size"] / ratio
if size > 10240:
size = 10240
# This will attempt to evenly spread partitions across
# multiple devices e.g. 5 osds with 2 journal devices will
# create 3 partitions on the first device and 2 on the
# second
if ratio < journals_left:
end = ratio
else:
end = journals_left
for i in range(0, end):
journals_left -= 1
if volume['type'] == 'partition':
LOG.debug('Adding CEPH journal partition on '
'disk %s: size=%s' %
(disk['name'], size))
prt = parted.add_partition(size=size)
LOG.debug('Partition name: %s' % prt.name)
if 'partition_guid' in volume:
LOG.debug('Setting partition GUID: %s' %
volume['partition_guid'])
prt.set_guid(volume['partition_guid'])
continue
if volume['type'] in ('partition', 'pv', 'raid'):
LOG.debug('Adding partition on disk %s: size=%s' %
(disk['name'], volume['size']))
prt = parted.add_partition(size=volume['size'])
LOG.debug('Partition name: %s' % prt.name)
if volume['type'] == 'partition':
if 'partition_guid' in volume:
LOG.debug('Setting partition GUID: %s' %
volume['partition_guid'])
prt.set_guid(volume['partition_guid'])
if 'mount' in volume and volume['mount'] != 'none':
LOG.debug('Adding file system on partition: '
'mount=%s type=%s' %
(volume['mount'],
volume.get('file_system', 'xfs')))
partition_scheme.add_fs(
device=prt.name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
if volume['type'] == 'pv':
LOG.debug('Creating pv on partition: pv=%s vg=%s' %
(prt.name, volume['vg']))
lvm_meta_size = volume.get('lvm_meta_size', 64)
# The reason for that is to make sure that
# there will be enough space for creating logical volumes.
# Default lvm extension size is 4M. Nailgun volume
                    # manager does not account for it, and if physical volume size
# is 4M * N + 3M and lvm metadata size is 4M * L then only
# 4M * (N-L) + 3M of space will be available for
# creating logical extensions. So only 4M * (N-L) of space
# will be available for logical volumes, while nailgun
                    # volume manager might require 4M * (N-L) + 3M
# logical volume. Besides, parted aligns partitions
# according to its own algorithm and actual partition might
# be a bit smaller than integer number of mebibytes.
if lvm_meta_size < 10:
raise errors.WrongPartitionSchemeError(
'Error while creating physical volume: '
'lvm metadata size is too small')
metadatasize = int(math.floor((lvm_meta_size - 8) / 2))
metadatacopies = 2
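                    # e.g. with the default lvm_meta_size of 64 this reserves
                    # two copies of 28 MB of metadata on the physical volume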
partition_scheme.vg_attach_by_name(
pvname=prt.name, vgname=volume['vg'],
metadatasize=metadatasize,
metadatacopies=metadatacopies)
if volume['type'] == 'raid':
if 'mount' in volume and volume['mount'] != 'none':
LOG.debug('Attaching partition to RAID '
'by its mount point %s' % volume['mount'])
partition_scheme.md_attach_by_mount(
device=prt.name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
# this partition will be used to put there configdrive image
if partition_scheme.configdrive_device() is None:
LOG.debug('Adding configdrive partition on disk %s: size=20' %
disk['name'])
parted.add_partition(size=20, configdrive=True)
LOG.debug('Looping over all volume groups in provision data')
for vg in self.ks_vgs:
LOG.debug('Processing vg %s' % vg['id'])
LOG.debug('Looping over all logical volumes in vg %s' % vg['id'])
for volume in vg['volumes']:
LOG.debug('Processing lv %s' % volume['name'])
if volume['size'] <= 0:
LOG.debug('Lv size is zero. Skipping.')
continue
if volume['type'] == 'lv':
LOG.debug('Adding lv to vg %s: name=%s, size=%s' %
(vg['id'], volume['name'], volume['size']))
lv = partition_scheme.add_lv(name=volume['name'],
vgname=vg['id'],
size=volume['size'])
if 'mount' in volume and volume['mount'] != 'none':
LOG.debug('Adding file system on lv: '
'mount=%s type=%s' %
(volume['mount'],
volume.get('file_system', 'xfs')))
partition_scheme.add_fs(
device=lv.device_name, mount=volume['mount'],
fs_type=volume.get('file_system', 'xfs'),
fs_label=self._getlabel(volume.get('disk_label')))
LOG.debug('Appending kernel parameters: %s' %
self.data['ks_meta']['pm_data']['kernel_params'])
partition_scheme.append_kernel_params(
self.data['ks_meta']['pm_data']['kernel_params'])
return partition_scheme
def configdrive_scheme(self):
LOG.debug('--- Preparing configdrive scheme ---')
data = self.data
configdrive_scheme = objects.ConfigDriveScheme()
LOG.debug('Adding common parameters')
admin_interface = filter(
lambda x: (x['mac_address'] ==
data['kernel_options']['netcfg/choose_interface']),
[dict(name=name, **spec) for name, spec
in data['interfaces'].iteritems()])[0]
ssh_auth_keys = data['ks_meta']['authorized_keys']
if data['ks_meta']['auth_key']:
ssh_auth_keys.append(data['ks_meta']['auth_key'])
configdrive_scheme.set_common(
ssh_auth_keys=ssh_auth_keys,
hostname=data['hostname'],
fqdn=data['hostname'],
name_servers=data['name_servers'],
search_domain=data['name_servers_search'],
master_ip=data['ks_meta']['master_ip'],
master_url='http://%s:8000/api' % data['ks_meta']['master_ip'],
udevrules=data['kernel_options']['udevrules'],
admin_mac=data['kernel_options']['netcfg/choose_interface'],
admin_ip=admin_interface['ip_address'],
admin_mask=admin_interface['netmask'],
admin_iface_name=admin_interface['name'],
timezone=data['ks_meta'].get('timezone', 'America/Los_Angeles'),
ks_repos=dict(map(lambda x: x.strip('"').strip("'"),
item.split('=')) for item in
data['ks_meta']['repo_metadata'].split(','))
)
LOG.debug('Adding puppet parameters')
configdrive_scheme.set_puppet(
master=data['ks_meta']['puppet_master'],
enable=data['ks_meta']['puppet_enable']
)
LOG.debug('Adding mcollective parameters')
configdrive_scheme.set_mcollective(
pskey=data['ks_meta']['mco_pskey'],
vhost=data['ks_meta']['mco_vhost'],
host=data['ks_meta']['mco_host'],
user=data['ks_meta']['mco_user'],
password=data['ks_meta']['mco_password'],
connector=data['ks_meta']['mco_connector'],
enable=data['ks_meta']['mco_enable']
)
LOG.debug('Setting configdrive profile %s' % data['profile'])
configdrive_scheme.set_profile(profile=data['profile'])
return configdrive_scheme
def image_scheme(self, partition_scheme):
LOG.debug('--- Preparing image scheme ---')
data = self.data
image_scheme = objects.ImageScheme()
# We assume for every file system user may provide a separate
# file system image. For example if partitioning scheme has
# /, /boot, /var/lib file systems then we will try to get images
# for all those mount points. Images data are to be defined
# at provision.json -> ['ks_meta']['image_data']
LOG.debug('Looping over all file systems in partition scheme')
for fs in partition_scheme.fss:
LOG.debug('Processing fs %s' % fs.mount)
if fs.mount not in data['ks_meta']['image_data']:
LOG.debug('There is no image for fs %s. Skipping.' % fs.mount)
continue
image_data = data['ks_meta']['image_data'][fs.mount]
LOG.debug('Adding image for fs %s: uri=%s format=%s container=%s' %
(fs.mount, image_data['uri'],
image_data['format'], image_data['container']))
image_scheme.add_image(
uri=image_data['uri'],
target_device=fs.device,
# In the future we will get format and container
# from provision.json, but currently it is hard coded.
format=image_data['format'],
container=image_data['container'],
)
return image_scheme
| apache-2.0 | -1,639,415,933,257,223,700 | 45.005305 | 79 | 0.520699 | false | 4.327345 | true | false | false |
umitproject/openmonitor-aggregator | model_utils/choices.py | 1 | 2893 | class Choices(object):
"""
A class to encapsulate handy functionality for lists of choices
for a Django model field.
Each argument to ``Choices`` is a choice, represented as either a
string, a two-tuple, or a three-tuple.
If a single string is provided, that string is used as the
database representation of the choice as well as the
human-readable presentation.
If a two-tuple is provided, the first item is used as the database
representation and the second the human-readable presentation.
If a triple is provided, the first item is the database
representation, the second a valid Python identifier that can be
used as a readable label in code, and the third the human-readable
presentation. This is most useful when the database representation
must sacrifice readability for some reason: to achieve a specific
ordering, to use an integer rather than a character field, etc.
Regardless of what representation of each choice is originally
given, when iterated over or indexed into, a ``Choices`` object
behaves as the standard Django choices list of two-tuples.
If the triple form is used, the Python identifier names can be
accessed as attributes on the ``Choices`` object, returning the
database representation. (If the single or two-tuple forms are
used and the database representation happens to be a valid Python
identifier, the database representation itself is available as an
attribute on the ``Choices`` object, returning itself.)
"""
def __init__(self, *choices):
self._full = []
self._choices = []
self._choice_dict = {}
for choice in self.equalize(choices):
self._full.append(choice)
self._choices.append((choice[0], choice[2]))
self._choice_dict[choice[1]] = choice[0]
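    # Illustrative example (hypothetical choices):
    #   STATUS = Choices('new', ('done', 'Done'),
    #                    ('in_pr', 'in_progress', 'In progress'))
    #   list(STATUS) == [('new', 'new'), ('done', 'Done'),
    #                    ('in_pr', 'In progress')]
    #   STATUS.in_progress == 'in_pr'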
def equalize(self, choices):
for choice in choices:
if isinstance(choice, (list, tuple)):
if len(choice) == 3:
yield choice
elif len(choice) == 2:
yield (choice[0], choice[0], choice[1])
else:
raise ValueError("Choices can't handle a list/tuple of length %s, only 2 or 3"
% len(choice))
else:
yield (choice, choice, choice)
def __len__(self):
return len(self._choices)
def __iter__(self):
return iter(self._choices)
def __getattr__(self, attname):
try:
return self._choice_dict[attname]
except KeyError:
raise AttributeError(attname)
def __getitem__(self, index):
return self._choices[index]
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(("%s" % str(i) for i in self._full)))
| agpl-3.0 | 433,746,921,144,594,300 | 37.573333 | 98 | 0.620809 | false | 4.606688 | false | false | false |
MathieuLeocmach/colloids | python/colloids/povray.py | 1 | 9924 | ## {{{ http://code.activestate.com/recipes/205451/ (r1)
import sys, os
from math import pi, sin, cos, sqrt
import numpy as np
import networkx as nx
#from pygraph.classes.graph import graph
#from pygraph.algorithms.accessibility import connected_components
class File:
def __init__(self,fnam="out.pov",*items):
self.file = open(fnam,"w")
self.__indent = 0
self.write(*items)
def include(self,name):
self.writeln( '#include "%s"'%name )
self.writeln()
def indent(self):
self.__indent += 1
def dedent(self):
self.__indent -= 1
assert self.__indent >= 0
def block_begin(self):
self.writeln( "{" )
self.indent()
def block_end(self):
self.dedent()
self.writeln( "}" )
if self.__indent == 0:
# blank line if this is a top level end
self.writeln( )
def write(self,*items):
for item in items:
if type(item) == str:
self.include(item)
else:
item.write(self)
def writeln(self,s=""):
#print " "*self.__indent+s
self.file.write(" "*self.__indent+s+os.linesep)
class Vector:
def __init__(self,*args):
if len(args) == 1:
self.v = args[0]
else:
self.v = args
def __str__(self):
return "<%s>"%(", ".join([str(x)for x in self.v]))
def __repr__(self):
return "Vector(%s)"%self.v
def __mul__(self,other):
return Vector( [r*other for r in self.v] )
def __rmul__(self,other):
return Vector( [r*other for r in self.v] )
class Item:
def __init__(self,name,args=[],opts=[],**kwargs):
self.name = name
args=list(args)
for i in range(len(args)):
if type(args[i]) == tuple or type(args[i]) == list:
args[i] = Vector(args[i])
self.args = args
self.opts = opts
self.kwargs=kwargs
def append(self, item):
self.opts.append( item )
def write(self, file):
file.writeln( self.name )
file.block_begin()
if self.args:
file.writeln( ", ".join([str(arg) for arg in self.args]) )
for opt in self.opts:
if hasattr(opt,"write"):
opt.write(file)
else:
file.writeln( str(opt) )
for key,val in list(self.kwargs.items()):
if type(val)==tuple or type(val)==list:
val = Vector(*val)
file.writeln( "%s %s"%(key,val) )
else:
file.writeln( "%s %s"%(key,val) )
file.block_end()
def __setattr__(self,name,val):
self.__dict__[name]=val
if name not in ["kwargs","args","opts","name"]:
self.__dict__["kwargs"][name]=val
def __setitem__(self,i,val):
if i < len(self.args):
self.args[i] = val
else:
            i -= len(self.args)
if i < len(self.opts):
self.opts[i] = val
    def __getitem__(self,i):
        if i < len(self.args):
            return self.args[i]
        else:
            i -= len(self.args)
            if i < len(self.opts):
                return self.opts[i]
class Texture(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"texture",(),opts,**kwargs)
class Pigment(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"pigment",(),opts,**kwargs)
class Finish(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"finish",(),opts,**kwargs)
class Normal(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"normal",(),opts,**kwargs)
class Camera(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"camera",(),opts,**kwargs)
class LightSource(Item):
def __init__(self,v,*opts,**kwargs):
Item.__init__(self,"light_source",(Vector(v),),opts,**kwargs)
class Background(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"background",(),opts,**kwargs)
class Box(Item):
def __init__(self,v1,v2,*opts,**kwargs):
#self.v1 = Vector(v1)
#self.v2 = Vector(v2)
Item.__init__(self,"box",(v1,v2),opts,**kwargs)
class Cylinder(Item):
def __init__(self,v1,v2,r,*opts,**kwargs):
" opts: open "
Item.__init__(self,"cylinder",(v1,v2,r),opts,**kwargs)
class Plane(Item):
def __init__(self,v,r,*opts,**kwargs):
Item.__init__(self,"plane",(v,r),opts,**kwargs)
class Torus(Item):
def __init__(self,r1,r2,*opts,**kwargs):
Item.__init__(self,"torus",(r1,r2),opts,**kwargs)
class Cone(Item):
def __init__(self,v1,r1,v2,r2,*opts,**kwargs):
" opts: open "
Item.__init__(self,"cone", (v1,r1,v2,r2),opts,**kwargs)
class Sphere(Item):
def __init__(self,v,r,*opts,**kwargs):
Item.__init__(self,"sphere",(v,r),opts,**kwargs)
class Union(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"union",(),opts,**kwargs)
class Intersection(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"intersection",(),opts,**kwargs)
class Difference(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"difference",(),opts,**kwargs)
class Merge(Item):
def __init__(self,*opts,**kwargs):
Item.__init__(self,"merge",(),opts,**kwargs)
class Mesh2(Item):
class VertexVectors(Item):
def __init__(self,vertex,*opts,**kwargs):
Item.__init__(self, "vertex_vectors", (len(vertex), *map(Vector, vertex)), opts,**kwargs)
class FaceIndices(Item):
def __init__(self,faces,*opts,**kwargs):
Item.__init__(self, "face_indices", (len(faces), *map(Vector, faces)), opts,**kwargs)
class VertexNormals(Item):
def __init__(self,faces,*opts, **kwargs):
Item.__init__(self, "normal_vectors", (len(faces), *map(Vector, faces)), opts,**kwargs)
def __init__(self,vertex,faces,*opts, normals=None,**kwargs):
if normals is None:
Item.__init__(self, "mesh2", (), (self.VertexVectors(vertex), self.FaceIndices(faces), *opts), **kwargs)
else:
Item.__init__(self, "mesh2", (), (self.VertexVectors(vertex), self.VertexNormals(normals), self.FaceIndices(faces), *opts), **kwargs)
x = Vector(1,0,0)
y = Vector(0,1,0)
z = Vector(0,0,1)
white = Texture(Pigment(color=(1,1,1)))
def tutorial31():
" from the povray tutorial sec. 3.1"
file=File("demo.pov","colors.inc","stones.inc")
cam = Camera(location=(0,2,-3),look_at=(0,1,2))
sphere = Sphere( (0,1,2), 2, Texture(Pigment(color="Yellow")))
light = LightSource( (2,4,-3), color="White")
file.write( cam, sphere, light )
def spiral():
" Fibonacci spiral "
gamma = (sqrt(5)-1)/2
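    # golden-ratio conjugate: advancing theta by gamma*2*pi each step
    # gives the golden angle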
file = File()
Camera(location=(0,0,-128), look_at=(0,0,0)).write(file)
LightSource((100,100,-100), color=(1,1,1)).write(file)
LightSource((150,150,-100), color=(0,0,0.3)).write(file)
LightSource((-150,150,-100), color=(0,0.3,0)).write(file)
LightSource((150,-150,-100), color=(0.3,0,0)).write(file)
theta = 0.0
for i in range(200):
r = i * 0.5
color = 1,1,1
v = [ r*sin(theta), r*cos(theta), 0 ]
Sphere( v, 0.7*sqrt(i),
Texture(
Finish(
ambient = 0.0,
diffuse = 0.0,
reflection = 0.85,
specular = 1
),
Pigment(color=color))
).write(file)
theta += gamma * 2 * pi
## end of http://code.activestate.com/recipes/205451/ }}}
def exportPOV(
path = '/mnt/htw20/Documents/data/retrack/go/1/',
head = 'J1_thr0_radMin3.1_radMax0_6min',
tail = '_t000',
out = '/home/mathieu/Documents/Thesis/data/go/1/mrco_ico.pov',
ico_thr = -0.027,
zmin = 100,
zmax = 175,
header = 'go1.inc',
polydisperse = False
):
if polydisperse:
positions = np.load(path+head+tail+'.npy')
radii = positions[:,-2]*np.sqrt(2)
positions = positions[:,:-2]
else:
positions = np.loadtxt(path+head+tail+'.dat', skiprows=2)
Q6 = np.loadtxt(path+head+'_space'+tail+'.cloud', usecols=[1])
bonds = np.loadtxt(path+head+tail+'.bonds', dtype=int)
q6, w6 = np.loadtxt(path+head+tail+'.cloud', usecols=[1,5], unpack=True)
u6 = ((2*6+1)/(4.0*np.pi))**1.5 * w6 * q6**3
ico_bonds = np.bitwise_and(
u6[bonds].min(axis=-1)<ico_thr,
np.bitwise_and(
positions[:,-1][bonds].min(axis=-1)<zmax,
positions[:,-1][bonds].max(axis=-1)>zmin
)
)
ico = np.unique(bonds[ico_bonds])
mrco = np.unique(bonds[np.bitwise_and(
Q6[bonds].max(axis=-1)>0.25,
np.bitwise_and(
positions[:,-1][bonds].min(axis=-1)<zmax,
positions[:,-1][bonds].max(axis=-1)>zmin
)
)])
    gr = nx.Graph()
    gr.add_nodes_from(ico)
    for a, b in bonds[ico_bonds]:
        gr.add_edge(a, b)
    try:
        # emulate pygraph's connected_components: a dict mapping each node
        # to a 1-based cluster id, which the colouring code below expects
        cc = dict()
        for i, component in enumerate(nx.connected_components(gr)):
            for p in component:
                cc[p] = i + 1
    except RuntimeError:
        print("Graph is too large for ico_thr=%g, lower the threshold."%ico_thr)
        return
#remove clusters than contain less than 10 particles
## sizes = np.zeros(max(cc.values()), int)
## for p,cl in cc.iteritems():
## sizes[cl-1] +=1
## cc2 = dict()
## for p,cl in cc.iteritems():
## if sizes[cl-1]>9:
## cc2[p] = cl
## cc =cc2
if polydisperse:
pov_mrco = [
Sphere((x,y,z), r)
for x,y,z,r in np.column_stack((positions,radii))[np.setdiff1d(mrco, ico)]
]
else:
pov_mrco = [
Sphere((x,y,z), 6)
for x,y,z in positions[np.setdiff1d(mrco, ico)]
]
pov_mrco = Union(*pov_mrco + [Texture(Pigment(color="Green"))])
if polydisperse:
pov_ico = [
Sphere(
tuple(positions[p].tolist()),
radii[p],
Texture(Pigment(color="COLORSCALE(%f)"%(cl*120.0/max(cc.values()))))
)
for p, cl in cc.items()]
else:
pov_ico = [
Sphere(
tuple(positions[p].tolist()),
6,
Texture(Pigment(color="COLORSCALE(%f)"%(cl*120.0/max(cc.values()))))
)
for p, cl in cc.items()]
pov_ico = Union(*pov_ico)
f = File(out, "colors.inc", header)
f.write(pov_mrco, pov_ico)
f.file.flush()
| gpl-3.0 | 7,666,974,326,671,625,000 | 29.819876 | 139 | 0.556429 | false | 3.027456 | false | false | false |
masteroncluster/py-censure | censure/base.py | 1 | 15444 | # -*- coding: utf-8 -*-
# Author: [email protected]
# Py-Censure is an obscene words detector/replacer for Russian / English languages
# Russian patterns are from PHP-Matotest, http://php-matotest.sourceforge.net/,
# that was written by Scarab
# Ported to Python by Master.Cluster <[email protected]>, 2010, 2016
# English patters are adapted from http://www.noswearing.com/dictionary/
from __future__ import unicode_literals, print_function
import re
from copy import deepcopy
from importlib import import_module
from .lang.common import patterns, constants
def _get_token_value(token):
return token.value
def _get_remained_tokens(tags_list):
if not tags_list:
return '', '' # pre, post
pre = []
post = []
body_pre = []
body_post = []
word_started = word_ended = False
# <a><b>wo</b>rd<i> here</i><img><span>End</span>
while len(tags_list):
# pre and body
tag = tags_list.pop(0)
if tag.token_type == 'w':
word_started = True
if word_started:
if tag.token_type in 'to tc ts':
body_pre.append(tag)
else:
pre.append(tag)
# post
if len(tags_list):
tag = tags_list.pop(-1)
if tag.token_type == 'w':
word_ended = True
if word_ended:
if tag.token_type in 'to tc ts':
body_post.insert(0, tag)
else:
post.insert(0, tag)
body_tags = body_pre + body_post
while len(body_tags):
tag = body_tags.pop(0)
        if tag.token_type == 'sp':  # do we really need these tags?
continue
elif tag.token_type == 'tc':
# can find in pre or in body
open_tags = [x for x in pre if x.tag == tag.tag and x.token_type == 'to']
if len(open_tags):
pre.remove(open_tags[0])
continue
else:
# can be in body
close_tags = [x for x in body_tags if x.tag == tag.tag and x.token_type == 'tc']
if len(close_tags):
body_tags.remove(close_tags[0])
continue
# can find in post
close_tags = [x for x in post if x.tag == tag.tag and x.token_type == 'tc']
if len(close_tags):
post.remove(close_tags[0])
continue
return ''.join(map(_get_token_value, pre + body_tags)), ''.join(map(_get_token_value, post))
class Token(object):
def __init__(self, value=None, token_type=None):
        head = value.split(' ', 1)  # split off the tag name
if len(head) == 1:
# simple tag i.e <h1>, </i>
            head = head[0][1:-1].lower()  # strip the surrounding '<' and '>'
else:
# complex tag with inner params i.e <input type=...>
head = head[0].lower()[1:]
if not token_type:
token_type = 'to' # open type ie <a...>
# should derive from value
if head[0] == '/':
head = head[1:]
token_type = 'tc' # close type ie </a>
elif value[-2] == '/':
token_type = 'ts' # self-closed type ie <img .../>
if token_type in 'to tc ts' and \
re.match(patterns.PAT_HTML_SPACE, value): # token_type != w aka word
token_type = 'sp' # this is SPACER!!!
self.value = value
        # token types: w - word (or part of one), to/tc/ts - open/closing/
        # self-closed tag, sp - spacer
        self.token_type = token_type
        self.tag = head
def __repr__(self):
return 'Token({}) {} {}'.format(self.value, self.tag, self.token_type) # .encode('utf-8')
class CensorException(Exception):
pass
class CensorBase:
lang = 'ru'
def __init__(self, do_compile=True):
self.lang_lib = import_module('censure.lang.{}'.format(self.lang))
if do_compile:
# patterns will be pre-compiled, so we need to copy them
def prep_var(v):
return deepcopy(v)
else:
def prep_var(v):
return v
# language-related constants data loading and preparations
self.bad_phrases = prep_var(self.lang_lib.constants.BAD_PHRASES)
self.bad_semi_phrases = prep_var(self.lang_lib.constants.BAD_SEMI_PHRASES)
self.excludes_data = prep_var(self.lang_lib.constants.EXCLUDES_DATA)
self.excludes_core = prep_var(self.lang_lib.constants.EXCLUDES_CORE)
self.foul_data = prep_var(self.lang_lib.constants.FOUL_DATA)
self.foul_core = prep_var(self.lang_lib.constants.FOUL_CORE)
self.do_compile = do_compile
if do_compile:
self._compile() # will compile patterns
def _compile(self):
"""
        For testing functionality and finding the regexp rules a word falls
        under, disable this call by passing do_compile=False to __init__,
        debug and fix the bad rule, then use do_compile=True again.
"""
for attr in ('excludes_data', 'excludes_core',
'foul_data', 'foul_core', 'bad_semi_phrases', 'bad_phrases'):
obj = getattr(self, attr)
if isinstance(obj, dict):
for (k, v) in obj.items():
                    # safe because of from __future__ import unicode_literals
if isinstance(v, "".__class__):
obj[k] = re.compile(v)
else:
obj[k] = tuple((re.compile(v[i]) for i in range(0, len(v))))
setattr(self, attr, obj)
else:
new_obj = []
for i in range(0, len(obj)):
new_obj.append(re.compile(obj[i]))
setattr(self, attr, new_obj)
def check_line(self, line):
line_info = {'is_good': True}
words = self._split_line(line)
# Checking each word in phrase line, if found any foul word,
# we think that all phrase line is bad
if words:
for word in words:
word_info = self.check_word(word)
if not word_info['is_good']:
line_info.update({
'is_good': False,
'bad_word_info': word_info
})
break
if line_info['is_good']:
phrases_info = self.check_line_bad_phrases(line)
if not phrases_info['is_good']:
line_info.update(phrases_info)
return line_info
def check_line_bad_phrases(self, line):
line_info = self._get_word_info(line)
self._check_regexps(self.bad_phrases, line_info)
line_info.pop('word') # not the word but the line
return line_info
def _split_line(self, line):
raise CensorException('Not implemented in CensorBase')
def _prepare_word(self, word):
if not self._is_pi_or_e_word(word):
word = re.sub(patterns.PAT_PUNCT3, '', word)
word = word.lower()
for pat, rep in self.lang_lib.patterns.PATTERNS_REPLACEMENTS:
word = re.sub(pat, rep, word)
# replace similar symbols from another charsets with russian chars
word = word.translate(self.lang_lib.constants.TRANS_TAB)
# deduplicate chars
word = self._remove_duplicates(word)
return word
@staticmethod
def _get_word_info(word):
return {
'is_good': True, 'word': word,
'accuse': [], 'excuse': []
}
def check_word(self, word, html=False):
word = self._prepare_word(word)
word_info = self._get_word_info(word)
# Accusing word
fl = word[:1] # first_letter
if fl in self.foul_data:
self._check_regexps(self.foul_data[fl], word_info)
if word_info['is_good']: # still good, more accuse checks
self._check_regexps(self.foul_core, word_info)
if word_info['is_good']: # still good, more accuse checks
self._check_regexps(self.bad_semi_phrases, word_info)
# Excusing word
if not word_info['is_good']:
self._check_regexps(self.excludes_core, word_info, accuse=False) # excusing
if not word_info['is_good'] and fl in self.excludes_data:
self._check_regexps(self.excludes_data[fl], word_info, accuse=False) # excusing
return word_info
@staticmethod
def _is_pi_or_e_word(word):
if '2.72' in word or '3.14' in word:
return True
return False
def clean_line(self, line, beep=constants.BEEP):
bad_words_count = 0
words = re.split(patterns.PAT_SPACE, line)
for word in words:
word_info = self.check_word(word)
if not word_info['is_good']:
bad_words_count += 1
line = line.replace(word, beep, 1)
bad_phrases_count = 0
line_info = self.check_line_bad_phrases(line)
if not line_info['is_good']:
for pat in line_info['accuse']:
line2 = re.sub(pat, beep, line)
if line2 != line:
bad_phrases_count += 1
line = line2
return line, bad_words_count, bad_phrases_count
def clean_html_line(self, line, beep=constants.BEEP_HTML):
bad_words_count = start = 0
tokens = []
for tag in re.finditer(patterns.PAT_HTML_TAG, line): # iter over tags
text = line[start:tag.start()]
# find spaces in text
spacers = re.finditer(patterns.PAT_SPACE, text)
spacer_start = 0
for spacer_tag in spacers:
word = text[spacer_start:spacer_tag.start()]
if word:
tokens.append(Token(token_type='w', value=word))
tokens.append(Token(token_type='sp', value=spacer_tag.group()))
spacer_start = spacer_tag.end()
word = text[spacer_start:]
if word:
tokens.append(Token(token_type='w', value=word))
start = tag.end()
tokens.append(Token(value=tag.group()))
word = line[start:]
# LAST prep
if word:
tokens.append(Token(token_type='w', value=word))
current_word = current_tagged_word = ''
result = ''
tagged_word_list = []
def process_spacer(cw, ctw, twl, r, bwc, tok=None):
if cw and not self.is_word_good(cw, html=True):
# Here we must find pre and post badword tags to add in result,
# ie <h1><b>BAD</b> -> <h1> must remain
pre, post = _get_remained_tokens(twl)
# bad word
r += pre + beep + post
bwc += 1
else:
# good word
r += ctw
twl = []
cw = ctw = ''
if tok:
r += tok.value
return cw, ctw, twl, r, bwc
for token in tokens:
if token.token_type in 'to tc ts':
tagged_word_list.append(token)
current_tagged_word += token.value
elif token.token_type == 'w':
tagged_word_list.append(token)
current_tagged_word += token.value
current_word += token.value
else:
# spacer here
current_word, current_tagged_word, tagged_word_list, result, bad_words_count = \
process_spacer(current_word, current_tagged_word, tagged_word_list,
result, bad_words_count, tok=token)
if current_word:
current_word, current_tagged_word, tagged_word_list, result, bad_words_count = \
process_spacer(
current_word, current_tagged_word, tagged_word_list, result, bad_words_count,
tok=None)
return result, bad_words_count
def is_word_good(self, word, html=True):
word_info = self.check_word(word, html=html)
return word_info['is_good']
def _get_rule(self, rule):
if not self.do_compile:
return rule
else:
return '{} {}'.format(
rule,
'If you want to see string-value of regexp, '
'init with do_compile=False for debug'
)
@staticmethod
def _remove_duplicates(word):
buf = prev_char = ''
        count = 1  # keep at most 2 consecutive identical chars
for char in word:
if char == prev_char:
count += 1
if count < 3:
buf += char
# else skip this char, so AAA -> AA, BBBB -> BB, but OO -> OO, and so on
else:
count = 1
buf += char
prev_char = char
return buf
def _check_regexps(self, regexps, word_info, accuse=True, break_on_first=True):
keys = None # assuming list regexps here
if isinstance(regexps, dict):
keys = regexps.keys()
regexps = regexps.values()
for i, regexp in enumerate(regexps):
if re.search(regexp, word_info['word']):
rule = regexp
if keys: # dict rule set
rule = list(keys)[i]
rule = self._get_rule(rule)
if accuse:
word_info['is_good'] = False
word_info['accuse'].append(rule)
else:
word_info['is_good'] = True
word_info['excuse'].append(rule)
if break_on_first:
break
class CensorRu(CensorBase):
lang = 'ru'
def _split_line(self, line):
buf, result = '', []
line = re.sub(patterns.PAT_PUNCT2, ' ', re.sub(patterns.PAT_PUNCT1, '', line))
for word in re.split(patterns.PAT_SPACE, line):
if len(word) < 3 and not re.match(self.lang_lib.patterns.PAT_PREP, word):
buf += word
else:
if buf:
result.append(buf)
buf = ''
result.append(word)
if buf:
result.append(buf)
return result
class CensorEn(CensorBase):
lang = 'en'
def _split_line(self, line):
# have some differences from russian split_line
buf, result = '', []
line = re.sub(patterns.PAT_PUNCT2, ' ', re.sub(patterns.PAT_PUNCT1, '', line))
for word in re.split(patterns.PAT_SPACE, line):
if len(word) < 3:
buf += word
else:
if buf:
result.append(buf)
buf = ''
result.append(word)
if buf:
result.append(buf)
return result
class Censor:
supported_langs = {
'ru': CensorRu,
'en': CensorEn,
}
@staticmethod
def get(lang='ru', do_compile=True, **kwargs):
if lang not in Censor.supported_langs:
raise CensorException(
                'Language {} is not yet supported: {}. Please contribute '
'to project to make it available'.format(
lang, sorted(Censor.supported_langs.keys())))
return Censor.supported_langs[lang](do_compile=do_compile, **kwargs)
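# Illustrative usage:
#   censor = Censor.get(lang='en')
#   cleaned, bad_words, bad_phrases = censor.clean_line('some text')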
| gpl-3.0 | 1,629,060,773,592,175,900 | 34.916279 | 98 | 0.520267 | false | 3.785294 | false | false | false |
bigswitch/nova | nova/console/manager.py | 1 | 4707 | # Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Console Proxy Service."""
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from nova.compute import rpcapi as compute_rpcapi
import nova.conf
from nova import exception
from nova.i18n import _LI
from nova import manager
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
class ConsoleProxyManager(manager.Manager):
"""Sets up and tears down any console proxy connections.
Needed for accessing instance consoles securely.
"""
target = messaging.Target(version='2.0')
def __init__(self, console_driver=None, *args, **kwargs):
if not console_driver:
console_driver = CONF.console_driver
self.driver = importutils.import_object(console_driver)
super(ConsoleProxyManager, self).__init__(service_name='console',
*args, **kwargs)
self.driver.host = self.host
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def reset(self):
LOG.info(_LI('Reloading compute RPC API'))
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def init_host(self):
self.driver.init_host()
def add_console(self, context, instance_id):
instance = self.db.instance_get(context, instance_id)
host = instance['host']
name = instance['name']
pool = self._get_pool_for_instance_host(context, host)
try:
console = self.db.console_get_by_pool_instance(context,
pool['id'],
instance['uuid'])
except exception.NotFound:
LOG.debug('Adding console', instance=instance)
password = utils.generate_password(8)
port = self.driver.get_port(context)
console_data = {'instance_name': name,
'instance_uuid': instance['uuid'],
'password': password,
'pool_id': pool['id']}
if port:
console_data['port'] = port
console = self.db.console_create(context, console_data)
self.driver.setup_console(context, console)
return console['id']
def remove_console(self, context, console_id):
try:
console = self.db.console_get(context, console_id)
except exception.NotFound:
LOG.debug('Tried to remove non-existent console '
'%(console_id)s.',
{'console_id': console_id})
return
self.db.console_delete(context, console_id)
self.driver.teardown_console(context, console)
def _get_pool_for_instance_host(self, context, instance_host):
context = context.elevated()
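        # a console pool ties a compute host to this proxy host for a given
        # console type; create one lazily below if it does not exist yet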
console_type = self.driver.console_type
try:
pool = self.db.console_pool_get_by_host_type(context,
instance_host,
self.host,
console_type)
except exception.NotFound:
# NOTE(mdragon): Right now, the only place this info exists is the
# compute worker's flagfile, at least for
            #                xenserver. Thus we need to ask.
pool_info = self.compute_rpcapi.get_console_pool_info(context,
console_type, instance_host)
pool_info['password'] = self.driver.fix_pool_password(
pool_info['password'])
pool_info['host'] = self.host
pool_info['public_hostname'] = CONF.console_public_hostname
pool_info['console_type'] = self.driver.console_type
pool_info['compute_host'] = instance_host
pool = self.db.console_pool_create(context, pool_info)
return pool
| apache-2.0 | -7,606,692,687,360,827,000 | 39.577586 | 78 | 0.575313 | false | 4.491412 | false | false | false |
arbrandes/django-oscar-vat_moss | tests/test_forms.py | 1 | 4943 | # coding=utf-8
from __future__ import print_function, unicode_literals
from django.test import TestCase
from oscar.core.compat import get_user_model
from oscar_vat_moss.address.forms import UserAddressForm
from oscar_vat_moss.address.models import Country
User = get_user_model()
class UserAddressFormTest(TestCase):
def setUp(self):
self.johndoe = get_user_model().objects.create_user('johndoe')
self.hansmueller = get_user_model().objects.create_user('hansmueller')
self.uk = Country.objects.create(
iso_3166_1_a2='GB', name="UNITED KINGDOM")
self.at = Country.objects.create(
iso_3166_1_a2='AT', name="AUSTRIA")
self.de = Country.objects.create(
iso_3166_1_a2='DE', name="GERMANY")
def test_valid_address(self):
# Is a valid address identified correctly?
data = dict(
user=self.johndoe,
first_name="John",
last_name="Doe",
line1="123 No Such Street",
line4="Brighton",
postcode="BN1 6XX",
country=self.uk.iso_3166_1_a2,
phone_number='+44 1273 555 999',
)
form = UserAddressForm(self.johndoe,
data)
self.assertTrue(form.is_valid())
def test_missing_phone_number(self):
        # Is an address with a missing phone number rejected?
data = dict(
user=self.johndoe,
first_name="John",
last_name="Doe",
line1="123 No Such Street",
line4="Brighton",
postcode="BN1 6XX",
country=self.uk.iso_3166_1_a2,
)
form = UserAddressForm(self.johndoe,
data)
self.assertFalse(form.is_valid())
def test_valid_vatin(self):
# Is a valid VATIN identified correctly?
data = dict(
user=self.hansmueller,
first_name="Hans",
last_name="Müller",
line1="hastexo Professional Services GmbH",
line4="Wien",
postcode="1010",
country=self.at.iso_3166_1_a2,
vatin='ATU66688202',
phone_number='+43 1 555 9999',
)
form = UserAddressForm(self.hansmueller,
data)
self.assertTrue(form.is_valid())
def test_invalid_vatin(self):
# Is an invalid VATIN identified correctly?
data = dict(
user=self.hansmueller,
first_name="Hans",
last_name="Müller",
line1="hastexo Professional Services GmbH",
line4="Wien",
postcode="1010",
country=self.at.iso_3166_1_a2,
vatin='ATU99999999',
)
form = UserAddressForm(self.hansmueller,
data)
self.assertFalse(form.is_valid())
def test_non_matching_vatin(self):
# Is a VATIN that is correct, but doesn't match the company
# name, identified correctly?
data = dict(
user=self.hansmueller,
first_name="Hans",
last_name="Müller",
line1="Example, Inc.",
line4="Wien",
postcode="1010",
country=self.at.iso_3166_1_a2,
vatin='ATU66688202',
)
form = UserAddressForm(self.hansmueller,
data)
self.assertFalse(form.is_valid())
def test_non_matching_country_and_phone_number(self):
# Is an invalid combination of country and phone number
# identified correctly?
data = dict(
user=self.hansmueller,
first_name="Hans",
last_name="Müller",
line1="Example, Inc.",
line4="Wien",
postcode="1010",
phone_number="+49 30 1234567",
country=self.at.iso_3166_1_a2,
)
form = UserAddressForm(self.hansmueller,
data)
self.assertFalse(form.is_valid())
def test_non_matching_address_and_phone_number(self):
# Is an invalid combination of postcode and phone area code,
# where this information would be relevant for a VAT
# exception, identified correctly?
data = dict(
user=self.hansmueller,
first_name="Hans",
last_name="Müller",
line1="Example, Inc.",
# Jungholz is a VAT exception area where German, not
# Austrian, VAT rates apply
line4="Jungholz",
# Correct postcode for Jungholz
postcode="6691",
# Incorrect area code (valid number, but uses Graz area
# code)
phone_number="+43 316 1234567",
country=self.at.iso_3166_1_a2,
)
form = UserAddressForm(self.hansmueller,
data)
self.assertFalse(form.is_valid())
| bsd-3-clause | -8,278,369,489,824,878,000 | 33.531469 | 78 | 0.541515 | false | 3.76659 | true | false | false |
rbharath/vs-utils | vs_utils/utils/pdb_utils.py | 3 | 3495 | """
Handle PDB files.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "BSD 3-clause"
from collections import OrderedDict
class PdbReader(object):
"""
Handle PDB files.
Also supports conversion from PDB to Amber-style PQR files.
"""
def parse_atom_record(self, line):
"""
Extract fields from a PDB ATOM or HETATM record.
See http://deposit.rcsb.org/adit/docs/pdb_atom_format.html.
Parameters
----------
line : str
PDB ATOM or HETATM line.
"""
assert line.startswith('ATOM') or line.startswith('HETATM')
fields = OrderedDict()
fields['record_name'] = line[:6]
fields['serial_number'] = int(line[6:11])
fields['atom_name'] = line[12:16]
fields['alternate_location'] = line[16]
fields['residue_name'] = line[17:20]
fields['chain'] = line[21]
fields['residue_number'] = int(line[22:26])
fields['insertion_code'] = line[26]
fields['x'] = float(line[30:38])
fields['y'] = float(line[38:46])
fields['z'] = float(line[46:54])
# parse additional fields
fields.update(self._parse_atom_record(line))
# strip extra whitespace from fields
for key in fields.keys():
try:
fields[key] = fields[key].strip()
except AttributeError:
pass
return fields
def _parse_atom_record(self, line):
"""
Parse optional fields in ATOM and HETATM records.
Parameters
----------
line : str
PDB ATOM or HETATM line.
"""
fields = OrderedDict()
try:
fields['occupancy'] = float(line[54:60])
fields['b_factor'] = float(line[60:66])
fields['segment'] = line[72:76]
fields['element'] = line[76:78]
fields['charge'] = line[78:80]
except IndexError:
pass
return fields
def pdb_to_pqr(self, pdb, charges, radii):
"""
Convert PDB to Amber-style PQR by adding charge and radius
information. See p. 68 of the Amber 14 Reference Manual.
Parameters
----------
pdb : file_like
PDB file.
charges : array_like
Atomic partial charges.
radii : array_like
Atomic radii.
"""
# only certain PDB fields are used in the Amber PQR format
pdb_fields = ['record_name', 'serial_number', 'atom_name',
'residue_name', 'chain', 'residue_number', 'x', 'y', 'z']
i = 0
pqr = ''
for line in pdb:
if line.startswith('ATOM') or line.startswith('HETATM'):
fields = self.parse_atom_record(line)
# charge and radius are added after x, y, z coordinates
pqr_fields = []
for field in pdb_fields:
value = fields[field]
if value == '':
value = '?'
pqr_fields.append(str(value))
pqr_fields.append(str(charges[i]))
pqr_fields.append(str(radii[i]))
line = ' '.join(pqr_fields) + '\n'
i += 1 # update atom count
pqr += line
# check that we covered all the atoms
assert i == len(charges) == len(radii)
return pqr
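# Minimal usage sketch (the file name and call pattern are assumptions,
# shown only to illustrate the API above):
#
#     reader = PdbReader()
#     with open('protein.pdb') as f:
#         atoms = [reader.parse_atom_record(line) for line in f
#                  if line.startswith(('ATOM', 'HETATM'))]
#
# Each entry is an OrderedDict keyed by the field names parsed above.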
| gpl-3.0 | 635,821,456,009,302,500 | 28.618644 | 79 | 0.515308 | false | 4.008028 | false | false | false |
soarlab/gelpia | src/frontend/function_transforms/pass_lift_consts.py | 1 | 6605 |
import sys
from expression_walker import walk
from pass_utils import BINOPS, UNOPS
try:
import gelpia_logging as logging
import color_printing as color
except ModuleNotFoundError:
sys.path.append("../")
import gelpia_logging as logging
import color_printing as color
logger = logging.make_module_logger(color.cyan("lift_consts"),
logging.HIGH)
def pass_lift_consts(exp, inputs):
""" Extracts constant values from an expression """
CONST = {"Const", "ConstantInterval", "Integer", "Float", "SymbolicConst"}
NON_CONST_UNOPS = {"sinh", "cosh", "tanh", "dabs", "datanh", "floor_power2",
"sym_interval"}
consts = dict()
hashed = dict()
def make_constant(exp):
if exp[0] == "Const":
assert(exp[1] in consts)
return exp
try:
key = hashed[exp]
assert(logger("Found use of existing const {}", key))
except KeyError:
key = "$_const_{}".format(len(hashed))
assert(exp not in hashed)
hashed[exp] = key
assert(key not in consts)
consts[key] = exp
assert(logger("Lifting const {} as {}", exp, key))
return ('Const', key)
def _expand_positive_atom(work_stack, count, exp):
work_stack.append((True, count, (*exp, True)))
def _expand_negative_atom(work_stack, count, exp):
assert(len(exp) == 2)
work_stack.append((True, count, (exp[0], exp[1], False)))
my_expand_dict = dict()
my_expand_dict.update(zip(CONST, [_expand_positive_atom for _ in CONST]))
my_expand_dict["Input"] = _expand_negative_atom
def _pow(work_stack, count, args):
assert(args[0] == "pow")
assert(len(args) == 3)
l, left = args[1][-1], args[1][:-1]
r, right = args[2][-1], args[2][:-1]
op = args[0]
if right[0] != "Integer":
op = "powi"
if op == "pow":
r = False
# If both are constant don't consolidate yet
status = False
if l and r:
status = True
# Otherwise consolidate any arguments that are constant
elif l:
left = make_constant(left)
elif r:
right = make_constant(right)
work_stack.append((True, count, (op, left, right, status)))
def _two_item(work_stack, count, args):
assert(len(args) == 3)
l, left = args[1][-1], args[1][:-1]
r, right = args[2][-1], args[2][:-1]
op = args[0]
# If both are constant don't consolidate yet
status = False
if l and r:
status = True
# Otherwise consolidate any arguments that are constant
elif l:
left = make_constant(left)
elif r:
right = make_constant(right)
work_stack.append((True, count, (op, left, right, status)))
def _tuple(work_stack, count, args):
assert(args[0] == "Tuple")
assert(len(args) == 3)
l, left = args[1][-1], args[1][:-1]
if len(args[2]) == 1:
r, right = False, args[2]
else:
r, right = args[2][-1], args[2][:-1]
op = args[0]
if l:
left = make_constant(left)
if r:
right = make_constant(right)
work_stack.append((True, count, (op, left, right, False)))
def _one_item(work_stack, count, args):
assert(len(args) == 2)
a, arg = args[1][-1], args[1][:-1]
op = args[0]
work_stack.append((True, count, (op, arg, a)))
def _bad_one_item(work_stack, count, args):
assert(len(args) == 2)
a, arg = args[1][-1], args[1][:-1]
op = args[0]
if a:
arg = make_constant(arg)
work_stack.append((True, count, (op, arg, False)))
def _box(work_stack, count, args):
assert(args[0] == "Box")
box = ["Box"]
for sub in args[1:]:
p, part = sub[-1], sub[:-1]
if p:
part = make_constant(part)
box.append(part)
box.append(False)
work_stack.append((True, count, tuple(box)))
def _return(work_stack, count, args):
assert(args[0] == "Return")
assert(len(args) == 2)
r, retval = args[1][-1], args[1][:-1]
if r:
retval = make_constant(retval)
return r, ("Return", retval)
my_contract_dict = dict()
my_contract_dict.update(zip(BINOPS,
[_two_item for _ in BINOPS]))
my_contract_dict.update(zip(UNOPS,
[_one_item for _ in UNOPS]))
my_contract_dict.update(zip(NON_CONST_UNOPS,
[_bad_one_item for _ in NON_CONST_UNOPS]))
my_contract_dict["Box"] = _box
my_contract_dict["Tuple"] = _tuple
my_contract_dict["pow"] = _pow
my_contract_dict["Return"] = _return
n, new_exp = walk(my_expand_dict, my_contract_dict, exp)
assert(n in {True, False})
assert(type(new_exp) is tuple)
assert(new_exp[0] not in {True, False})
return n, new_exp, consts
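# Illustrative sketch of the transformation.  The exact tuple encoding is
# inferred from the walker usage above, so treat the shapes as approximate:
#
#     _, new_exp, consts = pass_lift_consts(
#         ("Return", ("+", ("Input", "x"), ("Float", "1.5"))), inputs)
#     # new_exp ~ ("Return", ("+", ("Input", "x"), ("Const", "$_const_0")))
#     # consts  ~ {"$_const_0": ("Float", "1.5")}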
def main(argv):
logging.set_log_filename(None)
logging.set_log_level(logging.HIGH)
try:
from function_to_lexed import function_to_lexed
from lexed_to_parsed import lexed_to_parsed
from pass_lift_inputs_and_inline_assigns import \
lift_inputs_and_inline_assigns
from pass_utils import get_runmain_input
from pass_simplify import simplify
from pass_reverse_diff import reverse_diff
data = get_runmain_input(argv)
logging.set_log_level(logging.NONE)
tokens = function_to_lexed(data)
tree = lexed_to_parsed(tokens)
exp, inputs = lift_inputs_and_inline_assigns(tree)
exp = simplify(exp, inputs)
d, diff_exp = reverse_diff(exp, inputs)
diff_exp = simplify(diff_exp, inputs)
logging.set_log_level(logging.HIGH)
logger("raw: \n{}\n", data)
const, exp, consts = pass_lift_consts(diff_exp, inputs)
logger("inputs:")
for name, interval in inputs.items():
logger(" {} = {}", name, interval)
logger("consts:")
for name, val in consts.items():
logger(" {} = {}", name, val)
logger("expression:")
logger(" {}", exp)
logger("is_const: {}", const)
return 0
except KeyboardInterrupt:
logger(color.green("Goodbye"))
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| mit | -6,429,824,082,319,303,000 | 29.437788 | 80 | 0.537926 | false | 3.543455 | false | false | false |
awuth/bot-mv-telegram | modules/reddit.py | 1 | 2024 | import threading
import queue
import modules.loggers
import logging
from modules.tools import GetJson
logger = logging.getLogger(__name__)
porn_dict = dict()
lock = threading.Lock()
def Reddits(key):
    global porn_dict  # a bit dirty, but keeps the cache module-wide
if key in porn_dict.keys():
try:
lock.acquire()
content = porn_dict[key].pop()['data']['url']
logger.info('From {} len {} send {}'.format(key, len(porn_dict[key]), content))
lock.release()
return content
        except IndexError as indexError:  # empty list
logger.warning(indexError)
lock.release()
q = queue.Queue()
r = 'https://www.reddit.com'
reddits = {'asians_gif': '/r/asian_gifs/.json?limit=100', 'anal': '/r/anal/.json?limit=100',
'asianhotties': '/r/asianhotties/.json?limit=100', 'AsiansGoneWild': '/r/AsiansGoneWild/.json?limit=100',
'RealGirls': '/r/RealGirls/.json?limit=100', 'wallpapers': '/r/wallpapers/.json?limit=100',
'fitnessgirls': ['/r/JustFitnessGirls/.json?limit=100','/r/HotForFitness/.json?limit=100']}
urls = []
if key in reddits.keys():
if isinstance(reddits[key], str):
urls.append(r + reddits[key])
else:
for url in reddits[key]:
urls.append(r + url)
try:
threads = []
for url in urls:
t = threading.Thread(target=GetJson, args=(url,), kwargs=dict(queue=q), name=key)
threads.append(t)
t.start()
data = list()
for thread in threads:
thread.join()
result = q.get()
data.extend(result['data']['children'])
lock.acquire()
porn_dict[key] = data
content = porn_dict[key].pop()['data']['url']
lock.release()
logger.info('send {}'.format(content))
return content
    except Exception as e:
        # Only release on the failure path where the lock is actually held;
        # releasing an unheld threading.Lock raises RuntimeError.
        if lock.locked():
            lock.release()
        logger.warning(e)
        # str(e): concatenating str and an exception object raises TypeError.
        return "An error occurred :( " + str(e)
| gpl-3.0 | -6,717,088,257,358,481,000 | 34.508772 | 120 | 0.561759 | false | 3.550877 | false | false | false |
jcan37/startupcrate | app/views.py | 1 | 7575 | from app.models import Address, Subscription
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render
from itertools import chain
import stripe
stripe.api_key = "sk_test_7omNc4LQGjHI7viCIxfGfIr5"
def index(request):
return render(request, 'app/index.html')
def signup(request):
if request.method == 'POST':
context = {'fname': request.POST['fname'], 'lname': request.POST['lname'], 'email': request.POST['email'],
'ship_addr': request.POST['shipaddr'], 'gift_addr': request.POST['giftaddr']}
username = request.POST['username']
password = request.POST['password']
try:
user = User.objects.create_user(username, context['email'], password)
user.first_name = context['fname']
user.last_name = context['lname']
user.full_clean()
user.save()
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
except Exception as e:
            print(e)
context['user_exists'] = True
return render(request, 'app/signup.html', context)
shipping = Address(user=user, value=context['ship_addr'], personal=True)
shipping.save()
if len(context['gift_addr']) > 0:
gift = Address(user=user, value=context['gift_addr'])
gift.save()
return HttpResponseRedirect('/')
return render(request, 'app/signup.html')
def signin(request):
errors = {}
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
if 'next' in request.GET:
return HttpResponseRedirect(request.GET['next'])
else:
return HttpResponseRedirect('/')
else:
errors['disabled'] = True
else:
errors['invalid'] = True
return render(request, 'app/signin.html', errors)
def signout(request):
logout(request)
return HttpResponseRedirect('/signin/')
@login_required
def crate(request, plan):
context = {'plan': plan, 'personal_addr': Address.objects.get(user=request.user, personal=True),
'gift_addrs': Address.objects.filter(user=request.user, personal=False)}
if request.method == 'POST':
subscription = None
stripe_plan = 'startupcrate_monthly' if plan == '1' else 'startupcrate_quarterly'
if 'recipient' in request.POST:
recipient = request.POST['recipient']
address_id = request.POST['pastaddr']
address = None
if int(address_id) > 0:
address = Address.objects.get(pk=address_id)
if not address:
address = Address(user=request.user, value=request.POST['newaddr'])
else:
recipient = '{0} {1}'.format(request.user.first_name, request.user.last_name)
address = context['personal_addr']
try:
address.full_clean()
address.save()
subscription = Subscription(ship_address=address, recipient_name=recipient)
subscription.full_clean()
subscription.save()
customer = stripe.Customer.create(source=request.POST['stripeToken'], plan=stripe_plan,
email=request.POST['stripeEmail'])
subscription.stripe_customer = customer['id']
subscription.save()
return HttpResponseRedirect('/subscriptions/')
except Exception as e:
            print(e)
context['invalid'] = True
if subscription:
subscription.delete()
return render(request, 'app/crate.html', context)
@login_required
def subscriptions(request):
if request.method == 'POST':
print(request.POST)
address_id = request.POST['pastaddr']
address = None
if int(address_id) > 0:
address = Address.objects.get(pk=address_id)
if not address:
address = Address(user=request.user, value=request.POST['newaddr'])
try:
subscription = Subscription.objects.get(pk=request.POST['subscription_id'])
address.full_clean()
address.save()
subscription.ship_address = address
subscription.full_clean()
subscription.save()
except Exception as e:
            print(e)
context = {
'personal_subs': Subscription.objects.filter(
ship_address=Address.objects.filter(user=request.user, personal=True)),
'gift_addrs': Address.objects.filter(user=request.user, personal=False)
}
context['gift_subs'] = Subscription.objects.filter(ship_address__in=context['gift_addrs'])
context['subscriptions'] = list(chain(context['personal_subs'], context['gift_subs']))
return render(request, 'app/subscriptions.html', context)
@login_required
def settings(request):
context = {'ship_addr': Address.objects.get(user=request.user, personal=True)}
if request.method == 'POST':
username = request.user.get_username()
password = request.POST['password']
if authenticate(username=username, password=password) is not None:
if 'delete' in request.POST:
user = request.user
subs = Subscription.objects.filter(ship_address__in=Address.objects.filter(user=user))
for subscription in subs:
subscription.stripe_cancel()
logout(request)
user.delete()
return HttpResponseRedirect('/signup/')
request.user.first_name = request.POST['fname']
request.user.last_name = request.POST['lname']
request.user.email = request.POST['email']
new_password = request.POST['newpass']
if len(new_password) >= 8:
request.user.set_password(new_password)
try:
request.user.full_clean()
request.user.save()
update_session_auth_hash(request, request.user)
context['ship_addr'].value = request.POST['shipaddr']
context['ship_addr'].full_clean()
context['ship_addr'].save()
except Exception as e:
                print(e)
context['invalid_fields'] = True
else:
context['changes_saved'] = True
else:
context['invalid_credentials'] = True
return render(request, 'app/settings.html', context)
@login_required
def change(request, subscription_id):
try:
subscription = Subscription.objects.get(pk=subscription_id)
subscription.stripe_change_plan()
except Exception as e:
print(e)
return HttpResponseRedirect('/subscriptions/')
@login_required
def cancel(request, subscription_id):
try:
subscription = Subscription.objects.get(pk=subscription_id)
subscription.stripe_cancel()
subscription.delete()
except Exception as e:
print(e)
return HttpResponseRedirect('/subscriptions/')
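# These views would be wired up in the project's urls.py roughly as below.
# The route names and patterns are assumptions for illustration, not taken
# from this repository:
#
#     urlpatterns = [
#         url(r'^signup/$', views.signup),
#         url(r'^signin/$', views.signin),
#         url(r'^crate/(?P<plan>\d+)/$', views.crate),
#         url(r'^subscriptions/$', views.subscriptions),
#         url(r'^change/(?P<subscription_id>\d+)/$', views.change),
#         url(r'^cancel/(?P<subscription_id>\d+)/$', views.cancel),
#     ]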
| mit | 7,088,152,949,236,364,000 | 38.659686 | 114 | 0.60198 | false | 4.363479 | false | false | false |
quantopian/zipline | tests/pipeline/test_pipeline_algo.py | 1 | 27098 | """
Tests for Algorithms using the Pipeline API.
"""
from os.path import (
dirname,
join,
realpath,
)
from nose_parameterized import parameterized
import numpy as np
from numpy import (
array,
arange,
full_like,
float64,
nan,
uint32,
)
from numpy.testing import assert_almost_equal
import pandas as pd
from pandas import (
concat,
DataFrame,
date_range,
read_csv,
Series,
Timestamp,
)
from six import iteritems, itervalues
from trading_calendars import get_calendar
from zipline.api import (
attach_pipeline,
pipeline_output,
get_datetime,
)
from zipline.errors import (
AttachPipelineAfterInitialize,
PipelineOutputDuringInitialize,
NoSuchPipeline,
DuplicatePipelineName,
)
from zipline.finance.trading import SimulationParameters
from zipline.lib.adjustment import MULTIPLY
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.factors import VWAP
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.loaders.frame import DataFrameLoader
from zipline.pipeline.loaders.equity_pricing_loader import (
USEquityPricingLoader,
)
from zipline.testing import (
str_to_seconds
)
from zipline.testing import create_empty_splits_mergers_frame
from zipline.testing.fixtures import (
WithMakeAlgo,
WithAdjustmentReader,
WithBcolzEquityDailyBarReaderFromCSVs,
ZiplineTestCase,
)
from zipline.utils.pandas_utils import normalize_date
TEST_RESOURCE_PATH = join(
dirname(dirname(realpath(__file__))), # zipline_repo/tests
'resources',
'pipeline_inputs',
)
def rolling_vwap(df, length):
"Simple rolling vwap implementation for testing"
closes = df['close'].values
volumes = df['volume'].values
product = closes * volumes
out = full_like(closes, nan)
for upper_bound in range(length, len(closes) + 1):
bounds = slice(upper_bound - length, upper_bound)
out[upper_bound - 1] = product[bounds].sum() / volumes[bounds].sum()
return Series(out, index=df.index)
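# Equivalent pandas-native sketch (not used by the tests; shown only to
# clarify what the explicit loop above computes):
#
#     pv = (df['close'] * df['volume']).rolling(length).sum()
#     vwap = pv / df['volume'].rolling(length).sum()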
class ClosesAndVolumes(WithMakeAlgo, ZiplineTestCase):
START_DATE = pd.Timestamp('2014-01-01', tz='utc')
END_DATE = pd.Timestamp('2014-02-01', tz='utc')
dates = date_range(START_DATE, END_DATE, freq=get_calendar("NYSE").day,
tz='utc')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
# FIXME: This currently uses benchmark returns from the trading
# environment.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
cls.equity_info = ret = DataFrame.from_records([
{
'sid': 1,
'symbol': 'A',
'start_date': cls.dates[10],
'end_date': cls.dates[13],
'exchange': 'NYSE',
},
{
'sid': 2,
'symbol': 'B',
'start_date': cls.dates[11],
'end_date': cls.dates[14],
'exchange': 'NYSE',
},
{
'sid': 3,
'symbol': 'C',
'start_date': cls.dates[12],
'end_date': cls.dates[15],
'exchange': 'NYSE',
},
])
return ret
@classmethod
def make_exchanges_info(cls, *args, **kwargs):
return DataFrame({'exchange': ['NYSE'], 'country_code': ['US']})
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
cls.closes = DataFrame(
{sid: arange(1, len(cls.dates) + 1) * sid for sid in sids},
index=cls.dates,
dtype=float,
)
cls.volumes = cls.closes * 1000
for sid in sids:
yield sid, DataFrame(
{
'open': cls.closes[sid].values,
'high': cls.closes[sid].values,
'low': cls.closes[sid].values,
'close': cls.closes[sid].values,
'volume': cls.volumes[sid].values,
},
index=cls.dates,
)
@classmethod
def init_class_fixtures(cls):
super(ClosesAndVolumes, cls).init_class_fixtures()
cls.first_asset_start = min(cls.equity_info.start_date)
cls.last_asset_end = max(cls.equity_info.end_date)
cls.assets = cls.asset_finder.retrieve_all(cls.asset_finder.sids)
cls.trading_day = cls.trading_calendar.day
# Add a split for 'A' on its second date.
cls.split_asset = cls.assets[0]
cls.split_date = cls.split_asset.start_date + cls.trading_day
cls.split_ratio = 0.5
cls.adjustments = DataFrame.from_records([
{
'sid': cls.split_asset.sid,
'value': cls.split_ratio,
'kind': MULTIPLY,
'start_date': Timestamp('NaT'),
'end_date': cls.split_date,
'apply_date': cls.split_date,
}
])
cls.default_sim_params = SimulationParameters(
start_session=cls.first_asset_start,
end_session=cls.last_asset_end,
trading_calendar=cls.trading_calendar,
emission_rate='daily',
data_frequency='daily',
)
def make_algo_kwargs(self, **overrides):
return self.merge_with_inherited_algo_kwargs(
ClosesAndVolumes,
suite_overrides=dict(
sim_params=self.default_sim_params,
get_pipeline_loader=lambda column: self.pipeline_close_loader,
),
method_overrides=overrides,
)
def init_instance_fixtures(self):
super(ClosesAndVolumes, self).init_instance_fixtures()
# View of the data on/after the split.
self.adj_closes = adj_closes = self.closes.copy()
adj_closes.ix[:self.split_date, self.split_asset] *= self.split_ratio
self.adj_volumes = adj_volumes = self.volumes.copy()
adj_volumes.ix[:self.split_date, self.split_asset] *= self.split_ratio
self.pipeline_close_loader = DataFrameLoader(
column=USEquityPricing.close,
baseline=self.closes,
adjustments=self.adjustments,
)
self.pipeline_volume_loader = DataFrameLoader(
column=USEquityPricing.volume,
baseline=self.volumes,
adjustments=self.adjustments,
)
def expected_close(self, date, asset):
if date < self.split_date:
lookup = self.closes
else:
lookup = self.adj_closes
return lookup.loc[date, asset]
def expected_volume(self, date, asset):
if date < self.split_date:
lookup = self.volumes
else:
lookup = self.adj_volumes
return lookup.loc[date, asset]
def exists(self, date, asset):
return asset.start_date <= date <= asset.end_date
def test_attach_pipeline_after_initialize(self):
"""
Assert that calling attach_pipeline after initialize raises correctly.
"""
def initialize(context):
pass
def late_attach(context, data):
attach_pipeline(Pipeline(), 'test')
raise AssertionError("Shouldn't make it past attach_pipeline!")
algo = self.make_algo(
initialize=initialize,
handle_data=late_attach,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run()
def barf(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
algo = self.make_algo(
initialize=initialize,
before_trading_start=late_attach,
handle_data=barf,
)
with self.assertRaises(AttachPipelineAfterInitialize):
algo.run()
def test_pipeline_output_after_initialize(self):
"""
Assert that calling pipeline_output after initialize raises correctly.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
pipeline_output('test')
raise AssertionError("Shouldn't make it past pipeline_output()")
def handle_data(context, data):
raise AssertionError("Shouldn't make it past initialize!")
def before_trading_start(context, data):
raise AssertionError("Shouldn't make it past initialize!")
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
)
with self.assertRaises(PipelineOutputDuringInitialize):
algo.run()
def test_get_output_nonexistent_pipeline(self):
"""
Assert that calling add_pipeline after initialize raises appropriately.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
def handle_data(context, data):
raise AssertionError("Shouldn't make it past before_trading_start")
def before_trading_start(context, data):
pipeline_output('not_test')
raise AssertionError("Shouldn't make it past pipeline_output!")
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
)
with self.assertRaises(NoSuchPipeline):
algo.run()
@parameterized.expand([('default', None),
('day', 1),
('week', 5),
('year', 252),
('all_but_one_day', 'all_but_one_day'),
('custom_iter', 'custom_iter')])
def test_assets_appear_on_correct_days(self, test_name, chunks):
"""
Assert that assets appear at correct times during a backtest, with
correctly-adjusted close price values.
"""
if chunks == 'all_but_one_day':
chunks = (
self.dates.get_loc(self.last_asset_end) -
self.dates.get_loc(self.first_asset_start)
) - 1
elif chunks == 'custom_iter':
chunks = []
st = np.random.RandomState(12345)
remaining = (
self.dates.get_loc(self.last_asset_end) -
self.dates.get_loc(self.first_asset_start)
)
while remaining > 0:
chunk = st.randint(3)
chunks.append(chunk)
remaining -= chunk
def initialize(context):
p = attach_pipeline(Pipeline(), 'test', chunks=chunks)
p.add(USEquityPricing.close.latest, 'close')
def handle_data(context, data):
results = pipeline_output('test')
date = get_datetime().normalize()
for asset in self.assets:
# Assets should appear iff they exist today and yesterday.
exists_today = self.exists(date, asset)
existed_yesterday = self.exists(date - self.trading_day, asset)
if exists_today and existed_yesterday:
latest = results.loc[asset, 'close']
self.assertEqual(latest, self.expected_close(date, asset))
else:
self.assertNotIn(asset, results.index)
before_trading_start = handle_data
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
)
# Run for a week in the middle of our data.
algo.run()
def test_multiple_pipelines(self):
"""
Test that we can attach multiple pipelines and access the correct
output based on the pipeline name.
"""
def initialize(context):
pipeline_close = attach_pipeline(Pipeline(), 'test_close')
pipeline_volume = attach_pipeline(Pipeline(), 'test_volume')
pipeline_close.add(USEquityPricing.close.latest, 'close')
pipeline_volume.add(USEquityPricing.volume.latest, 'volume')
def handle_data(context, data):
closes = pipeline_output('test_close')
volumes = pipeline_output('test_volume')
date = get_datetime().normalize()
for asset in self.assets:
# Assets should appear iff they exist today and yesterday.
exists_today = self.exists(date, asset)
existed_yesterday = self.exists(date - self.trading_day, asset)
if exists_today and existed_yesterday:
self.assertEqual(
closes.loc[asset, 'close'],
self.expected_close(date, asset)
)
self.assertEqual(
volumes.loc[asset, 'volume'],
self.expected_volume(date, asset)
)
else:
self.assertNotIn(asset, closes.index)
self.assertNotIn(asset, volumes.index)
column_to_loader = {
USEquityPricing.close: self.pipeline_close_loader,
USEquityPricing.volume: self.pipeline_volume_loader,
}
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
get_pipeline_loader=lambda column: column_to_loader[column],
)
algo.run()
def test_duplicate_pipeline_names(self):
"""
Test that we raise an error when we try to attach a pipeline with a
name that already exists for another attached pipeline.
"""
def initialize(context):
attach_pipeline(Pipeline(), 'test')
attach_pipeline(Pipeline(), 'test')
algo = self.make_algo(initialize=initialize)
with self.assertRaises(DuplicatePipelineName):
algo.run()
class MockDailyBarSpotReader(object):
"""
A BcolzDailyBarReader which returns a constant value for spot price.
"""
def get_value(self, sid, day, column):
return 100.0
class PipelineAlgorithmTestCase(WithMakeAlgo,
WithBcolzEquityDailyBarReaderFromCSVs,
WithAdjustmentReader,
ZiplineTestCase):
AAPL = 1
MSFT = 2
BRK_A = 3
ASSET_FINDER_EQUITY_SIDS = AAPL, MSFT, BRK_A
ASSET_FINDER_EQUITY_SYMBOLS = 'AAPL', 'MSFT', 'BRK_A'
START_DATE = Timestamp('2014', tz='UTC')
END_DATE = Timestamp('2015', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
# FIXME: This currently uses benchmark returns from the trading
# environment.
BENCHMARK_SID = None
ASSET_FINDER_COUNTRY_CODE = 'US'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
resources = {
cls.AAPL: join(TEST_RESOURCE_PATH, 'AAPL.csv'),
cls.MSFT: join(TEST_RESOURCE_PATH, 'MSFT.csv'),
cls.BRK_A: join(TEST_RESOURCE_PATH, 'BRK-A.csv'),
}
cls.raw_data = raw_data = {
asset: read_csv(path, parse_dates=['day']).set_index('day')
for asset, path in resources.items()
}
# Add 'price' column as an alias because all kinds of stuff in zipline
# depends on it being present. :/
for frame in raw_data.values():
frame['price'] = frame['close']
return resources
@classmethod
def make_splits_data(cls):
return DataFrame.from_records([
{
'effective_date': str_to_seconds('2014-06-09'),
'ratio': (1 / 7.0),
'sid': cls.AAPL,
}
])
@classmethod
def make_mergers_data(cls):
return create_empty_splits_mergers_frame()
@classmethod
def make_dividends_data(cls):
return pd.DataFrame(array([], dtype=[
('sid', uint32),
('amount', float64),
('record_date', 'datetime64[ns]'),
('ex_date', 'datetime64[ns]'),
('declared_date', 'datetime64[ns]'),
('pay_date', 'datetime64[ns]'),
]))
@classmethod
def init_class_fixtures(cls):
super(PipelineAlgorithmTestCase, cls).init_class_fixtures()
cls.pipeline_loader = USEquityPricingLoader.without_fx(
cls.bcolz_equity_daily_bar_reader,
cls.adjustment_reader,
)
cls.dates = cls.raw_data[cls.AAPL].index.tz_localize('UTC')
cls.AAPL_split_date = Timestamp("2014-06-09", tz='UTC')
cls.assets = cls.asset_finder.retrieve_all(
cls.ASSET_FINDER_EQUITY_SIDS
)
def make_algo_kwargs(self, **overrides):
return self.merge_with_inherited_algo_kwargs(
PipelineAlgorithmTestCase,
suite_overrides=dict(
get_pipeline_loader=lambda column: self.pipeline_loader,
),
method_overrides=overrides,
)
def compute_expected_vwaps(self, window_lengths):
AAPL, MSFT, BRK_A = self.AAPL, self.MSFT, self.BRK_A
# Our view of the data before AAPL's split on June 9, 2014.
raw = {k: v.copy() for k, v in iteritems(self.raw_data)}
split_date = self.AAPL_split_date
split_loc = self.dates.get_loc(split_date)
split_ratio = 7.0
# Our view of the data after AAPL's split. All prices from before June
# 9 get divided by the split ratio, and volumes get multiplied by the
# split ratio.
adj = {k: v.copy() for k, v in iteritems(self.raw_data)}
for column in 'open', 'high', 'low', 'close':
adj[AAPL].ix[:split_loc, column] /= split_ratio
adj[AAPL].ix[:split_loc, 'volume'] *= split_ratio
# length -> asset -> expected vwap
vwaps = {length: {} for length in window_lengths}
for length in window_lengths:
for asset in AAPL, MSFT, BRK_A:
raw_vwap = rolling_vwap(raw[asset], length)
adj_vwap = rolling_vwap(adj[asset], length)
# Shift computed results one day forward so that they're
# labelled by the date on which they'll be seen in the
# algorithm. (We can't show the close price for day N until day
# N + 1.)
vwaps[length][asset] = concat(
[
raw_vwap[:split_loc - 1],
adj_vwap[split_loc - 1:]
]
).shift(1, self.trading_calendar.day)
# Make sure all the expected vwaps have the same dates.
vwap_dates = vwaps[1][self.AAPL].index
for dict_ in itervalues(vwaps):
# Each value is a dict mapping sid -> expected series.
for series in itervalues(dict_):
self.assertTrue((vwap_dates == series.index).all())
# Spot check expectations near the AAPL split.
# length 1 vwap for the morning before the split should be the close
# price of the previous day.
before_split = vwaps[1][AAPL].loc[split_date -
self.trading_calendar.day]
assert_almost_equal(before_split, 647.3499, decimal=2)
assert_almost_equal(
before_split,
raw[AAPL].loc[split_date - (2 * self.trading_calendar.day),
'close'],
decimal=2,
)
# length 1 vwap for the morning of the split should be the close price
# of the previous day, **ADJUSTED FOR THE SPLIT**.
on_split = vwaps[1][AAPL].loc[split_date]
assert_almost_equal(on_split, 645.5700 / split_ratio, decimal=2)
assert_almost_equal(
on_split,
raw[AAPL].loc[split_date -
self.trading_calendar.day, 'close'] / split_ratio,
decimal=2,
)
# length 1 vwap on the day after the split should be the as-traded
# close on the split day.
after_split = vwaps[1][AAPL].loc[split_date +
self.trading_calendar.day]
assert_almost_equal(after_split, 93.69999, decimal=2)
assert_almost_equal(
after_split,
raw[AAPL].loc[split_date, 'close'],
decimal=2,
)
return vwaps
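    # (The structure returned above is {window_length: {asset: Series}},
    # with each series labelled by the session on which its value becomes
    # observable to the algorithm.)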
@parameterized.expand([
(True,),
(False,),
])
def test_handle_adjustment(self, set_screen):
AAPL, MSFT, BRK_A = assets = self.assets
window_lengths = [1, 2, 5, 10]
vwaps = self.compute_expected_vwaps(window_lengths)
def vwap_key(length):
return "vwap_%d" % length
def initialize(context):
pipeline = Pipeline()
context.vwaps = []
for length in vwaps:
name = vwap_key(length)
factor = VWAP(window_length=length)
context.vwaps.append(factor)
pipeline.add(factor, name=name)
filter_ = (USEquityPricing.close.latest > 300)
pipeline.add(filter_, 'filter')
if set_screen:
pipeline.set_screen(filter_)
attach_pipeline(pipeline, 'test')
def handle_data(context, data):
today = normalize_date(get_datetime())
results = pipeline_output('test')
expect_over_300 = {
AAPL: today < self.AAPL_split_date,
MSFT: False,
BRK_A: True,
}
for asset in assets:
should_pass_filter = expect_over_300[asset]
if set_screen and not should_pass_filter:
self.assertNotIn(asset, results.index)
continue
asset_results = results.loc[asset]
self.assertEqual(asset_results['filter'], should_pass_filter)
for length in vwaps:
computed = results.loc[asset, vwap_key(length)]
expected = vwaps[length][asset].loc[today]
# Only having two places of precision here is a bit
# unfortunate.
assert_almost_equal(computed, expected, decimal=2)
# Do the same checks in before_trading_start
before_trading_start = handle_data
self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
sim_params=SimulationParameters(
start_session=self.dates[max(window_lengths)],
end_session=self.dates[-1],
data_frequency='daily',
emission_rate='daily',
trading_calendar=self.trading_calendar,
)
)
def test_empty_pipeline(self):
# For ensuring we call before_trading_start.
count = [0]
def initialize(context):
pipeline = attach_pipeline(Pipeline(), 'test')
vwap = VWAP(window_length=10)
pipeline.add(vwap, 'vwap')
# Nothing should have prices less than 0.
pipeline.set_screen(vwap < 0)
def handle_data(context, data):
pass
def before_trading_start(context, data):
context.results = pipeline_output('test')
self.assertTrue(context.results.empty)
count[0] += 1
self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
sim_params=SimulationParameters(
start_session=self.dates[0],
end_session=self.dates[-1],
data_frequency='daily',
emission_rate='daily',
trading_calendar=self.trading_calendar,
)
)
self.assertTrue(count[0] > 0)
def test_pipeline_beyond_daily_bars(self):
"""
Ensure that we can run an algo with pipeline beyond the max date
of the daily bars.
"""
# For ensuring we call before_trading_start.
count = [0]
current_day = self.trading_calendar.next_session_label(
self.pipeline_loader.raw_price_reader.last_available_dt,
)
def initialize(context):
pipeline = attach_pipeline(Pipeline(), 'test')
vwap = VWAP(window_length=10)
pipeline.add(vwap, 'vwap')
# Nothing should have prices less than 0.
pipeline.set_screen(vwap < 0)
def handle_data(context, data):
pass
def before_trading_start(context, data):
context.results = pipeline_output('test')
self.assertTrue(context.results.empty)
count[0] += 1
self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
before_trading_start=before_trading_start,
sim_params=SimulationParameters(
start_session=self.dates[0],
end_session=current_day,
data_frequency='daily',
emission_rate='daily',
trading_calendar=self.trading_calendar,
)
)
self.assertTrue(count[0] > 0)
class PipelineSequenceTestCase(WithMakeAlgo, ZiplineTestCase):
# run algorithm for 3 days
START_DATE = pd.Timestamp('2014-12-29', tz='utc')
END_DATE = pd.Timestamp('2014-12-31', tz='utc')
ASSET_FINDER_COUNTRY_CODE = 'US'
def get_pipeline_loader(self):
raise AssertionError("Loading terms for pipeline with no inputs")
def test_pipeline_compute_before_bts(self):
# for storing and keeping track of calls to BTS and TestFactor.compute
trace = []
class TestFactor(CustomFactor):
inputs = ()
# window_length doesn't actually matter for this test case
window_length = 1
def compute(self, today, assets, out):
trace.append("CustomFactor call")
def initialize(context):
pipeline = attach_pipeline(Pipeline(), 'my_pipeline')
test_factor = TestFactor()
pipeline.add(test_factor, 'test_factor')
def before_trading_start(context, data):
trace.append("BTS call")
pipeline_output('my_pipeline')
self.run_algorithm(
initialize=initialize,
before_trading_start=before_trading_start,
get_pipeline_loader=self.get_pipeline_loader,
)
# All pipeline computation calls should occur before any BTS calls,
# and the algorithm is being run for 3 days, so the first 3 calls
# should be to the custom factor and the next 3 calls should be to BTS
expected_result = ["CustomFactor call"] * 3 + ["BTS call"] * 3
self.assertEqual(trace, expected_result)
| apache-2.0 | -4,739,068,483,770,382,000 | 33.301266 | 79 | 0.56458 | false | 4.068158 | true | false | false |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.3/Lib/sre_parse.py | 3 | 24761 | #
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
# this module works under 1.5.2 and later. don't use string methods
import string, sys
from sre_constants import *
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = tuple("0123456789")
OCTDIGITS = tuple("01234567")
HEXDIGITS = tuple("0123456789abcdefABCDEF")
WHITESPACE = tuple(" \t\n\r\v\f")
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
# figure out best way to convert hex/octal numbers to integers
try:
int("10", 8)
atoi = int # 2.0 and later
except TypeError:
atoi = string.atoi # 1.5.2
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.open = []
self.groups = 1
self.groupdict = {}
def opengroup(self, name=None):
gid = self.groups
self.groups = gid + 1
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error, ("redefinition of group name %s as group %d; "
"was group %d" % (repr(name), gid, ogid))
self.groupdict[name] = gid
self.open.append(gid)
return gid
def closegroup(self, gid):
self.open.remove(gid)
def checkgroup(self, gid):
return gid < self.groups and gid not in self.open
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
def dump(self, level=0):
nl = 1
for op, av in self.data:
print level*" " + op,; nl = 0
if op == "in":
# member sublanguage
print; nl = 1
for op, a in av:
print (level+1)*" " + op, a
elif op == "branch":
print; nl = 1
i = 0
for a in av[1]:
if i > 0:
print level*" " + "or"
a.dump(level+1); nl = 1
i = i + 1
elif type(av) in (type(()), type([])):
for a in av:
if isinstance(a, SubPattern):
if not nl: print
a.dump(level+1); nl = 1
else:
print a, ; nl = 0
else:
print av, ; nl = 0
if not nl: print
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def __getslice__(self, start, stop):
return SubPattern(self.pattern, self.data[start:stop])
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width:
return self.width
lo = hi = 0L
for op, av in self.data:
if op is BRANCH:
i = sys.maxint
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[1].getwidth()
lo = lo + i
hi = hi + j
elif op in (MIN_REPEAT, MAX_REPEAT):
i, j = av[2].getwidth()
lo = lo + long(i) * av[0]
hi = hi + long(j) * av[1]
elif op in (ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY):
lo = lo + 1
hi = hi + 1
elif op == SUCCESS:
break
self.width = int(min(lo, sys.maxint)), int(min(hi, sys.maxint))
return self.width
class Tokenizer:
def __init__(self, string):
self.string = string
self.index = 0
self.__next()
def __next(self):
if self.index >= len(self.string):
self.next = None
return
char = self.string[self.index]
if char[0] == "\\":
try:
c = self.string[self.index + 1]
except IndexError:
raise error, "bogus escape (end of line)"
char = char + c
self.index = self.index + len(char)
self.next = char
def match(self, char, skip=1):
if char == self.next:
if skip:
self.__next()
return 1
return 0
def get(self):
this = self.next
self.__next()
return this
def tell(self):
return self.index, self.next
def seek(self, index):
self.index, self.next = index
def isident(char):
return "a" <= char <= "z" or "A" <= char <= "Z" or char == "_"
def isdigit(char):
return "0" <= char <= "9"
def isname(name):
# check that group name is a valid string
if not isident(name[0]):
return False
for char in name:
if not isident(char) and not isdigit(char):
return False
return True
def _group(escape, groups):
# check if the escape string represents a valid group
try:
gid = atoi(escape[1:])
if gid and gid < groups:
return gid
except ValueError:
pass
return None # not a valid group
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code:
return code
try:
if escape[1:2] == "x":
# hexadecimal escape (exactly two digits)
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
escape = escape[2:]
if len(escape) != 2:
raise error, "bogus escape: %s" % repr("\\" + escape)
return LITERAL, atoi(escape, 16) & 0xff
elif escape[1:2] in OCTDIGITS:
# octal escape (up to three digits)
while source.next in OCTDIGITS and len(escape) < 5:
escape = escape + source.get()
escape = escape[1:]
return LITERAL, atoi(escape, 8) & 0xff
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
if escape[1:2] == "x":
# hexadecimal escape
while source.next in HEXDIGITS and len(escape) < 4:
escape = escape + source.get()
if len(escape) != 4:
raise ValueError
return LITERAL, atoi(escape[2:], 16) & 0xff
elif escape[1:2] == "0":
# octal escape
while source.next in OCTDIGITS and len(escape) < 4:
escape = escape + source.get()
return LITERAL, atoi(escape[1:], 8) & 0xff
elif escape[1:2] in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape = escape + source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape = escape + source.get()
return LITERAL, atoi(escape[1:], 8) & 0xff
# got at least one decimal digit; this is a group reference
group = _group(escape, state.groups)
if group:
if not state.checkgroup(group):
raise error, "cannot refer to open group"
return GROUPREF, group
raise ValueError
if len(escape) == 2:
return LITERAL, ord(escape[1])
except ValueError:
pass
raise error, "bogus escape: %s" % repr(escape)
def _parse_sub(source, state, nested=1):
# parse an alternation: a|b|c
items = []
while 1:
items.append(_parse(source, state))
if source.match("|"):
continue
if not nested:
break
if not source.next or source.match(")", 0):
break
else:
raise error, "pattern not properly closed"
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
# check if all items share a common prefix
while 1:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpattern.append(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] != LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
set = []
for item in items:
set.append(item[0])
subpattern.append((IN, set))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse(source, state):
# parse a simple pattern
subpattern = SubPattern(state)
while 1:
if source.next in ("|", ")"):
break # end of subpattern
this = source.get()
if this is None:
break # end of pattern
if state.flags & SRE_FLAG_VERBOSE:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while 1:
this = source.get()
if this in (None, "\n"):
break
continue
if this and this[0] not in SPECIAL_CHARS:
subpattern.append((LITERAL, ord(this)))
elif this == "[":
# character set
set = []
## if source.match(":"):
## pass # handle character classes
if source.match("^"):
set.append((NEGATE, None))
# check remaining characters
start = set[:]
while 1:
this = source.get()
if this == "]" and set != start:
break
elif this and this[0] == "\\":
code1 = _class_escape(source, this)
elif this:
code1 = LITERAL, ord(this)
else:
raise error, "unexpected end of regular expression"
if source.match("-"):
# potential range
this = source.get()
if this == "]":
if code1[0] is IN:
code1 = code1[1][0]
set.append(code1)
set.append((LITERAL, ord("-")))
break
elif this:
if this[0] == "\\":
code2 = _class_escape(source, this)
else:
code2 = LITERAL, ord(this)
if code1[0] != LITERAL or code2[0] != LITERAL:
raise error, "bad character range"
lo = code1[1]
hi = code2[1]
if hi < lo:
raise error, "bad character range"
set.append((RANGE, (lo, hi)))
else:
raise error, "unexpected end of regular expression"
else:
if code1[0] is IN:
code1 = code1[1][0]
set.append(code1)
# XXX: <fl> should move set optimization to compiler!
if len(set)==1 and set[0][0] is LITERAL:
subpattern.append(set[0]) # optimization
elif len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpattern.append((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpattern.append((IN, set))
elif this and this[0] in REPEAT_CHARS:
# repeat previous item
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
here = source.tell()
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo = lo + source.get()
if source.match(","):
while source.next in DIGITS:
hi = hi + source.get()
else:
hi = lo
if not source.match("}"):
subpattern.append((LITERAL, ord(this)))
source.seek(here)
continue
if lo:
min = atoi(lo)
if hi:
max = atoi(hi)
if max < min:
raise error, "bad repeat interval"
else:
raise error, "not supported"
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (len(item) == 1 and item[0][0] == AT):
raise error, "nothing to repeat"
if item[0][0] in (MIN_REPEAT, MAX_REPEAT):
raise error, "multiple repeat"
if source.match("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpattern.append((ANY, None))
elif this == "(":
group = 1
name = None
if source.match("?"):
group = 0
# options
if source.match("P"):
# python extensions
if source.match("<"):
# named group: skip forward to end of name
name = ""
while 1:
char = source.get()
if char is None:
raise error, "unterminated name"
if char == ">":
break
name = name + char
group = 1
if not isname(name):
raise error, "bad character in group name"
elif source.match("="):
# named backreference
name = ""
while 1:
char = source.get()
if char is None:
raise error, "unterminated name"
if char == ")":
break
name = name + char
if not isname(name):
raise error, "bad character in group name"
gid = state.groupdict.get(name)
if gid is None:
raise error, "unknown group name"
subpattern.append((GROUPREF, gid))
continue
else:
char = source.get()
if char is None:
raise error, "unexpected end of pattern"
raise error, "unknown specifier: ?P%s" % char
elif source.match(":"):
# non-capturing group
group = 2
elif source.match("#"):
# comment
while 1:
if source.next is None or source.next == ")":
break
source.get()
if not source.match(")"):
raise error, "unbalanced parenthesis"
continue
elif source.next in ("=", "!", "<"):
# lookahead assertions
char = source.get()
dir = 1
if char == "<":
if source.next not in ("=", "!"):
raise error, "syntax error"
dir = -1 # lookbehind
char = source.get()
p = _parse_sub(source, state)
if not source.match(")"):
raise error, "unbalanced parenthesis"
if char == "=":
subpattern.append((ASSERT, (dir, p)))
else:
subpattern.append((ASSERT_NOT, (dir, p)))
continue
else:
# flags
if not source.next in FLAGS:
raise error, "unexpected end of pattern"
while source.next in FLAGS:
state.flags = state.flags | FLAGS[source.get()]
if group:
# parse group contents
if group == 2:
# anonymous group
group = None
else:
group = state.opengroup(name)
p = _parse_sub(source, state)
if not source.match(")"):
raise error, "unbalanced parenthesis"
if group is not None:
state.closegroup(group)
subpattern.append((SUBPATTERN, (group, p)))
else:
while 1:
char = source.get()
if char is None:
raise error, "unexpected end of pattern"
if char == ")":
break
raise error, "unknown extension"
elif this == "^":
subpattern.append((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
elif this and this[0] == "\\":
code = _escape(source, this, state)
subpattern.append(code)
else:
raise error, "parser error"
return subpattern
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
p = _parse_sub(source, pattern, 0)
tail = source.get()
if tail == ")":
raise error, "unbalanced parenthesis"
elif tail:
raise error, "bogus characters at end of regular expression"
if flags & SRE_FLAG_DEBUG:
p.dump()
if not (flags & SRE_FLAG_VERBOSE) and p.pattern.flags & SRE_FLAG_VERBOSE:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
return parse(str, p.pattern.flags)
return p
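# For illustration (output shapes are approximate; the opcodes are the
# sre_constants values imported above):
#
#     parse("ab")   ->  [(LITERAL, 97), (LITERAL, 98)]
#     parse("a|b")  ->  [(IN, [(LITERAL, 97), (LITERAL, 98)])]
#
# The second case reflects the branch-to-character-set optimization
# performed in _parse_sub.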
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
p = []
a = p.append
def literal(literal, p=p):
if p and p[-1][0] is LITERAL:
p[-1] = LITERAL, p[-1][1] + literal
else:
p.append((LITERAL, literal))
sep = source[:0]
if type(sep) is type(""):
makechar = chr
else:
makechar = unichr
while 1:
this = s.get()
if this is None:
break # end of replacement string
if this and this[0] == "\\":
# group
if this == "\\g":
name = ""
if s.match("<"):
while 1:
char = s.get()
if char is None:
raise error, "unterminated group name"
if char == ">":
break
name = name + char
if not name:
raise error, "bad group name"
try:
index = atoi(name)
except ValueError:
if not isname(name):
raise error, "bad character in group name"
try:
index = pattern.groupindex[name]
except KeyError:
raise IndexError, "unknown group name"
a((MARK, index))
elif len(this) > 1 and this[1] in DIGITS:
code = None
while 1:
group = _group(this, pattern.groups+1)
if group:
if (s.next not in DIGITS or
not _group(this + s.next, pattern.groups+1)):
code = MARK, group
break
elif s.next in OCTDIGITS:
this = this + s.get()
else:
break
if not code:
this = this[1:]
code = LITERAL, makechar(atoi(this[-6:], 8) & 0xff)
if code[0] is LITERAL:
literal(code[1])
else:
a(code)
else:
try:
this = makechar(ESCAPES[this][1])
except KeyError:
pass
literal(this)
else:
literal(this)
# convert template to groups and literals lists
i = 0
groups = []
literals = []
for c, s in p:
if c is MARK:
groups.append((i, s))
literals.append(None)
else:
literals.append(s)
i = i + 1
return groups, literals
def expand_template(template, match):
g = match.group
sep = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = s = g(group)
if s is None:
raise IndexError
except IndexError:
raise error, "empty group"
return string.join(literals, sep)
| mit | 5,199,229,913,976,841,000 | 32.506089 | 78 | 0.448851 | false | 4.455821 | false | false | false |
edonyM/toolkitem | updatesys/kernelclean.py | 1 | 8622 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - [email protected]
#
# twitter : @edonyzpc
#
# Last modified: 2015-06-02 20:50
#
# Filename: kernelclean.py
#
# Description: All Rights Are Reserved
#
"""
#import scipy as sp
#import math as m
#import matplotlib as mpl
#import matplotlib.pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D as Ax3
#from scipy import stats as st
#from matplotlib import cm
#import numpy as np
from __future__ import absolute_import
import os
import re
import sys
import subprocess as sp
import platform as pf
from getpass import getpass
import hashlib
if sys.version.startswith("3.4."):
from functools import reduce
from packages.fileparser.extractor import Extractor
class PyColor(object):
""" This class is for colored print in the python interpreter!
"F3" call Addpy() function to add this class which is defined
in the .vimrc for vim Editor."""
def __init__(self):
self.self_doc = r"""
STYLE: \033['display model';'foreground';'background'm
DETAILS:
FOREGROUND BACKGOUND COLOR
---------------------------------------
30 40 black
31 41 red
32 42 green
33 43 yellow
34 44 blue
35 45 purple
36 46 cyan
37 47 white
DISPLAY MODEL DETAILS
-------------------------
0 default
1 highlight
4 underline
5 flicker
7 reverse
8 non-visiable
e.g:
\033[1;31;40m <!--1-highlight;31-foreground red;40-background black-->
\033[0m <!--set all into default-->
"""
self.warningcolor = '\033[0;31m'
self.tipcolor = '\033[0;32m'
self.endcolor = '\033[0m'
self._newcolor = ''
@property
def new(self):
"""
Customized Python Print Color.
"""
return self._newcolor
@new.setter
def new(self, color_str):
"""
New Color.
"""
self._newcolor = color_str
def disable(self):
"""
Disable Color Print.
"""
self.warningcolor = ''
self.endcolor = ''
class KernelClean(object):
"""
Cleanup the Fedora Linux kernel after `dnf(yum) update`.
"""
def __init__(self, check=0):
self._filebuf = 'kernelclean'
self.kernel = ''
self.exist_kernels = []
self.old_kernel = []
self.kernel_clean = []
self.color = PyColor()
# self.check for manual check to remove system kernel(1 for check, 0 for not check)
self.check = check
self.record = []
def in_using_kernel(self):
"""
RPM query about the kernel existing in the system => self._filebuf
Get the version of running kernel => self.kernel
***rewrite the using kernel finding***
command_rpm_kernel = 'rpm -qa | grep "^kernel-" > '
command_rpm_kernel += self._filebuf
os.system(command_rpm_kernel)
command_kernel = 'uname -r'
pipeout = sp.Popen(command_kernel.split(), stdout=sp.PIPE)
self.kernel = pipeout.stdout.readline().rstrip().decode('utf-8')
"""
pipeout = sp.Popen('uname -r'.split(), stdout=sp.PIPE)
self.kernel = pipeout.stdout.readline().rstrip().decode('utf-8')
out = sp.Popen('rpm -qa'.split(), stdout=sp.PIPE)
for ls in out.stdout.readlines():
pattern = '^kernel-'
ls = ls.rstrip().decode('utf-8')
if re.match(pattern, ls):
self.exist_kernels.append(ls)
def find_old_kernel(self):
"""
Find the old kernel in system => self.old_kernel
"""
pattern = "^kernel-[a-zA-Z-]*([0-9.-]*)([a-zA-Z]+)(.*)"
self.record = set([re.match(pattern, item).groups() for item in self.exist_kernels])
self.old_kernel = [item for item in self.record if item[0] not in self.kernel]
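        # e.g. "kernel-core-4.0.4-301.fc22.x86_64" is split by the pattern
        # above into ('4.0.4-301.', 'fc', '22.x86_64'); tuples whose version
        # part does not appear in `uname -r` are treated as old kernels.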
def to_cleaned_kernel(self):
"""
Ensure the to be cleaned kernel in queried list => self.kernelclean
"""
if self.old_kernel:
            kernel_clean_ids = [''.join(item) for item in self.old_kernel]
            for kernel_id in kernel_clean_ids:
                self.kernel_clean.extend(
                    item for item in self.exist_kernels if kernel_id in item)
def cleanup(self):
"""
Cleanup the old kernel
"""
if self.old_kernel:
reboot = input(self.color.endcolor + 'Do You Need to Reboot System?(y or n)\n')
if reboot == 'y':
os.system('reboot')
elif reboot == 'n':
print(self.color.warningcolor + 'Cleanup Kernel ...' + self.color.endcolor)
pwd_md5 = 'b04c541ed735353c44c52984a1be27f8'
pwd = getpass("Enter Your Password: ")
if hashlib.md5(pwd.encode('utf-8')).hexdigest() != pwd_md5:
print(self.color.warningcolor + "Wrong Password" + self.color.endcolor)
                    print('\033[0;36m' + "Try Again" + '\033[0m')
pwd = getpass("Enter Your Password: ")
if hashlib.md5(pwd.encode('utf-8')).hexdigest() != pwd_md5:
return
echo = ['echo']
echo.append(pwd)
if pf.linux_distribution()[1] > '21':
command = 'sudo -S dnf -y remove '
for item in self.kernel_clean:
command += item
command += ' '
else:
command = 'sudo -S yum -y remove '
for item in self.kernel_clean:
command += item
command += ' '
pipein = sp.Popen(echo, stdout=sp.PIPE)
pipeout = sp.Popen(command.split(), stdin=pipein.stdout, stdout=sp.PIPE)
for line in pipeout.stdout.readlines():
if line == '':
break
if isinstance(line, bytes):
line = line.decode()
print(line)
print(self.color.tipcolor + 'End Cleanup!' + self.color.endcolor)
                print(self.color.warningcolor +
                      'Your Kernel is Updated!' +
                      self.color.endcolor)
def main(self):
"""
        Run the whole cleanup pipeline in order.
"""
self.in_using_kernel()
self.find_old_kernel()
self.to_cleaned_kernel()
if self.check == 1:
if self.old_kernel:
print(self.color.tipcolor + 'Your Old Kernel: ')
for item in self.old_kernel:
print(''.join(item))
print(self.color.warningcolor + 'In Using Kernel: ')
print(self.kernel + self.color.endcolor)
check_cmd = input('Remove the old kernel?(y or n)\n')
if check_cmd == 'y':
self.cleanup()
else:
print('\033[36m' + 'Do Not Remove Old kernel' + '\033[0m')
else:
print(self.color.tipcolor +\
'Your System Has No Old Kernel To Cleanup!' +\
self.color.endcolor)
if __name__ == '__main__':
TEST = KernelClean(1)
TEST.in_using_kernel()
TEST.find_old_kernel()
TEST.to_cleaned_kernel()
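    # The three calls above exercise the query steps individually;
    # TEST.main() would run the same pipeline end-to-end, including
    # the confirmation prompt and the actual removal.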
| mit | -4,482,638,685,100,434,400 | 36.982379 | 93 | 0.454651 | false | 3.936986 | false | false | false |
pyblish/pyblish-mindbender | mindbender/tools/loader/app.py | 1 | 9863 | import os
import sys
import itertools
from ...vendor.Qt import QtWidgets, QtCore
from ... import api
from .. import lib
self = sys.modules[__name__]
self._window = None
# Store previous results from api.ls()
self._cache = list()
self._use_cache = False
# Custom roles
AssetRole = QtCore.Qt.UserRole + 1
SubsetRole = QtCore.Qt.UserRole + 2
class Window(QtWidgets.QDialog):
"""Basic asset loader interface
_________________________________________
| |
| Assets |
| _____________________________________ |
| | | | |
| | Asset 1 | Subset 1 | |
| | Asset 2 | Subset 2 | |
| | ... | ... | |
| | | | |
| | | | |
| | | | |
| | | | |
| | | | |
| | | | |
| |__________________|__________________| |
| _____________________________________ |
| | | |
| | Load | |
| |_____________________________________| |
|_________________________________________|
"""
def __init__(self, parent=None):
super(Window, self).__init__(parent)
self.setWindowTitle("Asset Loader")
self.setFocusPolicy(QtCore.Qt.StrongFocus)
body = QtWidgets.QWidget()
footer = QtWidgets.QWidget()
container = QtWidgets.QWidget()
assets = QtWidgets.QListWidget()
subsets = QtWidgets.QListWidget()
# Enable loading many subsets at once
subsets.setSelectionMode(subsets.ExtendedSelection)
layout = QtWidgets.QHBoxLayout(container)
layout.addWidget(assets)
layout.addWidget(subsets)
layout.setContentsMargins(0, 0, 0, 0)
options = QtWidgets.QWidget()
layout = QtWidgets.QGridLayout(options)
layout.setContentsMargins(0, 0, 0, 0)
autoclose_checkbox = QtWidgets.QCheckBox("Close after load")
autoclose_checkbox.setCheckState(QtCore.Qt.Checked)
layout.addWidget(autoclose_checkbox, 1, 0)
layout = QtWidgets.QVBoxLayout(body)
layout.addWidget(container)
layout.addWidget(options, 0, QtCore.Qt.AlignLeft)
layout.setContentsMargins(0, 0, 0, 0)
load_button = QtWidgets.QPushButton("Load")
refresh_button = QtWidgets.QPushButton("Refresh")
stop_button = QtWidgets.QPushButton("Searching..")
stop_button.setToolTip("Click to stop searching")
message = QtWidgets.QLabel()
message.hide()
layout = QtWidgets.QVBoxLayout(footer)
layout.addWidget(load_button)
layout.addWidget(stop_button)
layout.addWidget(refresh_button)
layout.addWidget(message)
layout.setContentsMargins(0, 0, 0, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(body)
layout.addWidget(footer)
self.data = {
"state": {
"running": False,
},
"button": {
"load": load_button,
"stop": stop_button,
"autoclose": autoclose_checkbox,
},
"model": {
"assets": assets,
"subsets": subsets,
},
"label": {
"message": message,
}
}
load_button.clicked.connect(self.on_load_pressed)
stop_button.clicked.connect(self.on_stop_pressed)
refresh_button.clicked.connect(self.on_refresh_pressed)
assets.currentItemChanged.connect(self.on_assetschanged)
subsets.currentItemChanged.connect(self.on_subsetschanged)
# Defaults
self.resize(320, 350)
load_button.hide()
stop_button.setFocus()
def keyPressEvent(self, event):
"""Delegate keyboard events"""
if event.key() == QtCore.Qt.Key_Return:
return self.on_enter()
def on_enter(self):
self.on_load_pressed()
def on_assetschanged(self, *args):
assets_model = self.data["model"]["assets"]
subsets_model = self.data["model"]["subsets"]
subsets_model.clear()
asset_item = assets_model.currentItem()
# The model is empty
if asset_item is None:
return
asset = asset_item.data(AssetRole)
# The model contains an empty item
if asset is None:
return
for subset in asset["subsets"]:
item = QtWidgets.QListWidgetItem(subset["name"])
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(SubsetRole, subset)
subsets_model.addItem(item)
def on_subsetschanged(self, *args):
button = self.data["button"]["load"]
item = self.data["model"]["assets"].currentItem()
button.setEnabled(item.data(QtCore.Qt.ItemIsEnabled))
def refresh(self):
"""Load assets from disk and add them to a QListView
This method runs part-asynchronous, in that it blocks
when busy, but takes brief intermissions between each
asset found so as to lighten the load off of disk, and
to enable the artist to abort searching once the target
asset has been found.
"""
assets_model = self.data["model"]["assets"]
assets_model.clear()
state = self.data["state"]
has = {"assets": False}
module = sys.modules[__name__]
if module._use_cache:
print("Using cache..")
iterators = iter(module._cache)
else:
print("Reading from disk..")
assets = api.ls(os.path.join(api.registered_root(), "assets"))
film = api.ls(os.path.join(api.registered_root(), "film"))
iterators = itertools.chain(assets, film)
def on_next():
if not state["running"]:
return on_finished()
try:
asset = next(iterators)
# Cache for re-use
if not module._use_cache:
module._cache.append(asset)
except StopIteration:
return on_finished()
has["assets"] = True
item = QtWidgets.QListWidgetItem(asset["name"])
item.setData(QtCore.Qt.ItemIsEnabled, True)
item.setData(AssetRole, asset)
assets_model.addItem(item)
lib.defer(25, on_next)
def on_finished():
state["running"] = False
module._use_cache = True
if not has["assets"]:
item = QtWidgets.QListWidgetItem("No assets found")
item.setData(QtCore.Qt.ItemIsEnabled, False)
assets_model.addItem(item)
assets_model.setCurrentItem(assets_model.item(0))
assets_model.setFocus()
self.data["button"]["load"].show()
self.data["button"]["stop"].hide()
state["running"] = True
lib.defer(25, on_next)
def on_refresh_pressed(self):
# Clear cache
sys.modules[__name__]._cache[:] = []
sys.modules[__name__]._use_cache = False
self.refresh()
def on_stop_pressed(self):
button = self.data["button"]["stop"]
button.setText("Stopping..")
button.setEnabled(False)
self.data["state"]["running"] = False
def on_load_pressed(self):
button = self.data["button"]["load"]
if not button.isEnabled():
return
assets_model = self.data["model"]["assets"]
subsets_model = self.data["model"]["subsets"]
autoclose_checkbox = self.data["button"]["autoclose"]
asset_item = assets_model.currentItem()
for subset_item in subsets_model.selectedItems():
if subset_item is None:
return
asset = asset_item.data(AssetRole)
subset = subset_item.data(SubsetRole)
assert asset
assert subset
try:
api.registered_host().load(asset, subset)
except ValueError as e:
self.echo(e)
raise
except NameError as e:
self.echo(e)
raise
# Catch-all
except Exception as e:
self.echo("Program error: %s" % str(e))
raise
if autoclose_checkbox.checkState():
self.close()
def echo(self, message):
widget = self.data["label"]["message"]
widget.setText(str(message))
widget.show()
print(message)
def closeEvent(self, event):
print("Good bye")
self.data["state"]["running"] = False
return super(Window, self).closeEvent(event)
def show(root=None, debug=False):
"""Display Loader GUI
Arguments:
debug (bool, optional): Run loader in debug-mode,
defaults to False
"""
if self._window:
self._window.close()
        del self._window
try:
widgets = QtWidgets.QApplication.topLevelWidgets()
widgets = dict((w.objectName(), w) for w in widgets)
parent = widgets["MayaWindow"]
except KeyError:
parent = None
# Debug fixture
fixture = api.fixture(assets=["Ryan",
"Strange",
"Blonde_model"])
with fixture if debug else lib.dummy():
with lib.application():
window = Window(parent)
window.show()
window.refresh()
self._window = window
| mit | 8,429,649,018,812,399,000 | 28.441791 | 74 | 0.513738 | false | 4.448805 | false | false | false |
jpavel/cms-ucl-tau | SFrame/bin/sframe_parMaker.py | 1 | 3110 | #!/usr/bin/env python
# $Id: sframe_parMaker.py 344 2012-12-13 13:10:53Z krasznaa $
#***************************************************************************
#* @Project: SFrame - ROOT-based analysis framework for ATLAS
#* @Package: Core
#*
#* @author Stefan Ask <[email protected]> - Manchester
#* @author David Berge <[email protected]> - CERN
#* @author Johannes Haller <[email protected]> - Hamburg
#* @author A. Krasznahorkay <[email protected]> - NYU/Debrecen
#*
#***************************************************************************
# Script creating a PAR package from the contents of a directory.
# (As long as the directory follows the SFrame layout...)
# Import base module(s):
import sys
import os.path
import optparse
def main():
print " -- Proof ARchive creator for SFrame --"
parser = optparse.OptionParser( usage="%prog [options]" )
parser.add_option( "-s", "--scrdir", dest="srcdir",
action="store", type="string", default="./",
help="Directory that is to be converted" )
parser.add_option( "-o", "--output", dest="output",
action="store", type="string", default="Test.par",
help="Output PAR file" )
parser.add_option( "-m", "--makefile", dest="makefile",
action="store", type="string", default="Makefile",
help="Name of the makefile in the package" )
parser.add_option( "-i", "--include", dest="include",
action="store", type="string", default="include",
help="Directory holding the header files" )
parser.add_option( "-c", "--src", dest="src",
action="store", type="string", default="src",
help="Directory holding the source files" )
parser.add_option( "-p", "--proofdir", dest="proofdir",
action="store", type="string", default="proof",
help="Directory holding the special files for PROOF" )
parser.add_option( "-v", "--verbose", dest="verbose",
action="store_true",
help="Print verbose information about package creation" )
( options, garbage ) = parser.parse_args()
if len( garbage ):
print "The following options were not recognised:"
print ""
print " " + garbage
parser.print_help();
return
if options.verbose:
print " >> srcdir = " + options.srcdir
print " >> output = " + options.output
print " >> makefile = " + options.makefile
print " >> include = " + options.include
print " >> src = " + options.src
print " >> proofdir = " + options.proofdir
import PARHelpers
PARHelpers.PARMaker( options.srcdir, options.makefile, options.include,
options.src, options.proofdir, options.output,
options.verbose )
return
# Call the main function:
if __name__ == "__main__":
main()
| mit | 6,622,113,963,117,501,000 | 42.194444 | 80 | 0.533119 | false | 4.119205 | false | false | false |
WilliamDiakite/ExperimentationsACA | processing/basic_text_processing.py | 1 | 4416 |
import nltk
import re
import sys
import math
import matplotlib.pyplot as plt
from nltk import ne_chunk
from nltk.util import ngrams
from wordcloud import WordCloud, STOPWORDS
from collections import defaultdict, Counter
from pos_tagger import POSTagger
sys.path.insert(
0, '/Users/diakite_w/Documents/Dev/ExperimentationsACA/FrenchLefffLemmatizer')
from FrenchLefffLemmatizer import FrenchLefffLemmatizer
def read_txt(textfile):
with open(textfile, 'r') as f:
text = f.read()
text = text.replace('\n', ' ')
text = text.replace('- ', '')
text = text.replace('.', '')
text = text.replace('-', '')
text = text.replace("‘l'", 'ï')
return text
def display_wordcloud(tokens):
'''
    Display a simple wordcloud from
processed tokens
'''
# Join all tokens to make one big string
join_text = ' '.join(tokens)
wordcloud = WordCloud(background_color='white',
width=1200,
height=1000
).generate(join_text)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
def link_splitted_words(texte):
    next_elt = texte[1:]
    return next_elt
def tokenizer(text):
fll = FrenchLefffLemmatizer()
#splck = SpellChecker()
# Put everything to lower case
text = text.lower()
# Tokenize text
tokens = nltk.tokenize.word_tokenize(text)
print('Nombre de tokens dans le texte :', len(tokens))
# Remove contacted pronous from tokens
contracted_pronouns = ["l'", "m'", "n'", "d'", "c'", "j'", "qu'", "s'"]
tokens = [t[2:] if t[:2] in contracted_pronouns else t for t in tokens]
# Spell check every tokens
#tokens = [splck.correct(t) for t in tokens]
# Remove all words with len <= 2
tokens = [t for t in tokens if len(t) > 2]
#tokens = [splck.correct(t) if t not in dictionnary else t for t in tokens]
tokens = [t for t in tokens if t not in stopwords]
tokens = [fll.lemmatize(t) for t in tokens]
print('Nombre de tokens apres traitement :', len(tokens), '\n')
return tokens
def compute_tf(document, word):
    '''
    Compute the term frequency of `word` in `document`
    (occurrences of `word` divided by the total token count).
    '''
    text = read_txt(document)
    tokens = tokenizer(text)
    if not tokens:
        return 0.0
    return tokens.count(word) / len(tokens)
def extract_ngrams(tokens, n):
'''
Return list of n-grams
'''
return Counter(ngrams(tokens, n))
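# A small usage sketch (assumes 'data/simple.txt' exists; the word 'exemple'
# is illustrative only):
#
#   tf = compute_tf('data/simple.txt', 'exemple')
#   bigrams = extract_ngrams(tokenizer(read_txt('data/simple.txt')), 2)
#   print(bigrams.most_common(5))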
# Load stop words
stopwords = list(set(w.rstrip() for w in open('stopwords-fr.txt')))
# print(stopwords)
# Read text file
textfile = 'data/simple.txt'
text = read_txt(textfile)
tokens = tokenizer(text)
vocabulary = list(set(tokens))
# Compute dictionnary of word (key) and index
word_idx = {w: idx for idx, w in enumerate(vocabulary)}
#print(word_idx)
# DO YOU REALLY NEED THIS ?
# Count nb of word appearance
word_idf = defaultdict(lambda: 0)
for token in tokens:
word_idf[token] += 1
# Compute idf (with a single document this is log(1/1) = 0 for every word)
for word in vocabulary:
word_idf[word] = math.log(1 / float(1))
# Compute tf
word_tf = defaultdict(lambda: 0)
for word in tokens:
word_tf[word] += 1
for word in vocabulary:
word_tf[word] /= len(tokens)
display_wordcloud(tokens)
# Extracting n-grams
bigrams = extract_ngrams(tokens, 2)
trigrams = extract_ngrams(tokens, 3)
[print(t) for t in trigrams.most_common(3)]
[print(t) for t in bigrams.most_common(3)]
| mit | 3,313,733,803,383,973,400 | 22.089005 | 82 | 0.608617 | false | 3.186416 | false | false | false |
Stargrazer82301/CAAPR | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/plot/imagegrid.py | 1 | 23629 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.plot.imagegrid Contains the ImageGridPlotter class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import math
from scipy import ndimage
import copy
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import AxesGrid
import matplotlib.gridspec as gridspec
import glob
from matplotlib import colors
from matplotlib import cm
from matplotlib.colors import LogNorm
import pyfits
from collections import OrderedDict
from textwrap import wrap
from astropy.io import fits
from pyfits import PrimaryHDU, Header
from astropy.visualization import SqrtStretch, LogStretch
from astropy.visualization.mpl_normalize import ImageNormalize
import aplpy
import wcsaxes
import matplotlib.colors as mpl_colors
import matplotlib.colorbar as mpl_colorbar
# Import the relevant PTS classes and modules
from ...core.tools.logging import log
# -----------------------------------------------------------------
class ImageGridPlotter(object):
"""
This class ...
"""
def __init__(self, title=None):
"""
The constructor ...
:param title:
"""
# Set the title
self.title = title
# Figure and grid
self._figure = None
self._grid = None
# Properties
self.style = "dark" # "dark" or "light"
self.transparent = True
self.format = None
self.colormap = "viridis"
self.vmin = None
# -----------------------------------------------------------------
def set_title(self, title):
"""
This function ...
:param title:
:return:
"""
self.title = title
# -----------------------------------------------------------------
class StandardImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, title=None):
"""
The constructor ...
:param title:
"""
# Call the constructor of the base class
super(StandardImageGridPlotter, self).__init__(title)
# -- Attributes --
# The images to be plotted
self.images = OrderedDict()
# Masks to be overlayed on the images
self.masks = dict()
# Regions to be overlayed on the images
self.regions = dict()
# Properties
self.ncols = 7
self.width = 16
# -----------------------------------------------------------------
def run(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Make the plot
self.plot(output_path)
# -----------------------------------------------------------------
def add_image(self, image, label, mask=None, region=None):
"""
This function ...
:param image:
:param label:
:param mask:
:param region:
:return:
"""
self.images[label] = image
if mask is not None: self.masks[label] = mask
if region is not None: self.regions[label] = region
# -----------------------------------------------------------------
@property
def nimages(self):
"""
This function ...
:return:
"""
return len(self.images)
# -----------------------------------------------------------------
def plot(self, path):
"""
This function ...
:param path:
:return:
"""
# Determine the necessary number of rows
nrows = int(math.ceil(self.nimages / self.ncols))
ratio = float(nrows) / float(self.ncols)
height = ratio * self.width
# Create the figure
self._figure = plt.figure(figsize=(self.width, height))
self._figure.subplots_adjust(hspace=0.0, wspace=0.0)
#self._figure.text(0.385, 0.97, "Offset from centre (degrees)", color='black', size='16', weight='bold')
#self._figure.text(0.02, 0.615, "Offset from centre (degrees)", color='black', size='16', weight='bold', rotation='vertical')
def standard_setup(sp):
sp.set_frame_color('black')
sp.set_tick_labels_font(size='10')
sp.set_axis_labels_font(size='12')
# sp.set_tick_labels_format(xformat='hh:mm',yformat='dd:mm')
sp.set_xaxis_coord_type('scalar')
sp.set_yaxis_coord_type('scalar')
sp.set_tick_color('black')
sp.recenter(x=0.0, y=0.0, width=3., height=0.6)
sp.set_tick_xspacing(0.4)
sp.set_tick_yspacing(0.25)
sp.set_system_latex(True)
sp.tick_labels.hide()
sp.axis_labels.hide()
# Create grid
#self._grid = AxesGrid(self._figure, 111,
# nrows_ncols=(nrows, self.ncols),
# axes_pad=0.0,
# label_mode="L",
# #share_all=True,
# share_all=False,
# cbar_location="right",
# cbar_mode="single",
# cbar_size="0.5%",
# cbar_pad="0.5%") # cbar_mode="single"
gs = gridspec.GridSpec(nrows, self.ncols, wspace=0.0, hspace=0.0)
# Loop over the images
counter = 0
ax = None
for label in self.images:
row = int(counter / self.ncols)
col = counter % self.ncols
frame = self.images[label]
#ax = self._grid[counter]
subplotspec = gs[row, col]
#points = subplotspec.get_position(self._figure).get_points()
#print(points)
#x_min = points[0, 0]
#x_max = points[1, 0]
#y_min = points[0, 1]
#y_max = points[1, 1]
# width = x_max - x_min
# height = y_max - y_min
# ax = self._figure.add_axes([x_min, y_min, width, height])
#ax = plt.subplot(subplotspec)
#shareax = ax if ax is not None else None
#ax = plt.subplot(subplotspec, projection=frame.wcs.to_astropy(), sharex=shareax, sharey=shareax)
ax = plt.subplot(subplotspec, projection=frame.wcs.to_astropy())
#lon = ax.coords[0]
#lat = ax.coords[1]
#overlay = ax.get_coords_overlay('fk5')
#overlay.grid(color='white', linestyle='solid', alpha=0.5)
            # Determine the maximum value in the box and the minimum value for plotting
norm = ImageNormalize(stretch=LogStretch())
#min_value = np.nanmin(frame)
min_value = self.vmin if self.vmin is not None else np.nanmin(frame)
max_value = 0.5 * (np.nanmax(frame) + min_value)
#f1.show_colorscale(vmin=min_value, vmax=max_value, cmap="viridis")
#f1.show_beam(major=0.01, minor=0.01, angle=0, fill=True, color='white')
## f1.axis_labels.show_y()
#f1.tick_labels.set_xposition('top')
#f1.tick_labels.show()
ax.set_xticks([])
ax.set_yticks([])
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
#ax.spines['bottom'].set_color("white")
#ax.spines['top'].set_color("white")
#ax.spines['left'].set_color("white")
#ax.spines['right'].set_color("white")
ax.xaxis.label.set_color("white")
ax.yaxis.label.set_color("white")
ax.tick_params(axis='x', colors="white")
ax.tick_params(axis='y', colors="white")
# Get the color map
cmap = cm.get_cmap(self.colormap)
# Set background color
background_color = cmap(0.0)
ax.set_axis_bgcolor(background_color)
# Plot
frame[np.isnan(frame)] = 0.0
# Add mask if present
if label in self.masks: frame[self.masks[label]] = float('nan')
ax.imshow(frame, vmin=min_value, vmax=max_value, cmap=cmap, origin='lower', norm=norm, interpolation="nearest", aspect=1)
# Add region if present
if label in self.regions:
for patch in self.regions[label].to_mpl_patches():
ax.add_patch(patch)
# Add the label
ax.text(0.95, 0.95, label, color='white', transform=ax.transAxes, fontsize=10, va="top", ha="right") # fontweight='bold'
#ax.coords.grid(color='white')
counter += 1
all_axes = self._figure.get_axes()
# show only the outside spines
for ax in all_axes:
for sp in ax.spines.values():
sp.set_visible(False)
#if ax.is_first_row():
# ax.spines['top'].set_visible(True)
#if ax.is_last_row():
# ax.spines['bottom'].set_visible(True)
#if ax.is_first_col():
# ax.spines['left'].set_visible(True)
#if ax.is_last_col():
# ax.spines['right'].set_visible(True)
# Add a colourbar
#axisf3 = self._figure.add_axes(gs[row, col+1:])
subplotspec = gs[row, col+1:]
points = subplotspec.get_position(self._figure).get_points()
#print("colorbar points:", points)
x_min = points[0,0]
x_max = points[1,0]
y_min = points[0,1]
y_max = points[1,1]
#print((x_min, x_max), (y_min, y_max))
#points_flattened = points.flatten()
#print("colorbar:", points_flattened)
x_center = 0.5 * (x_min + x_max)
y_center = 0.5 * (y_min + y_max)
width = 0.9* (x_max - x_min)
height = 0.2 * (y_max - y_min)
x_min = x_center - 0.5 * width
x_max = x_center + 0.5 * width
y_min = y_center - 0.5 * height
y_max = y_center + 0.5 * height
#ax_cm = plt.subplot(points)
#ax_cm = plt.axes(points_flattened)
ax_cm = self._figure.add_axes([x_min, y_min, width, height])
cm_cm = cm.get_cmap(self.colormap)
norm_cm = mpl_colors.Normalize(vmin=0, vmax=1)
cb = mpl_colorbar.ColorbarBase(ax_cm, cmap=cm_cm, norm=norm_cm, orientation='horizontal')
cb.set_label('Flux (arbitrary units)')
# Set the title
if self.title is not None: self._figure.suptitle("\n".join(wrap(self.title, 60)))
#plt.tight_layout()
# Debugging
if type(path).__name__ == "BytesIO": log.debug("Saving the SED plot to a buffer ...")
elif path is None: log.debug("Showing the SED plot ...")
else: log.debug("Saving the SED plot to " + str(path) + " ...")
if path is not None:
# Save the figure
plt.savefig(path, bbox_inches='tight', pad_inches=0.25, transparent=self.transparent, format=self.format)
else: plt.show()
plt.close()
# -----------------------------------------------------------------
# TODO: add option to plot histograms of the residuals (DL14)
class ResidualImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, title=None):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ResidualImageGridPlotter, self).__init__(title)
# -- Attributes --
# Set the title
self.title = title
# The rows of the grid
self.rows = OrderedDict()
self.plot_residuals = []
# The names of the columns
self.column_names = ["Observation", "Model", "Residual"]
# Box (SkyRectangle) where to cut off the maps
self.box = None
self._plotted_rows = 0
self.absolute = False
# -----------------------------------------------------------------
def set_bounding_box(self, box):
"""
This function ...
:param box:
:return:
"""
self.box = box
# -----------------------------------------------------------------
def add_row(self, image_a, image_b, label, residuals=True):
"""
This function ...
:param image_a:
:param image_b:
:param label:
:param residuals:
:return:
"""
self.rows[label] = (image_a, image_b)
if residuals: self.plot_residuals.append(label)
# -----------------------------------------------------------------
def set_column_names(self, name_a, name_b, name_residual="Residual"):
"""
This function ...
:param name_a:
:param name_b:
:param name_residual:
:return:
"""
self.column_names = [name_a, name_b, name_residual]
# -----------------------------------------------------------------
def run(self, output_path):
"""
This function ...
:param output_path:
:return:
"""
# Make the plot
self.plot(output_path)
# -----------------------------------------------------------------
def clear(self):
"""
This function ...
:return:
"""
# Set default values for all attributes
self.title = None
self.rows = OrderedDict()
self.plot_residuals = []
self.column_names = ["Observation", "Model", "Residual"]
self._figure = None
self._grid = None
self._plotted_rows = 0
# -----------------------------------------------------------------
def plot(self, path):
"""
This function ...
:param path:
:return:
"""
# Determine the wcs with the smallest pixelscale
reference_wcs = None
for label in self.rows:
if reference_wcs is None or reference_wcs.average_pixelscale > self.rows[label][0].average_pixelscale: reference_wcs = copy.deepcopy(self.rows[label][0].wcs)
number_of_rows = len(self.rows)
axisratio = float(self.rows[self.rows.keys()[0]][0].xsize) / float(self.rows[self.rows.keys()[0]][0].ysize)
#print("axisratio", axisratio)
one_frame_x_size = 3.
fig_x_size = 3. * one_frame_x_size
#fig_y_size = number_of_rows * one_frame_x_size / axisratio
fig_y_size = one_frame_x_size * number_of_rows * 0.7
# Create a figure
self._figure = plt.figure(figsize=(fig_x_size, fig_y_size))
self._figure.subplots_adjust(left=0.05, right=0.95)
# Create grid
self._grid = AxesGrid(self._figure, 111,
nrows_ncols=(len(self.rows), 3),
axes_pad=0.02,
label_mode="L",
share_all=True,
cbar_location="right",
cbar_mode="single",
cbar_size="0.5%",
cbar_pad="0.5%",
) # cbar_mode="single"
for cax in self._grid.cbar_axes:
cax.toggle_label(False)
#rectangle_reference_wcs = self.box.to_pixel(reference_wcs)
data = OrderedDict()
greatest_shape = None
if self.box is not None:
for label in self.rows:
wcs = self.rows[label][0].wcs
rectangle = self.box.to_pixel(wcs)
y_min = rectangle.lower_left.y
y_max = rectangle.upper_right.y
x_min = rectangle.lower_left.x
x_max = rectangle.upper_right.x
reference = self.rows[label][0][y_min:y_max, x_min:x_max]
model = self.rows[label][1][y_min:y_max, x_min:x_max]
data[label] = (reference, model)
print(label, "box height/width ratio:", float(reference.shape[0])/float(reference.shape[1]))
if greatest_shape is None or greatest_shape[0] < reference.shape[0]: greatest_shape = reference.shape
else:
for label in self.rows:
reference = self.rows[label][0]
model = self.rows[label][1]
data[label] = (reference, model)
if greatest_shape is None or greatest_shape[0] < reference.shape[0]: greatest_shape = reference.shape
# Loop over the rows
for label in self.rows:
#wcs = self.rows[label][0].wcs
if data[label][0].shape == greatest_shape:
reference = data[label][0]
model = data[label][1]
else:
factor = float(greatest_shape[0]) / float(data[label][0].shape[0])
order = 0
reference = ndimage.zoom(data[label][0], factor, order=order)
model = ndimage.zoom(data[label][1], factor, order=order)
if self.absolute: residual = model - reference
else: residual = (model - reference)/model
# Plot the reference image
x0, x1, y0, y1, vmin, vmax = self.plot_frame(reference, label, 0)
# Plot the model image
x0, x1, y0, y1, vmin, vmax = self.plot_frame(model, label, 1, vlimits=(vmin,vmax))
# Plot the residual image
x0, x1, y0, y1, vmin, vmax = self.plot_frame(residual, label, 2, vlimits=(vmin,vmax))
self._plotted_rows += 3
#self._grid.axes_llc.set_xlim(x0, x1)
#self._grid.axes_llc.set_ylim(y0, y1)
self._grid.axes_llc.set_xticklabels([])
self._grid.axes_llc.set_yticklabels([])
self._grid.axes_llc.get_xaxis().set_ticks([]) # To remove ticks
self._grid.axes_llc.get_yaxis().set_ticks([]) # To remove ticks
# Add title if requested
#if self.title is not None: self._figure.suptitle(self.title, fontsize=12, fontweight='bold')
plt.tight_layout()
# Debugging
log.debug("Saving the SED plot to " + path + " ...")
# Save the figure
plt.savefig(path, bbox_inches='tight', pad_inches=0.25, format=self.format, transparent=self.transparent)
plt.close()
# -----------------------------------------------------------------
def plot_frame(self, frame, row_label, column_index, borders=(0,0,0,0), vlimits=None):
"""
This function ...
:param frame:
:param column_index:
:param row_label:
:param borders:
:param vlimits:
:return:
"""
grid_index = self._plotted_rows + column_index
x0 = borders[0]
y0 = borders[1]
#x1 = frame.xsize
#y1 = frame.ysize
x1 = frame.shape[1]
y1 = frame.shape[0]
#vmax = np.max(frame) # np.mean([np.max(data_ski),np.max(data_ref)])
#vmin = np.min(frame) # np.mean([np.min(data_ski),np.min(data_ref)])
#if min_int == 0.: min_int = vmin
#else: vmin = min_int
#if max_int == 0.: max_int = vmax
#else: vmax = max_int
if vlimits is None:
min_value = self.vmin if self.vmin is not None else np.nanmin(frame)
max_value = 0.5 * (np.nanmax(frame) + min_value)
else:
min_value = vlimits[0]
max_value = vlimits[1]
aspect = "equal"
if column_index != 2:
# Get the color map
cmap = cm.get_cmap(self.colormap)
# Set background color
background_color = cmap(0.0)
self._grid[grid_index].set_axis_bgcolor(background_color)
# Plot
frame[np.isnan(frame)] = 0.0
norm = ImageNormalize(stretch=LogStretch())
im = self._grid[grid_index].imshow(frame, cmap=cmap, vmin=min_value, vmax=max_value, interpolation="nearest", origin="lower", aspect=aspect, norm=norm) # 'nipy_spectral_r', 'gist_ncar_r'
else:
if self.absolute:
# Get the color map
cmap = cm.get_cmap(self.colormap)
norm = ImageNormalize(stretch=LogStretch())
else:
cmap = discrete_cmap()
min_value = 0.001
max_value = 1.
norm = None
im = self._grid[grid_index].imshow(frame, cmap=cmap, vmin=min_value, vmax=max_value, interpolation="nearest", origin="lower", aspect=aspect, norm=norm)
cb = self._grid[grid_index].cax.colorbar(im)
# cb.set_xticklabels(labelsize=1)
# grid[number+numb_of_grid].cax.toggle_label(True)
for cax in self._grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label(' ')
# cax.axis[cax.orientation].set_fontsize(3)
cax.tick_params(labelsize=3)
cax.set_ylim(min_value, max_value)
# cax.set_yticklabels([0, 0.5, 1])
if column_index == 0:
self._grid[grid_index].text(0.03, 0.95, row_label, color='black', transform=self._grid[grid_index].transAxes, fontsize=fsize + 2, fontweight='bold', va='top')
# if numb_of_grid==0:
# crea_scale_bar(grid[number+numb_of_grid],x0,x1,y0,y1,pix2sec)
# crea_scale_bar(grid[number+numb_of_grid],x0,x1,y0,y1,pix2sec)
return x0, x1, y0, y1, min_value, max_value
# -----------------------------------------------------------------
fsize = 2
def sort_numbs(arr):
numbers = []
for k in range(len(arr)):
        numb = arr[k].split('/')[-1].split('_')[-1].split('.fits')[0]
#print numb
numbers.append(numb)
a = sorted(numbers)
new_arr = []
for k in range(len(a)):
ind = numbers.index(a[k])
new_arr.append(arr[ind])
return new_arr
def line_reg(header1):
ima_pix2sec = float(header1['PIXSCALE_NEW'])
nx = int(header1['NAXIS1'])
ny = int(header1['NAXIS2'])
scale = int(round(nx/8.*ima_pix2sec,-1))
x2 = nx*9.8/10.
x1 = x2 - scale/ima_pix2sec
y1 = ny/7.
y2 = y1
return x1,y1,x2,y2,scale
# Define new colormap for residuals
def discrete_cmap(N=8):
# define individual colors as hex values
cpool = [ '#000000', '#00EE00', '#0000EE', '#00EEEE', '#EE0000','#FFFF00', '#EE00EE', '#FFFFFF']
cmap_i8 = colors.ListedColormap(cpool[0:N], 'i8')
cm.register_cmap(cmap=cmap_i8)
return cmap_i8
def define_scale_bar_length(x_extent,pix2sec):
scale_bar = round((x_extent * pix2sec) / 6.,0)
return int(5. * round(float(scale_bar)/5.)) # Length of the bar in arcsec
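# Worked example of the rounding above: with a 300-pixel extent at
# 0.4 arcsec/pixel, (300 * 0.4) / 6 = 20 arcsec, already a multiple of 5,
# so the returned bar length is 20.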
def crea_scale_bar(ax, x0, x1, y0, y1, pix2sec):
offset_x_factor = 0.98
offset_y_factor = 0.1
x_extent = x1 - x0
scale_bar_length = define_scale_bar_length(x_extent, pix2sec) / 2. #### divide by 2 !!!
    xc = math.fabs(x1) - scale_bar_length/pix2sec - (1.-offset_x_factor)*(x1-x0)
    yc = math.fabs(y0) + (y1-y0)*offset_y_factor
ax.errorbar(xc, yc, xerr=scale_bar_length/pix2sec,color='black',capsize=1,c='black')
ax.text(xc, yc, str(int(scale_bar_length*2.))+'\"', color='black',fontsize=fsize+1, horizontalalignment='center', verticalalignment='bottom')
# -----------------------------------------------------------------
| mit | 437,920,909,444,803,900 | 30.462051 | 198 | 0.509819 | false | 3.697074 | false | false | false |
arbuz001/sms-tools | workspace/A6/A6Part1.py | 1 | 5633 | import os
import sys
import numpy as np
import math
from scipy.signal import get_window
import matplotlib.pyplot as plt
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../software/models/'))
import utilFunctions as UF
import harmonicModel as HM
import sineModel as SM
import stft
import dftModel as DFT
eps = np.finfo(float).eps
"""
A6Part1 - Estimate fundamental frequency in polyphonic audio signal
Set the analysis parameters used within the function estimateF0() to obtain a good estimate of the
fundamental frequency (f0) corresponding to one melody within a complex audio signal. The signal
is a cello recording cello-double-2.wav, in which two strings are played simultaneously. One string
plays a constant drone while the other string plays a simple melody. You have to choose the analysis
parameter values such that only the f0 frequency of the simple melody is tracked.
The input argument to the function is the wav file name including the path (inputFile). The function
returns a numpy array of the f0 frequency values for each audio frame. For this question we take
hopSize (H) = 256 samples.
estimateF0() calls f0Detection() function of the harmonicModel.py, which uses the two way mismatch
algorithm for f0 estimation.
estimateF0() also plots the f0 contour on top of the spectrogram of the audio signal for you to
visually analyse the performance of your chosen values for the analysis parameters. In this question
we will only focus on the time segment between 0.5 and 4 seconds. So, your analysis parameter values
should produce a good f0 contour in this time region.
In addition to plotting the f0 contour on the spectrogram, this function also synthesizes the f0
contour. You can also evaluate the performance of your chosen analysis parameter values by listening
to this synthesized wav file named 'synthF0Contour.wav'
Since there can be numerous combinations of the optimal analysis parameter values, the evaluation is
done solely on the basis of the output f0 sequence. Note that only the segment of the f0 contour
between time 0.5 to 4 seconds is used to evaluate the performance of f0 estimation.
Your assignment will be tested only on inputFile = '../../sounds/cello-double-2.wav'. So choose the
analysis parameters using which the function estimates the f0 frequency contour corresponding to the
string playing simple melody and not the drone. There is no separate test case for this question.
You can keep working with the wav file mentioned above and when you think the performance is
satisfactory you can submit the assignment. The plots can help you achieve a good performance.
Be cautious while choosing the window size. Window size should be large enough to resolve the spectral
peaks and small enough to preserve the note transitions. Very large window sizes may smear the f0
contour at note transitions.
Depending on the parameters you choose and the capabilities of the hardware you use, the function
might take a while to run (even half a minute in some cases). For this part of the assignment please
refrain from posting your analysis parameters on the discussion forum.
"""
def estimateF0(inputFile = '../../sounds/cello-double-2.wav'):
"""
Function to estimate fundamental frequency (f0) in an audio signal. This function also plots the
f0 contour on the spectrogram and synthesize the f0 contour.
Input:
inputFile (string): wav file including the path
Output:
f0 (numpy array): array of the estimated fundamental frequency (f0) values
"""
### Change these analysis parameter values marked as XX
window = 'blackman'
M = 6096
N = 4096*4
f0et = 5.0
t = -60
minf0 = 40
maxf0 = 215
### Do not modify the code below
H = 256 #fix hop size
fs, x = UF.wavread(inputFile) #reading inputFile
w = get_window(window, M) #obtaining analysis window
### Method 1
f0 = HM.f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et) #estimating F0
    startFrame = int(np.floor(0.5*fs/H))
    endFrame = int(np.ceil(4.0*fs/H))
f0[:startFrame] = 0
f0[endFrame:] = 0
y = UF.sinewaveSynth(f0, 0.8, H, fs)
UF.wavwrite(y, fs, 'synthF0Contour.wav')
## Code for plotting the f0 contour on top of the spectrogram
# frequency range to plot
maxplotfreq = 500.0
fontSize = 16
plot = 1
fig = plt.figure()
ax = fig.add_subplot(111)
mX, pX = stft.stftAnal(x, fs, w, N, H) #using same params as used for analysis
mX = np.transpose(mX[:,:int(N*(maxplotfreq/fs))+1])
timeStamps = np.arange(mX.shape[1])*H/float(fs)
binFreqs = np.arange(mX.shape[0])*fs/float(N)
plt.pcolormesh(timeStamps, binFreqs, mX)
plt.plot(timeStamps, f0, color = 'k', linewidth=1.5)
plt.plot([0.5, 0.5], [0, maxplotfreq], color = 'b', linewidth=1.5)
plt.plot([4.0, 4.0], [0, maxplotfreq], color = 'b', linewidth=1.5)
plt.autoscale(tight=True)
plt.ylabel('Frequency (Hz)', fontsize = fontSize)
plt.xlabel('Time (s)', fontsize = fontSize)
plt.legend(('f0',))
xLim = ax.get_xlim()
yLim = ax.get_ylim()
ax.set_aspect((xLim[1]-xLim[0])/(2.0*(yLim[1]-yLim[0])))
    if plot == 1:  # show the plot interactively
plt.autoscale(tight=True)
plt.show()
else:
fig.tight_layout()
fig.savefig('f0_over_Spectrogram.png', dpi=150, bbox_inches='tight')
return f0
| agpl-3.0 | 1,876,600,063,382,683,600 | 41.674242 | 103 | 0.693591 | false | 3.599361 | false | false | false |
aliparsai/LittleDarwin | littledarwin/JavaIO.py | 1 | 6794 | import fnmatch
import io
import os
import shutil
from typing import Dict, List
class JavaIO(object):
"""
"""
def __init__(self, verbose=False):
        self.verbose = verbose
self.sourceDirectory = None
self.targetDirectory = None
self.fileList = list()
def filterFiles(self, mode="blacklist", filterList=None):
"""
:param mode:
:type mode:
:param filterList:
:type filterList:
:return:
:rtype:
"""
if filterList is None:
return
assert isinstance(filterList, list)
assert mode == "blacklist" or mode == "whitelist"
alteredList = list()
packageList = list()
cuList = list()
for statement in filterList:
if '\\' in statement or '/' in statement:
cuList.append(statement)
else:
packageList.append(statement)
for packageName in packageList:
if str(packageName).strip() == "":
continue
# we need to do this so that we avoid partial matching
dirList = list()
dirList.append("")
dirList.extend(packageName.strip().split("."))
dirList.append("")
dirName = os.sep.join(dirList)
alteredList.extend([x for x in self.fileList if dirName in os.sep.join(["", x, ""])])
for cuName in cuList:
alteredList.extend([x for x in self.fileList if cuName in x])
if mode == "whitelist":
self.fileList = list(set(alteredList))
elif mode == "blacklist":
self.fileList = list(set(self.fileList) - set(alteredList))
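    # Usage sketch (the package and path names are illustrative only):
    #
    #   io = JavaIO()
    #   io.fileList = ['src/org/example/app/Main.java', 'src/org/other/Util.java']
    #   io.filterFiles(mode='whitelist', filterList=['org.example.app'])
    #   # -> keeps only the files under the org/example/app package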
def listFiles(self, targetPath=None, buildPath=None, filterList=None, filterType="blacklist", desiredType="*.java"):
"""
:param targetPath:
:type targetPath:
:param buildPath:
:type buildPath:
:param filterList:
:type filterList:
:param filterType:
:type filterType:
:param desiredType:
:type desiredType:
"""
# print targetPath, desiredType
self.sourceDirectory = targetPath
self.targetDirectory = os.path.abspath(os.path.join(buildPath, "LittleDarwinResults"))
for root, dirnames, filenames in os.walk(self.sourceDirectory):
for filename in fnmatch.filter(filenames, desiredType):
self.fileList.append(os.path.join(root, filename))
self.filterFiles(mode=filterType, filterList=filterList)
if not os.path.exists(self.targetDirectory):
os.makedirs(self.targetDirectory)
def getFileContent(self, filePath=None):
"""
:param filePath:
:type filePath:
:return:
:rtype:
"""
with io.open(filePath, mode='r', errors='replace') as contentFile:
file_data = contentFile.read()
normalizedData = str(file_data)
return normalizedData
def getAggregateComplexityReport(self, mutantDensityPerMethod: Dict[str, int],
cyclomaticComplexityPerMethod: Dict[str, int],
linesOfCodePerMethod: Dict[str, int]) -> Dict[str, List[int]]:
"""
:param mutantDensityPerMethod:
:type mutantDensityPerMethod:
:param cyclomaticComplexityPerMethod:
:type cyclomaticComplexityPerMethod:
:param linesOfCodePerMethod:
:type linesOfCodePerMethod:
:return:
:rtype:
"""
aggregateReport = dict()
methodList = set(mutantDensityPerMethod.keys())
methodList.update(cyclomaticComplexityPerMethod.keys())
methodList.update(linesOfCodePerMethod.keys())
for method in methodList:
aggregateReport[method] = [mutantDensityPerMethod.get(method, 0),
cyclomaticComplexityPerMethod.get(method, 1),
linesOfCodePerMethod.get(method, 0)]
return aggregateReport
def generateNewFile(self, originalFile=None, fileData=None, mutantsPerLine=None, densityReport=None, aggregateComplexity=None):
"""
:param originalFile:
:type originalFile:
:param fileData:
:type fileData:
:param mutantsPerLine:
:type mutantsPerLine:
:param densityReport:
:type densityReport:
:param aggregateComplexity:
:type aggregateComplexity:
:return:
:rtype:
"""
originalFileRoot, originalFileName = os.path.split(originalFile)
targetDir = os.path.join(self.targetDirectory, os.path.relpath(originalFileRoot, self.sourceDirectory),
originalFileName)
if not os.path.exists(targetDir):
os.makedirs(targetDir)
if not os.path.isfile(os.path.join(targetDir, "original.java")):
shutil.copyfile(originalFile, os.path.join(targetDir, "original.java"))
if mutantsPerLine is not None and densityReport is not None and aggregateComplexity is not None:
densityPerLineCSVFile = os.path.abspath(os.path.join(targetDir, "MutantDensityPerLine.csv"))
complexityPerMethodCSVFile = os.path.abspath(os.path.join(targetDir, "ComplexityPerMethod.csv"))
densityReportFile = os.path.abspath(os.path.join(targetDir, "aggregate.html"))
if not os.path.isfile(complexityPerMethodCSVFile) or not os.path.isfile(
densityPerLineCSVFile) or not os.path.isfile(densityReportFile):
with open(densityPerLineCSVFile, 'w') as densityFileHandle:
for key in sorted(mutantsPerLine.keys()):
densityFileHandle.write(str(key) + ',' + str(mutantsPerLine[key]) + '\n')
with open(complexityPerMethodCSVFile, 'w') as densityFileHandle:
for key in sorted(aggregateComplexity.keys()):
line = [str(key)]
line.extend([str(x) for x in aggregateComplexity[key]])
densityFileHandle.write(";".join(line) + '\n')
with open(densityReportFile, 'w') as densityFileHandle:
densityFileHandle.write(densityReport)
counter = 1
while os.path.isfile(os.path.join(targetDir, str(counter) + ".java")):
counter += 1
targetFile = os.path.abspath(os.path.join(targetDir, str(counter) + ".java"))
with open(targetFile, 'w') as contentFile:
contentFile.write(fileData)
if self.verbose:
print("--> generated file: ", targetFile)
return os.path.relpath(targetFile, self.targetDirectory)
| gpl-3.0 | -3,523,335,284,614,272,500 | 34.94709 | 131 | 0.59479 | false | 4.254227 | false | false | false |
jgillis/casadi | documentation/api-doc/extra/doxy2swigX.py | 1 | 5378 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from doxy2swig import *
import sys
import ipdb
import texttable
def astext(node,whitespace=False,escape=True):
r = []
if node.nodeType == node.TEXT_NODE:
d = node.data
if escape:
d = d.replace('\\', r'\\\\')
d = d.replace('"', r'\"')
if not(whitespace):
d = d.strip()
r.append(d)
elif hasattr(node,'childNodes'):
for node in node.childNodes:
r.append(astext(node,whitespace=whitespace,escape=escape))
return (" ".join(r)).strip()
class Doxy2SWIG_X(Doxy2SWIG):
def clean_pieces(self, pieces):
"""Cleans the list of strings given as `pieces`. It replaces
multiple newlines by a maximum of 2 and returns a new list.
It also wraps the paragraphs nicely.
"""
ret = []
count = 0
for i in pieces:
if i == '\n':
count = count + 1
else:
if i == '";':
if count:
ret.append('\n')
elif count > 2:
ret.append('\n\n')
elif count:
ret.append('\n'*count)
count = 0
ret.append(i)
_data = "".join(ret)
ret = []
for i in _data.split('\n\n'):
if i == 'Parameters:' or i == 'Exceptions:':
ret.extend([i, '\n-----------', '\n\n'])
elif i.find('// File:') > -1: # leave comments alone.
ret.extend([i, '\n'])
else:
if i.strip().startswith(">"):
_tmp = i.strip()
else:
_tmp = textwrap.fill(i.strip(), 80-4, break_long_words=False)
_tmp = self.lead_spc.sub(r'\1"\2', _tmp)
ret.extend([_tmp, '\n\n'])
return ret
def write(self, fname):
o = my_open_write(fname)
if self.multi:
for p in self.pieces:
o.write(p.encode("ascii","ignore"))
else:
for p in self.clean_pieces(self.pieces):
o.write(p.encode("ascii","ignore"))
o.close()
def do_doxygenindex(self, node):
self.multi = 1
comps = node.getElementsByTagName('compound')
for c in comps:
refid = c.attributes['refid'].value
fname = refid + '.xml'
if not os.path.exists(fname):
fname = os.path.join(self.my_dir, fname)
if not self.quiet:
print "parsing file: %s"%fname
p = Doxy2SWIG_X(fname, self.include_function_definition, self.quiet)
p.generate()
self.pieces.extend(self.clean_pieces(p.pieces))
def do_table(self, node):
caption = node.getElementsByTagName("caption")
if len(caption)==1:
self.add_text(">" + astext(caption[0]).encode("ascii","ignore")+"\n")
rows = []
for (i,row) in enumerate(node.getElementsByTagName("row")):
rows.append([])
for (j,entry) in enumerate(row.getElementsByTagName("entry")):
rows[i].append(astext(entry,escape=False).encode("ascii","ignore"))
table = texttable.Texttable(max_width=80-4)
table.add_rows(rows)
d = table.draw()
d = d.replace('\\', r'\\\\')
d = d.replace('"', r'\"')
self.add_text(d)
self.add_text("\n")
#print table.draw()
#for row in rows:
# self.add_text("*")
# r = " "
# for col in row:
# r+=col+" | "
# self.add_text(col[:-1]+"\n")
#self.generic_parse(node, pad=1)
def convert(input, output, include_function_definition=True, quiet=False):
p = Doxy2SWIG_X(input, include_function_definition, quiet)
p.generate()
p.write(output)
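# Example call (the file names are illustrative): convert('index.xml', 'doc.i')
# parses the Doxygen XML starting at the index and writes the SWIG docstring
# file.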
def main():
usage = __doc__
parser = optparse.OptionParser(usage)
parser.add_option("-n", '--no-function-definition',
action='store_true',
default=False,
dest='func_def',
help='do not include doxygen function definitions')
parser.add_option("-q", '--quiet',
action='store_true',
default=False,
dest='quiet',
help='be quiet and minimize output')
options, args = parser.parse_args()
if len(args) != 2:
parser.error("error: no input and output specified")
    convert(args[0], args[1], not options.func_def, options.quiet)
if __name__ == '__main__':
main()
| lgpl-3.0 | -6,184,439,807,509,266,000 | 31.203593 | 90 | 0.544812 | false | 3.714088 | false | false | false |
DxCx/plugin.video.9anime | resources/lib/ui/js2py/constructors/jsarraybuffer.py | 22 | 1059 | # this is based on jsarray.py
# todo check everything :)
from ..base import *
try:
import numpy
except:
pass
@Js
def ArrayBuffer():
a = arguments[0]
if isinstance(a, PyJsNumber):
length = a.to_uint32()
if length!=a.value:
raise MakeError('RangeError', 'Invalid array length')
temp = Js(bytearray([0]*length))
return temp
return Js(bytearray([0]))
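# Mirrors JavaScript semantics: the byte length comes from ToUint32, so
# e.g. new ArrayBuffer(3) yields 3 zero bytes, while new ArrayBuffer(3.5)
# raises a RangeError because 3.5 != ToUint32(3.5).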
ArrayBuffer.create = ArrayBuffer
ArrayBuffer.own['length']['value'] = Js(None)
ArrayBuffer.define_own_property('prototype', {'value': ArrayBufferPrototype,
'enumerable': False,
'writable': False,
'configurable': False})
ArrayBufferPrototype.define_own_property('constructor', {'value': ArrayBuffer,
'enumerable': False,
'writable': False,
'configurable': True})
| gpl-3.0 | -1,120,671,688,988,825,900 | 30.147059 | 78 | 0.494806 | false | 4.880184 | false | false | false |
jungle85gopy/pyPractice | class_demo/class_demo.py | 1 | 1874 | #!/usr/bin/env python
# coding=utf-8
def func1(lens):
for i in xrange(lens):
print(i)
def p1():
print(" ... 1")
def p2():
print(" ... 2")
def pxx():
print(" ... pxx")
# test func
def test_func():
print('\n------- test func ------')
print("... style 1 ...")
func1(3)
print("... style 2 ...")
f = func1
f(4)
# not use: {1:'p1', ...}
func_dict = {1: p1, 2:p2, 3:pxx}
for x in func_dict.iterkeys():
print('--dict %s' % x)
func_dict[x]()
# object oriented programming
# advanced programming
class animal:
def __init__(self, name):
self.name = name
print(self.name)
def say_name(self):
print(self.name)
@staticmethod
def say_no_name():
print('static method xxx')
@staticmethod
def print_help():
print('this is a class for gen animal')
def test_animal():
print('\n--- test animal ---')
a1 = animal(u'cat')
a1.say_name()
a1.say_no_name()
animal.print_help()
animal.say_no_name()
class A():
def __init__(self, name):
self.name = name
print("constructor A was called!")
print("name is %s" % self.name)
class B(A):
def __init__(self, name, age):
A.__init__(self, name)
self.age = age
print("constructor B was called!")
print("age is %d" % self.age)
class C(B):
def __init__(self, name, age):
B.__init__(self, name, age)
print("constructor C was called!")
def test_contructor():
print('\n------- test constructor ------')
c = C("ccc", 23)
print("c's name is %s" % c.name)
print("c's age is %s" % c.age)
# if an attribute is named '__name', it cannot be accessed as c.__name outside
# the class (Python mangles the name); the same applies to method names, e.g.:
# def __xxx()
test_func()
test_animal()
test_contructor()
| gpl-2.0 | -6,402,338,555,262,592,000 | 16.514019 | 68 | 0.51334 | false | 3.181664 | true | false | false |
ZeitOnline/zeit.content.image | src/zeit/content/image/metadata.py | 1 | 5422 | from zeit.cms.content.interfaces import WRITEABLE_ALWAYS
import grokcore.component as grok
import lxml.objectify
import zeit.cms.content.dav
import zeit.content.image.interfaces
import zeit.content.image.image
import zope.component
import zope.interface
import zope.schema
class ImageMetadata(object):
zope.interface.implements(zeit.content.image.interfaces.IImageMetadata)
zeit.cms.content.dav.mapProperties(
zeit.content.image.interfaces.IImageMetadata,
zeit.content.image.interfaces.IMAGE_NAMESPACE,
('alt', 'caption', 'links_to', 'nofollow', 'origin'))
zeit.cms.content.dav.mapProperties(
zeit.content.image.interfaces.IImageMetadata,
'http://namespaces.zeit.de/CMS/document',
('title',))
zeit.cms.content.dav.mapProperties(
zeit.content.image.interfaces.IImageMetadata,
zeit.content.image.interfaces.IMAGE_NAMESPACE,
('external_id',), writeable=WRITEABLE_ALWAYS)
# XXX Since ZON-4106 there should only be one copyright and the api has
# been adjusted to 'copyright'. For bw-compat reasons the DAV property is
# still called 'copyrights'
_copyrights = zeit.cms.content.dav.DAVProperty(
zeit.content.image.interfaces.IImageMetadata['copyright'],
'http://namespaces.zeit.de/CMS/document', 'copyrights',
use_default=True)
@property
def copyright(self):
value = self._copyrights
if not value:
return
# Migration for exactly one copyright (ZON-4106)
if type(value[0]) is tuple:
value = value[0]
# Migration for nofollow (VIV-104)
if len(value) == 2:
value = (value[0], None, None, value[1], False)
# Migration for companies (ZON-3174)
if len(value) == 3:
value = (value[0], None, None, value[1], value[2])
return value
@copyright.setter
def copyright(self, value):
self._copyrights = value
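    # Stored shape (the middle two field names are assumptions inferred from
    # the migrations above): a tuple of
    #   (text, company, company_text, link, nofollow)
    # where legacy 2-tuples (text, link) and 3-tuples (text, link, nofollow)
    # are upgraded on read by the getter.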
zeit.cms.content.dav.mapProperties(
zeit.content.image.interfaces.IImageMetadata,
'http://namespaces.zeit.de/CMS/meta',
('acquire_metadata',))
def __init__(self, context):
self.context = context
@zope.interface.implementer(zeit.connector.interfaces.IWebDAVProperties)
@zope.component.adapter(ImageMetadata)
def metadata_webdav_properties(context):
return zeit.connector.interfaces.IWebDAVProperties(
context.context)
@grok.implementer(zeit.content.image.interfaces.IImageMetadata)
@grok.adapter(zeit.content.image.interfaces.IImage)
def metadata_for_image(image):
metadata = ImageMetadata(image)
# Be sure to get the image in the repository
parent = None
if image.uniqueId:
image_in_repository = parent = zeit.cms.interfaces.ICMSContent(
image.uniqueId, None)
if image_in_repository is not None:
parent = image_in_repository.__parent__
if zeit.content.image.interfaces.IImageGroup.providedBy(parent):
# The image *is* in an image group.
if metadata.acquire_metadata is None or metadata.acquire_metadata:
group_metadata = zeit.content.image.interfaces.IImageMetadata(
parent)
if zeit.cms.workingcopy.interfaces.ILocalContent.providedBy(image):
for name, field in zope.schema.getFieldsInOrder(
zeit.content.image.interfaces.IImageMetadata):
value = getattr(group_metadata, name, None)
setattr(metadata, name, value)
metadata.acquire_metadata = False
else:
# For repository content return the metadata of the group.
metadata = group_metadata
return metadata
@grok.adapter(zeit.content.image.image.TemporaryImage)
@grok.implementer(zeit.content.image.interfaces.IImageMetadata)
def metadata_for_synthetic(context):
return zeit.content.image.interfaces.IImageMetadata(context.__parent__)
class XMLReferenceUpdater(zeit.cms.content.xmlsupport.XMLReferenceUpdater):
target_iface = zeit.content.image.interfaces.IImageMetadata
def update_with_context(self, entry, context):
def set_attribute(name, value):
if value:
entry.set(name, value)
else:
entry.attrib.pop(name, None)
set_attribute('origin', context.origin)
set_attribute('title', context.title)
set_attribute('alt', context.alt)
# XXX This is really ugly: XMLReference type 'related' uses href for
# the uniqueId, but type 'image' uses 'src' or 'base-id' instead, and
# reuses 'href' for the link information. And since XMLReferenceUpdater
# is called for all types of reference, we need to handle both ways.
if entry.get('src') or entry.get('base-id'):
set_attribute('href', context.links_to)
if context.nofollow:
set_attribute('rel', 'nofollow')
entry['bu'] = context.caption or None
for child in entry.iterchildren('copyright'):
entry.remove(child)
if context.copyright is None:
return
text, company, freetext, link, nofollow = context.copyright
node = lxml.objectify.E.copyright(text)
if link:
node.set('link', link)
if nofollow:
node.set('rel', 'nofollow')
entry.append(node)
| bsd-3-clause | -102,362,284,878,301,070 | 35.884354 | 79 | 0.655662 | false | 3.909156 | false | false | false |
azumimuo/family-xbmc-addon | plugin.video.hive/default.py | 1 | 36889 | '''
Hive for XBMC Plugin
Copyright (C) 2013-2014 ddurdle
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import xbmc, xbmcgui, xbmcplugin, xbmcaddon
import sys
import urllib
import cgi
import re
import xbmcvfs
# global variables
PLUGIN_NAME = 'hive'
#helper methods
def log(msg, err=False):
if err:
xbmc.log(addon.getAddonInfo('name') + ': ' + msg, xbmc.LOGERROR)
else:
xbmc.log(addon.getAddonInfo('name') + ': ' + msg, xbmc.LOGDEBUG)
def parse_query(query):
queries = cgi.parse_qs(query)
q = {}
for key, value in queries.items():
q[key] = value[0]
q['mode'] = q.get('mode', 'main')
return q
def addMediaFile(service, package):
listitem = xbmcgui.ListItem(package.file.displayTitle(), iconImage=package.file.thumbnail,
thumbnailImage=package.file.thumbnail)
if package.file.type == package.file.AUDIO:
if package.file.hasMeta:
infolabels = decode_dict({ 'title' : package.file.displayTitle(), 'tracknumber' : package.file.trackNumber, 'artist': package.file.artist, 'album': package.file.album,'genre': package.file.genre,'premiered': package.file.releaseDate, 'date' : package.file.date, 'size' : package.file.size})
else:
infolabels = decode_dict({ 'title' : package.file.displayTitle(), 'date' : package.file.date, 'size' : package.file.size })
listitem.setInfo('Music', infolabels)
playbackURL = '?mode=audio'
elif package.file.type == package.file.VIDEO:
infolabels = decode_dict({ 'title' : package.file.displayTitle() , 'plot' : package.file.plot, 'date' : package.file.date, 'size' : package.file.size })
listitem.setInfo('Video', infolabels)
playbackURL = '?mode=video'
elif package.file.type == package.file.PICTURE:
infolabels = decode_dict({ 'title' : package.file.displayTitle() , 'plot' : package.file.plot, 'date' : package.file.date, 'size' : package.file.size })
listitem.setInfo('Pictures', infolabels)
playbackURL = '?mode=photo'
else:
infolabels = decode_dict({ 'title' : package.file.displayTitle() , 'plot' : package.file.plot })
listitem.setInfo('Video', infolabels)
playbackURL = '?mode=video'
listitem.setProperty('IsPlayable', 'true')
listitem.setProperty('fanart_image', package.file.fanart)
cm=[]
try:
url = package.getMediaURL()
cleanURL = re.sub('---', '', url)
cleanURL = re.sub('&', '---', cleanURL)
except:
cleanURL = ''
# url = PLUGIN_URL+'?mode=streamurl&title='+package.file.title+'&url='+cleanURL
url = PLUGIN_URL+playbackURL+'&instance='+str(service.instanceName)+'&title='+package.file.title+'&filename='+package.file.id
if package.file.isEncoded == False:
cm.append(( addon.getLocalizedString(30086), 'XBMC.RunPlugin('+PLUGIN_URL+'?mode=requestencoding&instance='+str(service.instanceName)+'&title='+package.file.title+'&filename='+package.file.id+')', ))
cm.append(( addon.getLocalizedString(30042), 'XBMC.RunPlugin('+PLUGIN_URL+'?mode=buildstrm&username='+str(service.authorization.username)+'&title='+package.file.title+'&filename='+package.file.id+')', ))
# cm.append(( addon.getLocalizedString(30046), 'XBMC.PlayMedia('+playbackURL+'&title='+ package.file.title + '&directory='+ package.folder.id + '&filename='+ package.file.id +'&playback=0)', ))
# cm.append(( addon.getLocalizedString(30047), 'XBMC.PlayMedia('+playbackURL+'&title='+ package.file.title + '&directory='+ package.folder.id + '&filename='+ package.file.id +'&playback=1)', ))
# cm.append(( addon.getLocalizedString(30048), 'XBMC.PlayMedia('+playbackURL+'&title='+ package.file.title + '&directory='+ package.folder.id + '&filename='+ package.file.id +'&playback=2)', ))
#cm.append(( addon.getLocalizedString(30032), 'XBMC.RunPlugin('+PLUGIN_URL+'?mode=download&title='+package.file.title+'&filename='+package.file.id+')', ))
# listitem.addContextMenuItems( commands )
# if cm:
listitem.addContextMenuItems(cm, False)
xbmcplugin.addDirectoryItem(plugin_handle, url, listitem,
isFolder=False, totalItems=0)
def addDirectory(service, folder):
if folder.id == 'SAVED-SEARCH':
listitem = xbmcgui.ListItem('Search - ' + decode(folder.displayTitle()), iconImage='', thumbnailImage='')
else:
listitem = xbmcgui.ListItem(decode(folder.displayTitle()), iconImage=decode(folder.thumb), thumbnailImage=decode(folder.thumb))
fanart = addon.getAddonInfo('path') + '/fanart.jpg'
if folder.id != '':
cm=[]
cm.append(( addon.getLocalizedString(30042), 'XBMC.RunPlugin('+PLUGIN_URL+'?mode=buildstrm&title='+folder.title+'&username='+str(service.authorization.username)+'&folderID='+str(folder.id)+')', ))
cm.append(( addon.getLocalizedString(30081), 'XBMC.RunPlugin('+PLUGIN_URL+'?mode=createbookmark&title='+folder.title+'&instance='+str(service.instanceName)+'&folderID='+str(folder.id)+')', ))
listitem.addContextMenuItems(cm, False)
listitem.setProperty('fanart_image', fanart)
if folder.id == 'SAVED-SEARCH':
xbmcplugin.addDirectoryItem(plugin_handle, PLUGIN_URL+'?mode=search&instance='+str(service.instanceName)+'&criteria='+folder.title, listitem,
isFolder=True, totalItems=0)
else:
xbmcplugin.addDirectoryItem(plugin_handle, service.getDirectoryCall(folder), listitem,
isFolder=True, totalItems=0)
def addMenu(url,title):
listitem = xbmcgui.ListItem(decode(title), iconImage='', thumbnailImage='')
fanart = addon.getAddonInfo('path') + '/fanart.jpg'
listitem.setProperty('fanart_image', fanart)
xbmcplugin.addDirectoryItem(plugin_handle, url, listitem,
isFolder=True, totalItems=0)
#http://stackoverflow.com/questions/1208916/decoding-html-entities-with-python/1208931#1208931
def _callback(matches):
id = matches.group(1)
try:
return unichr(int(id))
except:
return id
def decode(data):
return re.sub("&#(\d+)(;|(?=\s))", _callback, data).strip()
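# Illustrative example (assumed behavior of the regex above):
#   decode('Fish &#38; Chips')  ->  'Fish & Chips'
# Numeric entities are mapped through unichr(); unparsable ids are kept
# verbatim by _callback's fallback branch, and the result is stripped.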
def decode_dict(data):
for k, v in data.items():
if type(v) is str or type(v) is unicode:
data[k] = decode(v)
return data
def numberOfAccounts(accountType):
count = 1
max_count = int(addon.getSetting(accountType+'_numaccounts'))
actualCount = 0
while True:
try:
if addon.getSetting(accountType+str(count)+'_username') != '':
actualCount = actualCount + 1
except:
break
if count == max_count:
break
count = count + 1
return actualCount
#global variables
PLUGIN_URL = sys.argv[0]
plugin_handle = int(sys.argv[1])
plugin_queries = parse_query(sys.argv[2][1:])
addon = xbmcaddon.Addon(id='plugin.video.hive')
addon_dir = xbmc.translatePath( addon.getAddonInfo('path') )
import os
sys.path.append(os.path.join( addon_dir, 'resources', 'lib' ) )
import hive
import cloudservice
import folder
import file
import package
import mediaurl
import authorization
#from resources.lib import gPlayer
#from resources.lib import tvWindow
#debugging
try:
remote_debugger = addon.getSetting('remote_debugger')
remote_debugger_host = addon.getSetting('remote_debugger_host')
# append pydev remote debugger
if remote_debugger == 'true':
# Make pydev debugger works for auto reload.
# Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
import pysrc.pydevd as pydevd
# stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
pydevd.settrace(remote_debugger_host, stdoutToServer=True, stderrToServer=True)
except ImportError:
log(addon.getLocalizedString(30016), True)
sys.exit(1)
except :
pass
# retrieve settings
user_agent = addon.getSetting('user_agent')
mode = plugin_queries['mode']
# make mode case-insensitive
mode = mode.lower()
log('plugin url: ' + PLUGIN_URL)
log('plugin queries: ' + str(plugin_queries))
log('plugin handle: ' + str(plugin_handle))
instanceName = ''
try:
instanceName = (plugin_queries['instance']).lower()
except:
pass
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_DATE)
xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_SIZE)
#* utilities *
#clear the authorization token(s) from the identified instanceName or all instances
if mode == 'clearauth':
if instanceName != '':
try:
addon.setSetting(instanceName + '_token', '')
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30023))
except:
#error: instance doesn't exist
pass
# clear all accounts
else:
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
while True:
instanceName = PLUGIN_NAME+str(count)
try:
addon.setSetting(instanceName + '_token', '')
except:
break
if count == max_count:
break
count = count + 1
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30023))
xbmcplugin.endOfDirectory(plugin_handle)
#create strm files
elif mode == 'buildstrm':
silent = 0
try:
silent = int(addon.getSetting('strm_silent'))
except:
silent = 0
try:
silent = int(plugin_queries['silent'])
except:
pass
path = ''
try:
path = int(plugin_queries['path'])
except:
pass
try:
path = str(addon.getSetting('strm_path'))
except:
pass
if path == '':
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30026), 'files','',False,False,'')
addon.setSetting('strm_path', path)
if path != '':
if silent == 0:
returnPrompt = xbmcgui.Dialog().yesno(addon.getLocalizedString(30000), addon.getLocalizedString(30027) + '\n'+path + '?')
else:
returnPrompt = True
if path != '' and returnPrompt:
if silent != 2:
try:
pDialog = xbmcgui.DialogProgressBG()
pDialog.create(addon.getLocalizedString(30000), 'Building STRMs...')
except:
pass
try:
url = plugin_queries['streamurl']
title = plugin_queries['title']
url = re.sub('---', '&', url)
except:
url=''
if url != '':
filename = path + '/' + title+'.strm'
strmFile = xbmcvfs.File(filename, "w")
strmFile.write(url+'\n')
strmFile.close()
else:
try:
folderID = plugin_queries['folderID']
title = plugin_queries['title']
except:
folderID = ''
try:
filename = plugin_queries['filename']
title = plugin_queries['title']
except:
filename = ''
try:
invokedUsername = plugin_queries['username']
except:
invokedUsername = ''
if folderID != '':
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
loop = True
while loop:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username == invokedUsername:
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
loop = False
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
service.buildSTRM(path + '/'+title,folderID)
elif filename != '':
url = PLUGIN_URL+'?mode=video&title='+title+'&filename='+filename + '&username='+invokedUsername
# filename = xbmc.translatePath(os.path.join(path, title+'.strm'))
filename = path + '/' + title+'.strm'
strmFile = xbmcvfs.File(filename, "w")
strmFile.write(url+'\n')
strmFile.close()
else:
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
while True:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
except:
username = ''
if username != '':
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
service.buildSTRM(path + '/'+username)
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
if silent != 2:
try:
pDialog.update(100)
except:
pass
if silent == 0:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30028))
xbmcplugin.endOfDirectory(plugin_handle)
#create strm files
elif mode == 'createbookmark':
try:
folderID = plugin_queries['folderID']
title = plugin_queries['title']
instanceName = plugin_queries['instance']
except:
folderID = ''
if folderID != '':
try:
username = addon.getSetting(instanceName+'_username')
except:
username = ''
if username != '':
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
newTitle = ''
try:
dialog = xbmcgui.Dialog()
newTitle = dialog.input('Enter a name for the bookmark', title, type=xbmcgui.INPUT_ALPHANUM)
except:
newTitle = title
if newTitle == '':
newTitle = title
service.createBookmark(folderID,newTitle)
xbmcplugin.endOfDirectory(plugin_handle)
#create strm files
elif mode == 'createsearch':
searchText = ''
try:
searchText = addon.getSetting('criteria')
except:
searchText = ''
if searchText == '':
try:
dialog = xbmcgui.Dialog()
searchText = dialog.input('Enter search string', type=xbmcgui.INPUT_ALPHANUM)
except:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30100))
searchText = 'life'
if searchText != '':
instanceName = ''
try:
instanceName = (plugin_queries['instance']).lower()
except:
pass
numberOfAccounts = numberOfAccounts(PLUGIN_NAME)
# show list of services
if numberOfAccounts > 1 and instanceName == '':
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
while True:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username != '':
addMenu(PLUGIN_URL+'?mode=main&instance='+instanceName,username)
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
else:
# show index of accounts
if instanceName == '' and numberOfAccounts == 1:
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
loop = True
while loop:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username != '':
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
loop = False
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
# no accounts defined
elif numberOfAccounts == 0:
#legacy account conversion
try:
username = addon.getSetting('username')
if username != '':
addon.setSetting(PLUGIN_NAME+'1_username', username)
addon.setSetting(PLUGIN_NAME+'1_password', addon.getSetting('password'))
addon.setSetting(PLUGIN_NAME+'1_auth_token', addon.getSetting('auth_token'))
addon.setSetting(PLUGIN_NAME+'1_auth_session', addon.getSetting('auth_session'))
addon.setSetting('username', '')
addon.setSetting('password', '')
addon.setSetting('auth_token', '')
addon.setSetting('auth_session', '')
else:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30015))
log(addon.getLocalizedString(30015), True)
xbmcplugin.endOfDirectory(plugin_handle)
except :
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30015))
log(addon.getLocalizedString(30015), True)
xbmcplugin.endOfDirectory(plugin_handle)
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
# show entries of a single account (such as folder)
elif instanceName != '':
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
service.createSearch(searchText)
mediaItems = service.getSearchResults(searchText)
if mediaItems:
for item in mediaItems:
try:
if item.file is None:
addDirectory(service, item.folder)
else:
addMediaFile(service, item)
except:
addMediaFile(service, item)
service.updateAuthorization(addon)
xbmcplugin.endOfDirectory(plugin_handle)
numberOfAccounts = numberOfAccounts(PLUGIN_NAME)
try:
invokedUsername = plugin_queries['username']
except:
invokedUsername = ''
# show list of services
if numberOfAccounts > 1 and instanceName == '' and invokedUsername == '':
if mode == 'main':
mode = ''
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
while True:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username != '':
addMenu(PLUGIN_URL+'?mode=main&instance='+instanceName,username)
try:
service
except:
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
else:
# show index of accounts
if instanceName == '' and invokedUsername == '' and numberOfAccounts == 1:
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
loop = True
while loop:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username != '':
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
loop = False
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
# no accounts defined
elif numberOfAccounts == 0:
#legacy account conversion
try:
username = addon.getSetting('username')
if username != '':
addon.setSetting(PLUGIN_NAME+'1_username', username)
addon.setSetting(PLUGIN_NAME+'1_password', addon.getSetting('password'))
addon.setSetting(PLUGIN_NAME+'1_auth_token', addon.getSetting('auth_token'))
addon.setSetting(PLUGIN_NAME+'1_auth_session', addon.getSetting('auth_session'))
addon.setSetting('username', '')
addon.setSetting('password', '')
addon.setSetting('auth_token', '')
addon.setSetting('auth_session', '')
else:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30015))
log(addon.getLocalizedString(30015), True)
xbmcplugin.endOfDirectory(plugin_handle)
except :
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30015))
log(addon.getLocalizedString(30015), True)
xbmcplugin.endOfDirectory(plugin_handle)
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
# show entries of a single account (such as folder)
elif instanceName != '':
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
elif invokedUsername != '':
count = 1
max_count = int(addon.getSetting(PLUGIN_NAME+'_numaccounts'))
loop = True
while loop:
instanceName = PLUGIN_NAME+str(count)
try:
username = addon.getSetting(instanceName+'_username')
if username == invokedUsername:
#let's log in
service = hive.hive(PLUGIN_URL,addon,instanceName, user_agent)
loop = False
except:
break
if count == max_count:
#fallback on first defined account
service = hive.hive(PLUGIN_URL,addon,PLUGIN_NAME+'1', user_agent)
break
count = count + 1
if mode == 'main':
addMenu(PLUGIN_URL+'?mode=options','<< '+addon.getLocalizedString(30043)+' >>')
addMenu(PLUGIN_URL+'?mode=search','<<SEARCH>>')
#dump a list of videos available to play
if mode == 'main' or mode == 'folder':
folderName=''
if (mode == 'folder'):
folderName = plugin_queries['directory']
else:
pass
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
if folderName == '':
addMenu(PLUGIN_URL+'?mode=folder&instance='+instanceName+'&directory=FRIENDS','['+addon.getLocalizedString(30091)+']')
addMenu(PLUGIN_URL+'?mode=folder&instance='+instanceName+'&directory=FEED','['+addon.getLocalizedString(30092)+']')
mediaItems = service.getCollections()
if mediaItems:
for item in mediaItems:
try:
if item.file is None:
addDirectory(service, item.folder)
else:
addMediaFile(service, item)
except:
addMediaFile(service, item)
mediaItems = service.getMediaList(folderName,0)
if mediaItems:
for item in mediaItems:
try:
if item.file is None:
addDirectory(service, item.folder)
else:
addMediaFile(service, item)
except:
addMediaFile(service, item)
service.updateAuthorization(addon)
#dump a list of videos available to play
elif mode == 'search':
searchText = ''
try:
searchText = plugin_queries['criteria']
except:
searchText = ''
if searchText == '':
try:
dialog = xbmcgui.Dialog()
searchText = dialog.input('Enter search string', type=xbmcgui.INPUT_ALPHANUM)
except:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30100))
searchText = 'life'
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
mediaItems = service.getSearchResults(searchText)
if mediaItems:
for item in mediaItems:
try:
if item.file is None:
addDirectory(service, item.folder)
else:
addMediaFile(service, item)
except:
addMediaFile(service, item)
service.updateAuthorization(addon)
# xbmcplugin.setContent(int(sys.argv[1]), 'videos')
# xbmcplugin.setProperty(int(sys.argv[1]),'IsPlayable', 'false')
# xbmc.executebuiltin("ActivateWindow(Videos)")
#play a video given its exact-title
elif mode == 'video' or mode == 'audio':
filename = plugin_queries['filename']
try:
directory = plugin_queries['directory']
except:
directory = ''
try:
title = plugin_queries['title']
except:
title = ''
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
        log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
playbackType = 0
try:
playbackType = plugin_queries['playback']
except:
playbackType = ''
if service.isPremium:
try:
if mode == 'audio':
playbackType = int(addon.getSetting('playback_type_audio'))
else:
playbackType = int(addon.getSetting('playback_type_video'))
except:
playbackType = 0
else:
try:
if mode == 'audio':
playbackType = int(addon.getSetting('free_playback_type_audio'))
else:
playbackType = int(addon.getSetting('free_playback_type_video'))
except:
if mode == 'audio':
playbackType = 0
else:
playbackType = 1
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(directory,directory)
mediaURLs = service.getPlaybackCall(playbackType,package.package(mediaFile,mediaFolder ))
playbackURL = ''
# BEGIN JoKeRzBoX
# - Get list of possible resolutions (quality), pre-ordered from best to lower res, from a String constant
# - Create associative array (a.k.a. hash list) availableQualities with each available resolution (key) and media URL (value)
# - Simple algorithm to go through possible resolutions and find the best available one based on user's choice
# FIX: list of qualities shown to user are now ordered from highest to low resolution
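    # Worked example (hypothetical values, for illustration only):
    #   listPossibleQualities = ['original', '720p', '480p', '360p', '240p']
    #   the user chose '480p' and availableQualities has 'original' and '360p'
    #   -> the threshold is reached at '480p'; '480p' itself is unavailable,
    #      so the scan keeps walking down and settles on '360p', the best
    #      available quality at or below the user's choice.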
if mode == 'audio':
possibleQualities = addon.getLocalizedString(30058)
else:
possibleQualities = addon.getLocalizedString(30057)
listPossibleQualities = possibleQualities.split("|")
availableQualities = {}
for mediaURL in mediaURLs:
availableQualities[mediaURL.qualityDesc] = mediaURL.url
## User has chosen: "Always original quality"
#if playbackType == 0:
# playbackURL = availableQualities['original']
# User has chosen a max quality other than "original". Let's decide on the best stream option available
#else:
userChosenQuality = listPossibleQualities[playbackType]
reachedThreshold = 0
for quality in listPossibleQualities:
if quality == userChosenQuality:
reachedThreshold = 1
if reachedThreshold and quality in availableQualities:
playbackURL = availableQualities[quality]
chosenRes = str(quality)
reachedThreshold = 0
if reachedThreshold and playbackType != len(listPossibleQualities)-1 and len(availableQualities) == 3:
# Means that the exact encoding requested by user was not found.
# Also, there are the only available: original, 360p and 240p (because cont = 3).
# Therefore if user did not choose "always ask" it is safe to assume "original" is the one closest to the quality selected by user
playbackURL = availableQualities['original']
# Desired quality still not found. Lets bring list of available options and let user select
if playbackURL == '':
options = []
for quality in listPossibleQualities:
if quality in availableQualities:
options.append(quality)
ret = xbmcgui.Dialog().select(addon.getLocalizedString(30033), options)
if ret >= 0:
playbackURL = availableQualities[str(options[ret])]
chosenRes = str(options[ret])
# END JoKeRzBoX
# JoKeRzBox: FIX: when user does not choose from list, addon was still playing a stream
if playbackURL != '':
item = xbmcgui.ListItem(path=playbackURL)
# item.setInfo( type="Video", infoLabels={ "Title": title , "Plot" : title } )
# item.setInfo( type="Video")
# Add resolution to beginning of title while playing media. Format "<RES> | <TITLE>"
if mode == 'audio':
item.setInfo( type="music", infoLabels={ "Title": title + " @ " + chosenRes} )
else:
item.setInfo( type="video", infoLabels={ "Title": title + " @ " + chosenRes, "Plot" : title } )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
#play a video given its exact-title
elif mode == 'requestencoding':
filename = plugin_queries['filename']
try:
directory = plugin_queries['directory']
except:
directory = ''
try:
title = plugin_queries['title']
except:
title = ''
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
        log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(directory,directory)
mediaURLs = service.getPlaybackCall(0,package.package(mediaFile,mediaFolder ))
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30087), title)
elif mode == 'photo':
filename = plugin_queries['filename']
try:
directory = plugin_queries['directory']
except:
directory = ''
try:
title = plugin_queries['title']
except:
title = ''
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
        log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
path = ''
try:
path = addon.getSetting('photo_folder')
except:
pass
import os.path
if not os.path.exists(path):
path = ''
while path == '':
path = xbmcgui.Dialog().browse(0,addon.getLocalizedString(30038), 'files','',False,False,'')
if not os.path.exists(path):
path = ''
else:
addon.setSetting('photo_folder', path)
mediaFile = file.file(filename, title, '', 0, '','')
mediaFolder = folder.folder(directory,directory)
mediaURLs = service.getPlaybackCall(0,package.package(mediaFile,mediaFolder ))
playbackURL = ''
for mediaURL in mediaURLs:
if mediaURL.qualityDesc == 'original':
playbackURL = mediaURL.url
import xbmcvfs
xbmcvfs.mkdir(path + '/'+str(directory))
try:
xbmcvfs.rmdir(path + '/'+str(directory)+'/'+str(title))
except:
pass
service.downloadPicture(playbackURL, path + '/'+str(directory) + '/'+str(title))
xbmc.executebuiltin("XBMC.ShowPicture("+path + '/'+str(directory) + '/'+str(title)+")")
#play a video given its exact-title
elif mode == 'streamurl':
url = plugin_queries['url']
try:
title = plugin_queries['title']
except:
title = ''
try:
service
except NameError:
xbmcgui.Dialog().ok(addon.getLocalizedString(30000), addon.getLocalizedString(30051), addon.getLocalizedString(30052), addon.getLocalizedString(30053))
        log(addon.getLocalizedString(30050)+ 'hive-login', True)
xbmcplugin.endOfDirectory(plugin_handle)
url = re.sub('---', '&', url)
item = xbmcgui.ListItem(path=url)
item.setInfo( type="Video", infoLabels={ "Title": title , "Plot" : title } )
# item.setInfo( type="Music", infoLabels={ "Title": title , "Plot" : title } )
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
if mode == 'options' or mode == 'buildstrm' or mode == 'clearauth':
addMenu(PLUGIN_URL+'?mode=clearauth','<<'+addon.getLocalizedString(30018)+'>>')
addMenu(PLUGIN_URL+'?mode=buildstrm','<<'+addon.getLocalizedString(30025)+'>>')
addMenu(PLUGIN_URL+'?mode=createsearch','<<Save Search>>')
xbmcplugin.endOfDirectory(plugin_handle)
| gpl-2.0 | -1,367,562,831,503,683,000 | 34.849368 | 302 | 0.576161 | false | 4.247927 | false | false | false |
samitnuk/urlsaver | app/forms.py | 1 | 2339 | from flask.ext.wtf import Form
# from flask.ext.wtf.file import FileField, FileRequired, FileAllowed
from wtforms import TextField, PasswordField, validators
from app import db
from models import User, Locator
#----------------------------------------------------------------------------
class LoginForm(Form):
email = TextField('Email', [validators.Required()])
password = PasswordField('Password', [validators.Required()])
def validate_email(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user.')
if not user.check_password(password=self.password.data):
raise validators.ValidationError('Invalid password.')
def get_user(self):
return db.session.query(User). \
filter_by(email=self.email.data).first()
#----------------------------------------------------------------------------
class RegistrationForm(Form):
email = TextField('Email Address', [validators.Required(),
validators.Email()])
password = PasswordField('Password', [validators.Required()])
confirm = PasswordField('Repeat Password',
[validators.Required(),
validators.EqualTo('password',
message='Passwords must match.')])
def validate_email(self, field):
if db.session.query(User). \
filter_by(email=self.email.data).count() > 0:
raise validators.ValidationError('Duplicate email.')
#----------------------------------------------------------------------------
class EditForm(Form):
title = TextField('title', [validators.Required()])
url = TextField('url', [validators.Required()])
groupname = TextField('groupname')
#----------------------------------------------------------------------------
class SearchForm(Form):
search = TextField('search', [validators.Required()])
#----------------------------------------------------------------------------
class RestorePasswordForm(Form):
email = TextField('Email Address', [validators.Required()])
def validate_email(self, field):
if not db.session.query(User).filter_by(email=self.email.data).count():
raise validators.ValidationError('Enter registered email.')
| mit | 2,473,511,311,918,441,000 | 40.035088 | 79 | 0.536554 | false | 5.174779 | false | false | false |
Kronuz/pyXapiand | xapiand/platforms.py | 1 | 19568 | from __future__ import unicode_literals, absolute_import, print_function
import os
import sys
import math
import errno
import atexit
import importlib
import signal as _signal
import numbers
import itertools
try:
from io import UnsupportedOperation
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
except ImportError: # pragma: no cover
# Py2
FILENO_ERRORS = (AttributeError, ValueError) # noqa
def uniq(it):
"""Return all unique elements in ``it``, preserving order."""
seen = set()
return (seen.add(obj) or obj for obj in it if obj not in seen)
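# Doctest-style illustration (not part of the original module):
#   >>> list(uniq([1, 2, 1, 3, 2]))
#   [1, 2, 3]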
def get_errno(exc):
""":exc:`socket.error` and :exc:`IOError` first got
the ``.errno`` attribute in Py2.7"""
try:
return exc.errno
except AttributeError:
try:
# e.args = (errno, reason)
if isinstance(exc.args, tuple) and len(exc.args) == 2:
return exc.args[0]
except AttributeError:
pass
return 0
def try_import(module, default=None):
"""Try to import and return module, or return
None if the module does not exist."""
try:
return importlib.import_module(module)
except ImportError:
return default
def fileno(f):
if isinstance(f, numbers.Integral):
return f
return f.fileno()
def maybe_fileno(f):
"""Get object fileno, or :const:`None` if not defined."""
try:
return fileno(f)
except FILENO_ERRORS:
pass
class LockFailed(Exception):
"""Raised if a pidlock can't be acquired."""
EX_CANTCREAT = getattr(os, 'EX_CANTCREAT', 73)
EX_FAILURE = 1
PIDFILE_FLAGS = os.O_CREAT | os.O_EXCL | os.O_WRONLY
PIDFILE_MODE = ((os.R_OK | os.W_OK) << 6) | ((os.R_OK) << 3) | ((os.R_OK))
PIDLOCKED = """ERROR: Pidfile ({0}) already exists.
Seems we're already running? (pid: {1})"""
def get_fdmax(default=None):
"""Return the maximum number of open file descriptors
on this system.
:keyword default: Value returned if there's no file
descriptor limit.
"""
try:
return os.sysconf('SC_OPEN_MAX')
except Exception:
pass
if resource is None: # Windows
return default
fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if fdmax == resource.RLIM_INFINITY:
return default
return fdmax
class Pidfile(object):
"""Pidfile
This is the type returned by :func:`create_pidlock`.
TIP: Use the :func:`create_pidlock` function instead,
which is more convenient and also removes stale pidfiles (when
the process holding the lock is no longer running).
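    Example (illustrative; direct use shown, though create_pidlock is
    preferred, and the path below is hypothetical)::
        with Pidfile('/var/run/myapp.pid') as pidlock:
            pass  # lock held here; the pidfile is removed again on exit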
"""
#: Path to the pid lock file.
path = None
def __init__(self, path):
self.path = os.path.abspath(path)
def acquire(self):
"""Acquire lock."""
try:
self.write_pid()
except OSError as exc:
            # re-raise as LockFailed, keeping the original traceback (Py2 syntax)
            raise LockFailed(str(exc)), None, sys.exc_info()[2]
return self
__enter__ = acquire
def is_locked(self):
"""Return true if the pid lock exists."""
return os.path.exists(self.path)
def release(self, *args):
"""Release lock."""
self.remove()
__exit__ = release
def read_pid(self):
"""Read and return the current pid."""
try:
with open(self.path, 'r') as fh:
line = fh.readline()
if line.strip() == line: # must contain '\n'
raise ValueError(
'Partial or invalid pidfile {0.path}'.format(self))
try:
return int(line.strip())
except ValueError:
raise ValueError(
'pidfile {0.path} contents invalid.'.format(self))
        except IOError as exc:
            # excepting an errno constant never matches; catch IOError and
            # ignore only a missing pidfile (ENOENT)
            if get_errno(exc) != errno.ENOENT:
                raise
def remove(self):
"""Remove the lock."""
        try:
            os.unlink(self.path)
        except OSError as exc:
            # a missing or inaccessible pidfile counts as already removed
            if get_errno(exc) not in (errno.ENOENT, errno.EACCES):
                raise
def remove_if_stale(self):
"""Remove the lock if the process is not running.
(does not respond to signals)."""
try:
pid = self.read_pid()
except ValueError:
print('Broken pidfile found. Removing it.', file=sys.stderr)
self.remove()
return True
if not pid:
self.remove()
return True
if not pid_exists(pid):
print('Stale pidfile exists. Removing it.', file=sys.stderr)
self.remove()
return True
return False
def write_pid(self):
pid = os.getpid()
content = '{0}\n'.format(pid)
pidfile_fd = os.open(self.path, PIDFILE_FLAGS, PIDFILE_MODE)
pidfile = os.fdopen(pidfile_fd, 'w')
try:
pidfile.write(content)
# flush and sync so that the re-read below works.
pidfile.flush()
try:
os.fsync(pidfile_fd)
except AttributeError: # pragma: no cover
pass
finally:
pidfile.close()
rfh = open(self.path)
try:
if rfh.read() != content:
raise LockFailed(
"Inconsistency: Pidfile content doesn't match at re-read")
finally:
rfh.close()
def create_pidlock(pidfile):
"""Create and verify pidfile.
If the pidfile already exists the program exits with an error message,
however if the process it refers to is not running anymore, the pidfile
is deleted and the program continues.
This function will automatically install an :mod:`atexit` handler
to release the lock at exit, you can skip this by calling
:func:`_create_pidlock` instead.
:returns: :class:`Pidfile`.
**Example**:
.. code-block:: python
pidlock = create_pidlock('/var/run/app.pid')
"""
pidlock = _create_pidlock(pidfile)
atexit.register(pidlock.release)
return pidlock
def _create_pidlock(pidfile):
pidlock = Pidfile(pidfile)
if pidlock.is_locked() and not pidlock.remove_if_stale():
print(PIDLOCKED.format(pidfile, pidlock.read_pid()), file=sys.stderr)
raise SystemExit(EX_CANTCREAT)
pidlock.acquire()
return pidlock
resource = try_import('resource')
pwd = try_import('pwd')
grp = try_import('grp')
DAEMON_UMASK = 0
DAEMON_WORKDIR = '/'
if hasattr(os, 'closerange'):
def close_open_fds(keep=None):
# must make sure this is 0-inclusive (Issue #1882)
keep = list(uniq(sorted(
f for f in map(maybe_fileno, keep or []) if f is not None
)))
maxfd = get_fdmax(default=2048)
kL, kH = iter([-1] + keep), iter(keep + [maxfd])
for low, high in itertools.izip_longest(kL, kH):
if low + 1 != high:
os.closerange(low + 1, high)
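        # Worked example (hypothetical fds): keep = [0, 1, 2, 5], maxfd = 10
        #   pairs scanned: (-1,0) (0,1) (1,2) (2,5) (5,10)
        #   -> closerange(3, 5) and closerange(6, 10); fds 0, 1, 2, 5 survive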
else:
def close_open_fds(keep=None): # noqa
keep = [maybe_fileno(f)
for f in (keep or []) if maybe_fileno(f) is not None]
for fd in reversed(range(get_fdmax(default=2048))):
if fd not in keep:
                try:
                    os.close(fd)
                except OSError as exc:
                    # excepting an errno constant never matches; ignore
                    # only fds that are already closed (EBADF)
                    if get_errno(exc) != errno.EBADF:
                        raise
class DaemonContext(object):
_is_open = False
def __init__(self, pidfile=None, workdir=None, umask=None,
fake=False, after_chdir=None, **kwargs):
self.workdir = workdir or DAEMON_WORKDIR
self.umask = DAEMON_UMASK if umask is None else umask
self.fake = fake
self.after_chdir = after_chdir
self.stdfds = (sys.stdin, sys.stdout, sys.stderr)
def redirect_to_null(self, fd):
if fd is not None:
dest = os.open(os.devnull, os.O_RDWR)
os.dup2(dest, fd)
def open(self):
if not self._is_open:
if not self.fake:
self._detach()
os.chdir(self.workdir)
os.umask(self.umask)
if self.after_chdir:
self.after_chdir()
close_open_fds(self.stdfds)
for fd in self.stdfds:
self.redirect_to_null(maybe_fileno(fd))
self._is_open = True
__enter__ = open
def close(self, *args):
if self._is_open:
self._is_open = False
__exit__ = close
def _detach(self):
if os.fork() == 0: # first child
os.setsid() # create new session
if os.fork() > 0: # second child
os._exit(0)
else:
os._exit(0)
return self
def detached(logfile=None, pidfile=None, uid=None, gid=None, umask=0,
workdir=None, fake=False, **opts):
"""Detach the current process in the background (daemonize).
:keyword logfile: Optional log file. The ability to write to this file
will be verified before the process is detached.
:keyword pidfile: Optional pidfile. The pidfile will not be created,
as this is the responsibility of the child. But the process will
exit if the pid lock exists and the pid written is still running.
:keyword uid: Optional user id or user name to change
effective privileges to.
:keyword gid: Optional group id or group name to change effective
privileges to.
:keyword umask: Optional umask that will be effective in the child process.
:keyword workdir: Optional new working directory.
:keyword fake: Don't actually detach, intented for debugging purposes.
:keyword \*\*opts: Ignored.
**Example**:
.. code-block:: python
from celery.platforms import detached, create_pidlock
with detached(logfile='/var/log/app.log', pidfile='/var/run/app.pid',
uid='nobody'):
# Now in detached child process with effective user set to nobody,
# and we know that our logfile can be written to, and that
# the pidfile is not locked.
pidlock = create_pidlock('/var/run/app.pid')
# Run the program
program.run(logfile='/var/log/app.log')
"""
if not resource:
raise RuntimeError('This platform does not support detach.')
workdir = os.getcwd() if workdir is None else workdir
signals.reset('SIGCLD') # Make sure SIGCLD is using the default handler.
maybe_drop_privileges(uid=uid, gid=gid)
def after_chdir_do():
# Since without stderr any errors will be silently suppressed,
# we need to know that we have access to the logfile.
logfile and open(logfile, 'a').close()
# Doesn't actually create the pidfile, but makes sure it's not stale.
if pidfile:
_create_pidlock(pidfile).release()
return DaemonContext(
umask=umask, workdir=workdir, fake=fake, after_chdir=after_chdir_do,
)
def parse_uid(uid):
"""Parse user id.
uid can be an integer (uid) or a string (user name), if a user name
the uid is taken from the system user registry.
"""
try:
return int(uid)
except ValueError:
try:
return pwd.getpwnam(uid).pw_uid
except (AttributeError, KeyError):
raise KeyError('User does not exist: {0}'.format(uid))
def parse_gid(gid):
"""Parse group id.
gid can be an integer (gid) or a string (group name), if a group name
the gid is taken from the system group registry.
"""
try:
return int(gid)
except ValueError:
try:
return grp.getgrnam(gid).gr_gid
except (AttributeError, KeyError):
raise KeyError('Group does not exist: {0}'.format(gid))
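# Illustrative examples (the user/group names are hypothetical and
# system-dependent):
#   parse_uid(0)        -> 0
#   parse_uid('nobody') -> the uid from pwd.getpwnam('nobody')
#   parse_gid('staff')  -> the gid from grp.getgrnam('staff')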
def _setgroups_hack(groups):
""":fun:`setgroups` may have a platform-dependent limit,
and it is not always possible to know in advance what this limit
is, so we use this ugly hack stolen from glibc."""
groups = groups[:]
while 1:
try:
return os.setgroups(groups)
except ValueError: # error from Python's check.
if len(groups) <= 1:
raise
groups[:] = groups[:-1]
except OSError as exc: # error from the OS.
if exc.errno != errno.EINVAL or len(groups) <= 1:
raise
groups[:] = groups[:-1]
def setgroups(groups):
"""Set active groups from a list of group ids."""
max_groups = None
try:
max_groups = os.sysconf('SC_NGROUPS_MAX')
except Exception:
pass
try:
return _setgroups_hack(groups[:max_groups])
except OSError as exc:
if exc.errno != errno.EPERM:
raise
if any(group not in groups for group in os.getgroups()):
# we shouldn't be allowed to change to this group.
raise
def initgroups(uid, gid):
"""Compat version of :func:`os.initgroups` which was first
added to Python 2.7."""
if not pwd: # pragma: no cover
return
username = pwd.getpwuid(uid)[0]
if hasattr(os, 'initgroups'): # Python 2.7+
return os.initgroups(username, gid)
groups = [gr.gr_gid for gr in grp.getgrall()
if username in gr.gr_mem]
setgroups(groups)
def setgid(gid):
"""Version of :func:`os.setgid` supporting group names."""
os.setgid(parse_gid(gid))
def setuid(uid):
"""Version of :func:`os.setuid` supporting usernames."""
os.setuid(parse_uid(uid))
def maybe_drop_privileges(uid=None, gid=None):
"""Change process privileges to new user/group.
If UID and GID is specified, the real user/group is changed.
If only UID is specified, the real user is changed, and the group is
changed to the users primary group.
If only GID is specified, only the group is changed.
"""
if sys.platform == 'win32':
return
if os.geteuid():
# no point trying to setuid unless we're root.
if not os.getuid():
raise AssertionError('contact support')
uid = uid and parse_uid(uid)
gid = gid and parse_gid(gid)
if uid:
# If GID isn't defined, get the primary GID of the user.
if not gid and pwd:
gid = pwd.getpwuid(uid).pw_gid
# Must set the GID before initgroups(), as setgid()
# is known to zap the group list on some platforms.
# setgid must happen before setuid (otherwise the setgid operation
# may fail because of insufficient privileges and possibly stay
# in a privileged group).
setgid(gid)
initgroups(uid, gid)
# at last:
setuid(uid)
# ... and make sure privileges cannot be restored:
try:
setuid(0)
except OSError as exc:
if get_errno(exc) != errno.EPERM:
raise
pass # Good: cannot restore privileges.
else:
raise RuntimeError(
'non-root user able to restore privileges after setuid.')
else:
gid and setgid(gid)
if uid and (not os.getuid()) and not (os.geteuid()):
raise AssertionError('Still root uid after drop privileges!')
if gid and (not os.getgid()) and not (os.getegid()):
raise AssertionError('Still root gid after drop privileges!')
class Signals(object):
"""Convenience interface to :mod:`signals`.
If the requested signal is not supported on the current platform,
the operation will be ignored.
**Examples**:
.. code-block:: python
>>> from celery.platforms import signals
>>> from proj.handlers import my_handler
>>> signals['INT'] = my_handler
>>> signals['INT']
my_handler
>>> signals.supported('INT')
True
>>> signals.signum('INT')
2
>>> signals.ignore('USR1')
>>> signals['USR1'] == signals.ignored
True
>>> signals.reset('USR1')
>>> signals['USR1'] == signals.default
True
>>> from proj.handlers import exit_handler, hup_handler
>>> signals.update(INT=exit_handler,
... TERM=exit_handler,
... HUP=hup_handler)
"""
ignored = _signal.SIG_IGN
default = _signal.SIG_DFL
if hasattr(_signal, 'setitimer'):
def arm_alarm(self, seconds):
_signal.setitimer(_signal.ITIMER_REAL, seconds)
else: # pragma: no cover
try:
from itimer import alarm as _itimer_alarm # noqa
except ImportError:
def arm_alarm(self, seconds): # noqa
_signal.alarm(math.ceil(seconds))
else: # pragma: no cover
def arm_alarm(self, seconds): # noqa
return _itimer_alarm(seconds) # noqa
def reset_alarm(self):
return _signal.alarm(0)
def supported(self, signal_name):
"""Return true value if ``signal_name`` exists on this platform."""
try:
return self.signum(signal_name)
except AttributeError:
pass
def signum(self, signal_name):
"""Get signal number from signal name."""
if isinstance(signal_name, numbers.Integral):
return signal_name
if not isinstance(signal_name, basestring) \
or not signal_name.isupper():
raise TypeError('signal name must be uppercase string.')
if not signal_name.startswith('SIG'):
signal_name = 'SIG' + signal_name
return getattr(_signal, signal_name)
def reset(self, *signal_names):
"""Reset signals to the default signal handler.
Does nothing if the platform doesn't support signals,
or the specified signal in particular.
"""
self.update((sig, self.default) for sig in signal_names)
def ignore(self, *signal_names):
"""Ignore signal using :const:`SIG_IGN`.
Does nothing if the platform doesn't support signals,
or the specified signal in particular.
"""
self.update((sig, self.ignored) for sig in signal_names)
def __getitem__(self, signal_name):
return _signal.getsignal(self.signum(signal_name))
def __setitem__(self, signal_name, handler):
"""Install signal handler.
Does nothing if the current platform doesn't support signals,
or the specified signal in particular.
"""
try:
_signal.signal(self.signum(signal_name), handler)
except (AttributeError, ValueError):
pass
def update(self, _d_=None, **sigmap):
"""Set signal handlers from a mapping."""
for signal_name, handler in dict(_d_ or {}, **sigmap).items():
self[signal_name] = handler
signals = Signals()
def pid_exists(pid):
"""Check whether pid exists in the current process table.
UNIX only.
"""
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError('invalid PID 0')
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
| gpl-2.0 | 7,963,018,132,772,073,000 | 28.425564 | 79 | 0.585139 | false | 4.043811 | false | false | false |
vicnala/ampabooks | config.py | 1 | 1274 | import web
import os.path
import sys
import shutil
DATABASE = 'db/libros.sqlite'
def _touch(fname, times=None):
    # return False if the file cannot be opened, True once it was touched;
    # the original had an unreachable 'return False' after 'return True'
    try:
        fhandle = open(fname, 'a')
    except IOError:
        return False
    try:
        os.utime(fname, times)
    finally:
        fhandle.close()
    return True
return False
def _init():
import sqlite3
conn = sqlite3.connect(DATABASE)
c = conn.cursor()
from tables import items, item1, users, admin, org, nif, grades, groups, tickets, students, books, default_grade, default_group
c.execute(items)
c.execute(item1)
c.execute(users)
c.execute(admin)
c.execute(org)
c.execute(nif)
c.execute(grades)
c.execute(default_grade)
c.execute(groups)
c.execute(default_group)
c.execute(tickets)
c.execute(students)
c.execute(books)
conn.commit()
conn.close()
def _copy_blank():
shutil.copy2(DATABASE, 'db/libros-vacia.sqlite')
if os.path.isfile(DATABASE):
DB = web.database(dbn='sqlite', db=DATABASE)
else:
print DATABASE, 'file not found.'
if _touch(DATABASE):
print 'initializing database', DATABASE, '...'
_init()
_copy_blank()
DB = web.database(dbn='sqlite', db=DATABASE)
else:
    print 'Error creating', DATABASE, 'file.'
sys.exit(1)
cache = False | gpl-3.0 | 4,701,388,009,988,527,000 | 21.767857 | 131 | 0.629513 | false | 3.326371 | false | false | false |
multiphrenic/GooeyPi | gooeypi/util.py | 1 | 1052 | import os
import subprocess
import sys
import controller
def pyversion(installdir):
config = controller.getConfig()
sys.path.insert(0, installdir)
from PyInstaller import get_version
return float(get_version()[:3])
def getflags(fname):
config = controller.getConfig()
flags=[]
flags.append(sys.executable) # Python executable to run pyinstaller
flags.append(os.path.join(config['pyidir'], config['pyscript']))
if config['noconfirm']:
flags.append('--noconfirm')
if config['singlefile']:
flags.append('--onefile')
if config['ascii']:
flags.append('--ascii')
if config['windowed']:
flags.append('--noconsole')
if config['upxdir'] != '':
flags.append('--upx-dir=' + config['upxdir'])
if pyversion(config['pyidir']) == 2.1:
flags.append('--distpath=' + os.path.dirname(fname)) # Output to same dir as script.
else:
flags.append('--out=' + os.path.dirname(fname))
flags.append(fname)
return(flags)
| gpl-2.0 | 7,363,234,715,685,705,000 | 28.222222 | 92 | 0.618821 | false | 3.839416 | true | false | false |
gnovis/scheduler | scheduler.py | 1 | 13569 | #!/usr/bin/env python3
import argparse
import json
import time
import subprocess
import fcntl
import errno
import datetime
import os
import sys
import signal
import threading
import shlex
# CONSTANTS - Scheduler settings
SEC_DELAY = 3
PATH = "/tmp/"
GPU_INFO_FILE = os.path.join(PATH, "gpu_scheduler_info")
DEFAULT_GPU_COUNT = 4
KILL_DELAY_SEC = 3
# CONSTANTS - Data keys
GPU_AVAIL = 'avail'
GPU_USER = 'user'
GPU_TASK = 'task'
GPU_TASK_PID = 'task_pid'
GPU_TASK_START = 'task_start'
GPU_NAME = 'gpu_name'
# CONSTANTS
KILL = 0
TERMINATE = 1
WARN = 2
# GLOBAL VARIABLES
TASK_SIGNAL = WARN
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-gc", "--gpu_count", type=int, default=1,
help="The count of required GPUs for specified task.")
parser.add_argument("-i", "--init", nargs="+", type=int,
help="""Initializes gpu info file. List of numbers is expected,
where first number is total count of GPUs and the rest of the numbers denotes unavailable GPUs.
e.g -i 5 3 4 means that total count of GPUs is 5 and GPU 3 and 4 are currently unavailable.""")
parser.add_argument("-v", "--verbose", action="store_true",
help="Prints info about the process, when the task is completed.")
parser.add_argument("-o", "--out", nargs="?", type=argparse.FileType('w'), default=sys.stdout,
help="The name of the file, which will be used to store stdout. The default file is sys.stdout.")
parser.add_argument("-e", "--err", nargs="?", type=argparse.FileType('w'), default=sys.stderr,
help="The name of the file, which will be used to store stderr. The default file is sys.stderr.")
parser.add_argument("-pg", "--prefered_gpu", type=int,
help="If possible, prefered GPU is assigned to the task, otherwise is assigned random free GPU.")
parser.add_argument("-fg", "--forced_gpu", type=int,
help="Wait until specified GPU is free.")
parser.add_argument("-s", "--status", action='store_true',
help="Show info about GPU usage - user/GPU/taskPID/start")
parser.add_argument("-rg", "--release_gpu", type=int, nargs='+',
help="Releases GPUs according their indices. e.g -rg 0 2 will release GPU 0 and 2.")
parser.add_argument("task", nargs='?',
help="The quoted task with arguments which will be started on free GPUs as soon as possible.")
return parser.parse_args()
# main function
def run_task(gpu_info_file, args):
is_waiting = False
while True:
try:
lock_file(gpu_info_file)
free_gpu = get_free_gpu(gpu_info_file)
if len(free_gpu) >= args.gpu_count:
try:
if args.prefered_gpu is not None:
free_gpu = get_prefered_gpu(free_gpu, args.prefered_gpu)
if args.forced_gpu is not None:
free_gpu = get_prefered_gpu(free_gpu, args.forced_gpu)
forced_gpu_free = check_forced_free(free_gpu, args.forced_gpu)
if not forced_gpu_free:
if not is_waiting:
is_waiting = True
print("Scheduler (PID: {}) is waiting for GPU {}.".format(os.getpid(), args.forced_gpu))
continue
# select required count of free gpu, which will be passed to the task
free_gpu = free_gpu[0:args.gpu_count]
# lock used gpu
set_occupied_gpu(gpu_info_file, free_gpu)
unlock_file(gpu_info_file)
# set enviromental variable GPU to cuda[index of allocated GPU]
cuda = set_env_vars(free_gpu)
dt_before = datetime.datetime.now()
# parse string of args to list
task = prepare_args(args.task)
                    # replace the '__num__' placeholder with the task's port number
task = insert_portshift(task, free_gpu[0])
# run required task
p = subprocess.Popen(task,
stdout=args.out,
stderr=args.err,
preexec_fn=before_new_subprocess)
# The second Ctrl-C kill the subprocess
signal.signal(signal.SIGINT, lambda signum, frame: stop_subprocess(p, gpu_info_file, free_gpu))
set_additional_info(gpu_info_file, free_gpu, os.getlogin(), task,
p.pid, get_formated_dt(dt_before), cuda)
print("GPU: {}\nSCH PID: {}\nTASK PID: {}".format(cuda, os.getpid(), p.pid))
print("SCH PGID: {}\nTASK PGID: {}".format(os.getpgid(os.getpid()), os.getpgid(p.pid)))
p.wait()
dt_after = datetime.datetime.now()
# info message
if args.verbose:
print("\ntask: {}\nstdout: {}\nstderr: {}\nstart: {}\nend: {}\ntotal time: {}\n".format(
task, args.out.name, args.err.name,
get_formated_dt(dt_before), get_formated_dt(dt_after),
get_time_duration(dt_before, dt_after)))
break
# make sure the GPU is released even on interrupts
finally:
set_free_gpu(gpu_info_file, free_gpu)
unlock_file(gpu_info_file)
time.sleep(1)
else:
unlock_file(gpu_info_file)
time.sleep(SEC_DELAY)
except IOError as e:
handle_io_error(e)
def before_new_subprocess():
signal.signal(signal.SIGINT, signal.SIG_IGN)
os.setsid()
def prepare_args(args):
result = []
for a in args.split('\n'):
if a != '':
result.extend(shlex.split(a))
return result
def stop_subprocess(process, gpu_file, gpu_to_release):
"""
This function take care of the Ctrl-C (SIGINT) signal.
On the first Ctrl-C the warning is printed.
On the second Ctrl-C the task is terminated.
On the third Ctrl-C the task is killed.
Delay between terminate and kill is specified in KILL_DELAY_SEC.
"""
def allow_kill_task():
global TASK_SIGNAL
TASK_SIGNAL = KILL
def check_process_liveness(process, max_time):
if max_time <= 0 or (process.poll() is not None):
allow_kill_task()
else:
threading.Timer(0.1, lambda: check_process_liveness(process, max_time - 0.1)).start()
global TASK_SIGNAL
if TASK_SIGNAL is KILL:
pgid = os.getpgid(process.pid)
print("\nThe task (PGID: {}) was killed.".format(pgid))
set_free_gpu(gpu_file, gpu_to_release)
os.killpg(pgid, signal.SIGKILL)
TASK_SIGNAL = None
elif TASK_SIGNAL is TERMINATE:
pgid = os.getpgid(process.pid)
print("\nThe task (PGID: {}) was terminated.".format(pgid))
set_free_gpu(gpu_file, gpu_to_release)
os.killpg(pgid, signal.SIGTERM)
# send a second SIGTERM because of blocks
os.killpg(pgid, signal.SIGTERM)
check_process_liveness(process, KILL_DELAY_SEC)
TASK_SIGNAL = None
elif TASK_SIGNAL is WARN:
pgid = os.getpgid(process.pid)
print("\nNext Ctrl-C terminate the task (PGID: {}).".format(pgid))
TASK_SIGNAL = TERMINATE
def check_forced_free(gpu_indices, forced):
if gpu_indices:
return gpu_indices[0] == forced
return False
def get_prefered_gpu(gpu_indices, prefered):
"""Move prefered GPU on a first position if it is available."""
if prefered in gpu_indices:
gpu_indices.remove(prefered)
return [prefered, ] + gpu_indices
return gpu_indices
def insert_portshift(task, task_id):
port = 3600 + task_id * 100
task = list(map(lambda v: str(port) if v == '__num__' else v, task))
return task
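# Illustrative example (hypothetical argv): task_id 2 gives port 3600 + 200,
#   insert_portshift(['serve', '--port', '__num__'], 2)
#   -> ['serve', '--port', '3800']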
# decorators
def access_gpu_file(func):
def wrapper(f, *args, **kwargs):
while True:
try:
lock_file(f)
func(f, *args, **kwargs)
unlock_file(f)
break
except IOError as e:
handle_io_error(e)
return wrapper
def seek_to_start(func):
def wrapper(f, *args, **kwargs):
f.seek(0)
result = func(f, *args, **kwargs)
f.seek(0)
return result
return wrapper
@access_gpu_file
@seek_to_start
def init_gpu_info_file(f, gpu_count, occupied_gpu):
"""
occupied_gpu - indices of GPUs which currently are not available
gpu_count - total count of GPUs on a system
"""
gpu_states = [False if i in occupied_gpu else True for i in range(gpu_count)]
f.truncate()
data = {}
data[GPU_AVAIL] = gpu_states
init_to_none = lambda c: c * [None]
data[GPU_USER] = init_to_none(gpu_count)
data[GPU_TASK] = init_to_none(gpu_count)
data[GPU_TASK_PID] = init_to_none(gpu_count)
data[GPU_TASK_START] = init_to_none(gpu_count)
data[GPU_NAME] = init_to_none(gpu_count)
json.dump(data, f, indent=4, sort_keys=True)
@seek_to_start
def get_free_gpu(gpu_info_file):
"Returns list of GPU indices which are available."
gpu_states = json.load(gpu_info_file)[GPU_AVAIL]
return [i for i, avail in enumerate(gpu_states) if avail]
@seek_to_start
def update_gpu_info(f, release_gpu, indices,
user=None, task=None,
proc_pid=None, start=None, gpu_name=None):
gpu_data = json.load(f)
f.seek(0)
f.truncate()
for i in range(len(gpu_data[GPU_AVAIL])):
if i in indices:
gpu_data[GPU_AVAIL][i] = release_gpu
gpu_data[GPU_USER][i] = user
gpu_data[GPU_TASK][i] = task
gpu_data[GPU_TASK_PID][i] = proc_pid
gpu_data[GPU_TASK_START][i] = start
gpu_data[GPU_NAME][i] = gpu_name
json.dump(gpu_data, f, indent=4, sort_keys=True)
@access_gpu_file
def set_additional_info(f, gpu_indices, user, task, proc_pid, start, gpu_name):
update_gpu_info(f, False, gpu_indices, user, task, proc_pid, start, gpu_name)
def set_occupied_gpu(f, occupied_gpu):
"""Locks currently unavailable GPUs."""
update_gpu_info(f, False, occupied_gpu)
@access_gpu_file
def set_free_gpu(f, free_gpu):
"""Releases GPUs"""
update_gpu_info(f, True, free_gpu)
def get_formated_dt(dt):
"""Returns the datetime object formated."""
return dt.strftime("%Y-%m-%d %H:%M:%S")
def get_time_duration(before, after):
"""Returns the difference between two datetime objects in format: hours:minutes:seconds"""
total_seconds = (after - before).seconds
mins, secs = divmod(total_seconds, 60)
hours, mins = divmod(mins, 60)
return "{}:{}:{}".format(hours, mins, secs)
def lock_file(f):
"""Locks the file."""
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock_file(f):
"""Unlocks the file."""
fcntl.flock(f, fcntl.LOCK_UN)
def handle_io_error(e):
if e.errno != errno.EAGAIN:
raise e
time.sleep(0.1)
def set_env_vars(gpu_indices):
"""Sets enviromental variable GPU"""
# currently is cupported just one gpu on task
cuda = "cuda{}".format(gpu_indices[0])
os.environ['GPU'] = cuda
return cuda
def validate_args(args):
if args.gpu_count != 1:
print("Usage of multiple GPUs isn't supported yet. You must use just the one GPU for the task.")
sys.exit(1)
@seek_to_start
def display_status(f):
gpu_data = json.load(f)
occupied = [i for i, avail in enumerate(gpu_data[GPU_AVAIL]) if not avail]
free = [i for i, avail in enumerate(gpu_data[GPU_AVAIL]) if avail]
if occupied:
print("Currently used GPU:")
print("-------------------")
for i in occupied:
print("GPU: {}\nUser: {}\nTask: {}\nTask PID: {}\nStarted: {}\n".format(gpu_data[GPU_NAME][i],
gpu_data[GPU_USER][i],
gpu_data[GPU_TASK][i],
gpu_data[GPU_TASK_PID][i],
gpu_data[GPU_TASK_START][i]))
if free:
print("Free GPU:")
print("---------")
for i in free:
print("GPU {}".format(i))
else:
print("No GPU available.")
# run scheduler
if __name__ == '__main__':
mode = 'r+'
    need_init_gpuf = not os.path.isfile(GPU_INFO_FILE)
if need_init_gpuf:
mode = 'w+'
with open(GPU_INFO_FILE, mode) as f:
if need_init_gpuf:
os.fchmod(f.fileno(), 0o777)
init_gpu_info_file(f, DEFAULT_GPU_COUNT, [])
# parse cli args
args = get_args()
validate_args(args)
if args.init:
init_gpu_info_file(f, args.init[0], args.init[1:])
if args.release_gpu:
set_free_gpu(f, args.release_gpu)
if args.status:
display_status(f)
if args.task:
run_task(f, args)
| mit | 5,005,323,656,014,090,000 | 32.503704 | 121 | 0.552509 | false | 3.686227 | false | false | false |
brkt/brkt-cli | brkt_cli/config/__init__.py | 1 | 23015 | # Copyright 2017 Bracket Computing, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# https://github.com/brkt/brkt-cli/blob/master/LICENSE
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import errno
import logging
import os
import os.path
import shutil
import sys
import tempfile
import yaml
import brkt_cli
from brkt_cli import argutil
from brkt_cli.subcommand import Subcommand
from brkt_cli.util import parse_endpoint, render_table_rows
from brkt_cli.validation import ValidationError
log = logging.getLogger(__name__)
CONFIG_DIR = os.path.expanduser('~/.brkt')
CONFIG_PATH = os.path.join(CONFIG_DIR, 'config')
VERSION = 3
class InvalidOptionError(Exception):
def __init__(self, option):
self.option = option
class UnknownEnvironmentError(Exception):
def __init__(self, env):
self.env = env
class InvalidEnvironmentError(Exception):
def __init__(self, missing_keys):
self.missing_keys = missing_keys
BRKT_HOSTED_ENV_NAME = 'brkt-hosted'
def _bracket_environment_to_dict(benv):
"""Convert a BracketEnvironment object to a dictionary that can be stored
in a config.
:param benv a BracketEnvironment object
:return a dictionary
"""
return {
'api-host': benv.api_host,
'api-port': benv.api_port,
'keyserver-host': benv.hsmproxy_host,
'keyserver-port': benv.hsmproxy_port,
'public-api-host': benv.public_api_host,
'public-api-port': benv.public_api_port,
'network-host': benv.network_host,
'network-port': benv.network_port,
'public-api-ca-cert-path': benv.public_api_ca_cert_path
}
def _bracket_environment_from_dict(d):
"""Convert a bracket environment from the config into a BracketEnvironment
object
:param d a dictionary
:return a BracketEnvironment object
"""
benv = brkt_cli.BracketEnvironment()
benv.api_host = d.get('api-host')
benv.api_port = d.get('api-port')
benv.hsmproxy_host = d.get('keyserver-host')
benv.hsmproxy_port = d.get('keyserver-port')
benv.public_api_host = d.get('public-api-host')
benv.public_api_port = d.get('public-api-port')
benv.network_host = d.get('network-host')
benv.network_port = d.get('network-port')
benv.public_api_ca_cert_path = d.get('public-api-ca-cert-path')
return benv
def _validate_environment(benv):
"""Make sure all the necessary attributes of an environment are set.
:raises InvalidEnvironmentError
"""
attrs = ('api_host', 'hsmproxy_host', 'public_api_host', 'network_host')
missing = []
for attr in attrs:
if getattr(benv, attr) is None:
missing.append(attr)
if len(missing) > 0:
raise InvalidEnvironmentError(missing)
def _unlink_noraise(path):
try:
os.unlink(path)
except OSError as e:
        if e.errno == errno.ENOENT:
pass
else:
log.exception("Failed unlinking %s", path)
except:
log.exception("Failed unlinking %s", path)
class CLIConfig(object):
"""CLIConfig exposes an interface that subcommands can use to retrieve
persistent configuration options.
"""
def __init__(self):
self._config = {
'current-environment': None,
'environments': {},
'options': {},
'version': VERSION,
'internal': {}
}
self._add_prod_env()
self._registered_options = collections.defaultdict(dict)
def _get_env(self, env_name):
if env_name not in self._config['environments']:
raise UnknownEnvironmentError(env_name)
d = self._config['environments'][env_name]
return _bracket_environment_from_dict(d)
def set_env(self, name, env):
"""Update the named environment.
:param name the environment name (e.g. stage)
:param env a BracketEnvironment instance
"""
d = _bracket_environment_to_dict(env)
self._config['environments'][name] = d
def get_current_env(self):
"""Return the current environment.
:return a tuple of environment name, BracketEnvironment
"""
env_name = self._config['current-environment']
return env_name, self.get_env(env_name)
def set_current_env(self, env_name):
"""Change the current environment
:param env_name the named env
"""
env = self._get_env(env_name)
_validate_environment(env)
self._config['current-environment'] = env_name
def get_env_meta(self):
"""Return all defined environments"""
meta = {}
for env_name in self._config['environments'].iterkeys():
meta[env_name] = {
'is_current': self._config['current-environment'] == env_name
}
return meta
def get_env(self, env_name):
"""Return the named environment
:param env_name a string
:return a BracketEnvironment instance
:raises UnknownEnvironmentError
"""
return self._get_env(env_name)
def unset_env(self, env_name):
"""Delete the named environment
:param env_name a string
:raises UnknownEnvironmentError
"""
self._get_env(env_name)
del self._config['environments'][env_name]
if self._config['current-environment'] == env_name:
self._config['current-environment'] = BRKT_HOSTED_ENV_NAME
def _check_option(self, option):
if option not in self._registered_options:
raise InvalidOptionError(option)
def register_option(self, option, desc):
self._registered_options[option] = desc
def registered_options(self):
return self._registered_options
def set_option(self, option, value):
"""Set the value for the supplied option.
:param option a dot-delimited option string
:param value the option value
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
cur[level] = {}
cur = cur[level]
cur[attr] = value
def get_option(self, option, default=None):
"""Fetch the value for the supplied option.
:param option a dot-delimited option string
:param default the value to be returned if option is not present
:return the option value
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
return default
cur = cur[level]
return cur.get(attr, default)
def _remove_empty_dicts(self, h):
to_remove = []
for k in h:
if isinstance(h[k], dict):
self._remove_empty_dicts(h[k])
if len(h[k]) == 0:
to_remove.append(k)
for k in to_remove:
del h[k]
def unset_option(self, option):
"""Unset the value for the supplied option.
:param option A dot-delimited option string
"""
self._check_option(option)
levels = option.split('.')
attr = levels.pop()
cur = self._config['options']
for level in levels:
if level not in cur:
return
cur = cur[level]
if attr in cur:
del cur[attr]
# Clean up any empty sub-sections
self._remove_empty_dicts(self._config['options'])
def set_internal_option(self, option, value):
self._config['internal'][option] = value
def get_internal_option(self, option, default=None):
return self._config['internal'].get(option, default)
def _migrate_config(self, config):
"""Handle migrating between different config versions"""
if config['version'] == 1:
config['environments'] = {}
config['current-environment'] = None
config['version'] = 2
if config['version'] == 2:
config['internal'] = {}
config['version'] = VERSION
return config
def _add_prod_env(self):
prod_env = brkt_cli.get_prod_brkt_env()
prod_dict = _bracket_environment_to_dict(prod_env)
self._config['environments'][BRKT_HOSTED_ENV_NAME] = prod_dict
if self._config.get('current-environment') is None:
self._config['current-environment'] = BRKT_HOSTED_ENV_NAME
def read(self, f=None):
"""Read the config from disk"""
try:
if not f:
f = open(CONFIG_PATH)
config = yaml.safe_load(f)
self._config = self._migrate_config(config)
self._add_prod_env()
except IOError as e:
if e.errno != errno.ENOENT:
raise
finally:
if f:
f.close()
def write(self, f):
"""Write the config to disk.
:param f A file-like object
"""
yaml.dump(self._config, f)
def save_config(self):
"""Save the current config to disk.
"""
try:
os.mkdir(CONFIG_DIR, 0755)
except OSError as e:
if e.errno != errno.EEXIST:
raise
f = tempfile.NamedTemporaryFile(delete=False, prefix='brkt_cli')
try:
self.write(f)
f.close()
except:
_unlink_noraise(f.name)
raise
try:
shutil.move(f.name, CONFIG_PATH)
except:
_unlink_noraise(f.name)
raise
class ConfigSubcommand(Subcommand):
def __init__(self, stdout=sys.stdout):
self.stdout = stdout
def name(self):
return 'config'
def register(self, subparsers, parsed_config):
self.parsed_config = parsed_config
config_parser = subparsers.add_parser(
self.name(),
description=(
'Display or update brkt-cli options stored in'
' ~/.brkt/config'),
help='Display or update brkt-cli options'
)
config_subparsers = config_parser.add_subparsers(
dest='config_subcommand',
# Hardcode the list, so that we don't expose subcommands that
# are still in development.
metavar='{list,set,get,unset,set-env,use-env,list-envs,get-env,'
'unset-env}'
)
# List all options
config_subparsers.add_parser(
'list',
help='Display the values of all options set in the config file',
description='Display the values of all options set in the config file')
# All the options available for retrieval/mutation
rows = []
descs = self.parsed_config.registered_options()
opts = sorted(descs.keys())
for opt in opts:
rows.append([opt, descs[opt]])
opts_table = render_table_rows(rows, row_prefix=' ')
epilog = "\n".join([
'supported options:',
'',
opts_table
])
# Set an option
set_parser = config_subparsers.add_parser(
'set',
help='Set the value for an option',
description='Set the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
set_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
set_parser.add_argument(
'value',
help='The option value')
# Get the value for an option
get_parser = config_subparsers.add_parser(
'get',
help='Get the value for an option',
description='Get the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
get_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
# Unset the value for an option
unset_parser = config_subparsers.add_parser(
'unset',
help='Unset the value for an option',
description='Unset the value for an option',
epilog=epilog,
formatter_class=argparse.RawDescriptionHelpFormatter)
unset_parser.add_argument(
'option',
help='The option name (e.g. encrypt-gcp-image.project)')
# Define or update an environment
set_env_parser = config_subparsers.add_parser(
'set-env',
help='Update the attributes of an environment',
description="""
Update the attributes of an environment
Environments are persisted in your configuration and can be activated via the
`use-env` config subcommand. This command is particularly helpful if you need
to work with multiple on-prem control-plane deployments. For example, we could
define stage and prod control planes hosted at stage.foo.com and prod.foo.com,
respectively, by executing:
> brkt config set-env stage --service-domain stage.foo.com
> brkt config set-env prod --service-domain prod.foo.com
We can switch between the environments using the `use-env` config subcommand
like so:
> brkt config use-env stage
We can determine the current environment using the `list-envs` config
subcommand:
> brkt config list-envs
brkt-hosted
prod
* stage
>
The leading `*' indicates that the `stage' environment is currently active.
""",
formatter_class=argparse.RawDescriptionHelpFormatter)
set_env_parser.add_argument(
'env_name',
help='The environment name (e.g. stage)')
set_env_parser.add_argument(
'--api-server',
help='The api server (host[:port]) the metavisor will connect to')
set_env_parser.add_argument(
'--key-server',
help='The key server (host[:port]) the metavisor will connect to')
set_env_parser.add_argument(
'--network-server',
help='The network server (host[:port]) the metavisor will connect to')
argutil.add_public_api_ca_cert(set_env_parser)
set_env_parser.add_argument(
'--public-api-server',
help='The public api (host[:port])')
set_env_parser.add_argument(
'--service-domain',
help=('Set server values from the service domain. This option '
'assumes that each server is resolvable via a hostname '
'rooted at service-domain. Specifically, api is expected '
'to live at yetiapi.<service-domain>, key-server at '
'hsmproxy.<service-domain>, network at '
'network.<service-domain>, and public-api-server at '
'api.<service-domain>.')
)
# Set the active environment
use_env_parser = config_subparsers.add_parser(
'use-env',
help='Set the active environment',
description='Set the active environment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
use_env_parser.add_argument(
'env_name',
help='The environment name (e.g. stage)')
# Display all defined environments
config_subparsers.add_parser(
'list-envs',
help='Display all environments',
description=(
"Display all environments. The leading `*' indicates"
" the currently active environment."))
# Get the details of a specific environment
get_env_parser = config_subparsers.add_parser(
'get-env',
help='Display the details of a specific environment',
description='Display the details of an environment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
get_env_parser.add_argument(
'env_name',
help='The environment name')
# Unset a specific environment
unset_env_parser = config_subparsers.add_parser(
'unset-env',
help='Delete an environment',
description='Delete an environment')
unset_env_parser.add_argument(
'env_name',
help='The environment name')
def _list_options(self):
"""Display the contents of the config"""
for opt in sorted(self.parsed_config.registered_options().keys()):
val = self.parsed_config.get_option(opt)
if val is not None:
line = "%s=%s\n" % (opt, val)
self.stdout.write(line)
return 0
def _get_option(self, opt):
try:
val = self.parsed_config.get_option(opt)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
if val:
self.stdout.write("%s\n" % (val,))
return 0
def _set_option(self, opt, val):
"""Set the specified option"""
try:
self.parsed_config.set_option(opt, val)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
return 0
def _unset_option(self, opt):
"""Unset the specified option"""
try:
self.parsed_config.unset_option(opt)
except InvalidOptionError:
raise ValidationError('Error: unknown option "%s".' % (opt,))
return 0
def _set_env(self, values):
"""Update attributes for the named environment"""
if values.env_name == BRKT_HOSTED_ENV_NAME:
raise ValidationError(
'Error: cannot modify environment ' + values.env_name)
try:
env = self.parsed_config.get_env(values.env_name)
except UnknownEnvironmentError:
env = brkt_cli.BracketEnvironment()
opt_attr = {
'api': 'api',
'key': 'hsmproxy',
'public_api': 'public_api',
'network': 'network',
}
for k in opt_attr.iterkeys():
            opt_name = k + '_server'
            endpoint = getattr(values, opt_name)
if endpoint is None:
continue
try:
host, port = parse_endpoint(endpoint)
except ValueError:
raise ValidationError('Error: Invalid value for option --' + k + '-server')
port = port or 443
setattr(env, opt_attr[k] + '_host', host)
setattr(env, opt_attr[k] + '_port', port)
if values.service_domain is not None:
env = brkt_cli.brkt_env_from_domain(values.service_domain)
env.public_api_ca_cert_path = values.public_api_ca_cert
self.parsed_config.set_env(values.env_name, env)
return 0
def _use_env(self, values):
"""Set the active environment"""
try:
self.parsed_config.set_current_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
except InvalidEnvironmentError, e:
attr_opt = {
'api_host': 'api-server',
'hsmproxy_host': 'key-server',
'public_api_host': 'public-api-server',
'network_host': 'network',
}
msg = ("Error: the environment %s is missing values for %s."
" Use `brkt config set-env` to set the appropriate values.")
opts = []
for attr in e.missing_keys:
opts.append(attr_opt[attr])
raise ValidationError(msg % (values.env_name, ', '.join(opts)))
def _list_envs(self):
"""Display all envs"""
meta = self.parsed_config.get_env_meta()
rows = []
for env_name in sorted(meta.keys()):
marker = ' '
if meta[env_name]['is_current']:
marker = '*'
rows.append((marker, env_name))
self.stdout.write(render_table_rows(rows) + "\n")
def _get_env(self, values):
"""Display the details of an environment"""
try:
env = self.parsed_config.get_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
attr_opt = {
'api': 'api',
'hsmproxy': 'key',
'public_api': 'public-api',
'network': 'network',
}
for k in sorted(attr_opt.keys()):
host = getattr(env, k + '_host')
if host is None:
continue
port = getattr(env, k + '_port')
self.stdout.write("%s-server=%s:%d\n" % (attr_opt[k], host, port))
if env.public_api_ca_cert_path:
self.stdout.write(
'public-api-ca-cert=%s\n' % env.public_api_ca_cert_path)
def _unset_env(self, values):
"""Delete the named environment"""
if values.env_name == BRKT_HOSTED_ENV_NAME:
raise ValidationError(
'Error: cannot delete environment ' + values.env_name)
try:
self.parsed_config.unset_env(values.env_name)
except UnknownEnvironmentError:
raise ValidationError('Error: unknown environment ' + values.env_name)
def run(self, values):
subcommand = values.config_subcommand
if subcommand == 'list':
self._list_options()
elif subcommand == 'set':
self._set_option(values.option, values.value)
self.parsed_config.save_config()
elif subcommand == 'get':
self._get_option(values.option)
elif subcommand == 'unset':
self._unset_option(values.option)
self.parsed_config.save_config()
elif subcommand == 'set-env':
self._set_env(values)
self.parsed_config.save_config()
elif subcommand == 'use-env':
self._use_env(values)
self.parsed_config.save_config()
elif subcommand == 'list-envs':
self._list_envs()
elif subcommand == 'get-env':
self._get_env(values)
elif subcommand == 'unset-env':
self._unset_env(values)
self.parsed_config.save_config()
return 0
def get_subcommands():
return [ConfigSubcommand()]
| apache-2.0 | 6,100,424,114,709,933,000 | 32.59854 | 91 | 0.578883 | false | 4.143114 | true | false | false |
sbussmann/kaggle-right-whale | Code/whaleutil.py | 1 | 3147 | """
2015 September 30
Shane Bussmann
Find the whale. Model the whale as a rectangle with aspect ratio = 3.0.
"""
import numpy as np
from skimage.color import rgb2gray, rgb2hsv
from scipy.ndimage import gaussian_filter
def xy_rotate(x, y, x0, y0, phi):
phirad = np.deg2rad(phi)
xnew = (x - x0) * np.cos(phirad) + (y - y0) * np.sin(phirad)
ynew = (y - y0) * np.cos(phirad) - (x - x0) * np.sin(phirad)
return (xnew,ynew)
def ellipse_2d(x, y, par):
(xnew,ynew) = xy_rotate(x, y, par[2], par[3], par[5])
r_ell_sq = ((xnew**2)*par[4] + (ynew**2)/par[4]) / np.abs(par[1])**2
ellipse = r_ell_sq.copy()
ellipse[:] = 0.
inside = r_ell_sq < 1
ellipse[inside] = par[0]
#import matplotlib.pyplot as plt
#plt.imshow(r_ell_sq, origin='lower', vmax=10*par[1])
#plt.colorbar()
#plt.contour(ellipse)
#plt.show()
return ellipse
def whale_2d(x, y, par):
# the head and body of the whale
e1 = ellipse_2d(x, y, par)
## the tail of the whale
#r1 = par[1] / 3.
#q1 = 0.5
#b1 = r1 * np.sqrt(q1)
#a0 = par[1] / np.sqrt(par[4])
#d = a0 + b1
#dx = d * np.cos(par[5])
#dy = d * np.sin(par[5])
#x1 = par[2] - dx
#y1 = par[3] - dy
#phi1 = par[5] - 90.
#par2 = [par[0], r1, x1, y1, q1, phi1]
#e2 = ellipse_2d(x, y, par2)
#import matplotlib.pyplot as plt
#plt.contour(e1)
#plt.contour(e2)
#plt.show()
#import pdb; pdb.set_trace()
#print(par)
#print(par2)
return e1# + e2
def color(im):
diff = 2 * im[:, :, 0] - im[:, :, 1] - im[:, :, 2]
    invdiff = diff.max() / diff
    # dividing by zero produces inf/NaN entries; x * 0 != 0 holds only for
    # those values, so this mask finds and zeroes them
    uhoh = invdiff * 0 != 0
    invdiff[uhoh] = 0
invdiff = gaussian_filter(invdiff, 20)
return invdiff
def lumin(im):
diff = im[:, :, 0] + im[:, :, 1] + im[:, :, 2]
return diff
def colorlumin(im):
#diff = rgb2hsv(im)
#diff = diff[:, :, 0]#
im = np.array(im).astype('float')
diff = 2 * im[:, :, 0] - im[:, :, 1] - im[:, :, 2]
print(np.median(diff))
imcolor = diff - np.median(diff)
colorthresh = np.percentile(imcolor, 97)
print("Found color threshold of " + str(colorthresh))
#invdiff = diff.max() / diff
#uhoh = invdiff * 0 != 0
#invdiff[uhoh] = 0
#invdiff = gaussian_filter(diff, 2)
#import matplotlib.pyplot as plt
#plt.hist(imcolor.flatten(), bins=100)
#plt.show()
#import pdb; pdb.set_trace()
diff = rgb2gray(im)
imlumin = diff.copy()
imlumin /= imlumin.max()
#plt.imshow(imlumin)
#plt.colorbar()
#plt.show()
# mask regions with a strong wave signature
waveindex = imlumin > 0.9
imcolor[waveindex] = imcolor.min()
#plt.imshow(imcolor)
#plt.colorbar()
#plt.show()
# first guess at whale region
#import matplotlib.pyplot as plt
#plt.imshow(imcolor)
#plt.colorbar()
#plt.show()
hicol = imcolor >= colorthresh
locol = imcolor < colorthresh
    imcolor[hicol] = 10  # np.abs(colorthresh)
#locol = imcolor < colorthresh
imcolor[locol] = 0
#plt.imshow(imcolor)
#plt.colorbar()
#plt.show()
#print(smallim.mean())
return (imcolor, imlumin, colorthresh)
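# Illustrative usage sketch (not in the original file); 'whale.jpg' is a
# hypothetical input image:
#
#   from skimage.io import imread
#   im = imread('whale.jpg')
#   imcolor, imlumin, colorthresh = colorlumin(im)
#   # imcolor is 10 inside the candidate whale region and 0 elsewhere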
| mit | -8,273,047,915,708,201,000 | 24.585366 | 72 | 0.568796 | false | 2.637888 | false | false | false |
vipod/pyzimbra | test/base.py | 3 | 2551 | # -*- coding: utf-8 -*-
"""
################################################################################
# Copyright (c) 2010, Ilgar Mashayev
#
# E-mail: [email protected]
# Website: http://github.com/ilgarm/pyzimbra
################################################################################
# This file is part of pyzimbra.
#
# Pyzimbra is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyzimbra is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyzimbra. If not, see <http://www.gnu.org/licenses/>.
################################################################################
@author: ilgar
"""
from test.util import load_test_properties
class BaseTest(object):
# --------------------------------------------------------------- properties
domain = property(lambda self: self._domain,
lambda self, v: setattr(self, '_domain', v))
hostname = property(lambda self: self._hostname,
lambda self, v: setattr(self, '_hostname', v))
domain_key = property(lambda self: self._domain_key,
lambda self, v: setattr(self, '_domain_key', v))
username = property(lambda self: self._username,
lambda self, v: setattr(self, '_username', v))
account_name = property(lambda self: self._account_name,
lambda self, v: setattr(self, '_account_name', v))
account_id = property(lambda self: self._account_id,
lambda self, v: setattr(self, '_account_id', v))
password = property(lambda self: self._password,
lambda self, v: setattr(self, '_password', v))
token = property(lambda self: self._token,
lambda self, v: setattr(self, '_token', v))
session_id = property(lambda self: self._session_id,
lambda self, v: setattr(self, '_session_id', v))
# ------------------------------------------------------------------ unbound
def setUp(self):
load_test_properties(self)
def tearDown(self):
pass
| lgpl-3.0 | 766,115,566,635,475,800 | 41.516667 | 80 | 0.53626 | false | 4.45979 | false | false | false |
praekelt/sideloader | skeleton/settings.py | 1 | 6214 | # Django settings for skeleton project.
import os, datetime, socket
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
def abspath(*args):
"""convert relative paths to absolute paths relative to PROJECT_ROOT"""
return os.path.join(PROJECT_ROOT, *args)
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'sideloader',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = abspath('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = abspath('static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# The placeholder below must be replaced with a key you generate yourself.
SECRET_KEY = 'please-change-me'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'skeleton.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'skeleton.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
abspath('templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'gunicorn',
'raven.contrib.django.raven_compat',
'social.apps.django_app.default',
'crispy_forms',
'sideloader',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Celery configuration options
BROKER_URL = 'redis://localhost:6379/0'
# Defer email sending to Celery, except if we're in debug mode,
# then just print the emails to stdout for debugging.
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_NEW_USER_REDIRECT_URL = '/'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
SESSION_COOKIE_AGE = 1209600
SESSION_SERIALIZER='django.contrib.sessions.serializers.PickleSerializer'
SIDELOADER_DOMAIN = socket.getfqdn()
SIDELOADER_FROM = 'Sideloader <no-reply@%s>' % SIDELOADER_DOMAIN
SIDELOADER_PACKAGEURL = "http://%s/packages" % SIDELOADER_DOMAIN
SLACK_TOKEN = None
SLACK_CHANNEL = ''
SLACK_HOST = 'foo.slack.com'
try:
from local_settings import *
except ImportError:
pass
| mit | 2,228,628,026,273,977,300 | 30.226131 | 88 | 0.701641 | false | 3.668241 | false | false | false |
tavallaie/pypot | pypot/server/snap.py | 1 | 10724 | import os
import shutil
import bottle
import socket
import re
import logging
from ast import literal_eval as make_tuple
from ..utils.appdirs import user_data_dir
from .server import AbstractServer
from .httpserver import EnableCors
logger = logging.getLogger(__name__)
def get_snap_user_projects_directory():
snap_user_projects_directory = user_data_dir('pypot', 'SnapRobotServer')
if not os.path.exists(snap_user_projects_directory):
os.makedirs(snap_user_projects_directory)
return snap_user_projects_directory
def find_local_ip():
# see here: http://stackoverflow.com/questions/166506/
return [(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close())
for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
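# Note: the UDP "connect" above never sends a packet; it only asks the kernel
# to pick the outgoing interface for 8.8.8.8, whose local address getsockname
# then returns.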
def set_snap_server_variables(host, port, snap_extension='.xml', path=None):
""" Allow to change dynamically port and host variable in xml Snap! project file"""
localdir = os.getcwd()
if path is None:
os.chdir(os.path.dirname(os.path.realpath(__file__)))
else:
os.chdir(path)
xml_files = [f for f in os.listdir('.') if f.endswith(snap_extension)]
for filename in xml_files:
with open(filename, 'r') as xf:
xml = xf.read()
with open(filename, 'w') as xf:
xml = re.sub(r'''<variable name="host"><l>[\s\S]*?<\/l><\/variable>''',
'''<variable name="host"><l>{}</l></variable>'''.format(host), xml)
xml = re.sub(r'''<variable name="port"><l>[\s\S]*?<\/l><\/variable>''',
'''<variable name="port"><l>{}</l></variable>'''.format(port), xml)
xf.write(xml)
os.chdir(localdir)
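# Example call (illustrative): patch all Snap! project files in the user
# projects directory so they point at this machine on port 8080:
#
#   set_snap_server_variables(find_local_ip(), 8080,
#                             path=get_snap_user_projects_directory())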
class SnapRobotServer(AbstractServer):
def __init__(self, robot, host, port, quiet=True):
AbstractServer.__init__(self, robot, host, port)
self.quiet = quiet
self.app = bottle.Bottle()
self.app.install(EnableCors())
rr = self.restfull_robot
        # Copy Snap files from the system directory to the user directory.
        # This avoids permission issues when pypot is installed via pip
        # into an admin-owned directory.
snap_system_projects_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'snap_projects')
xml_files = [os.path.join(snap_system_projects_directory, f)
for f in os.listdir(snap_system_projects_directory) if f.endswith('.xml')]
for xml_file in xml_files:
dst = os.path.join(get_snap_user_projects_directory(), os.path.basename(xml_file))
logger.warning('Src: {}, Dest {}'.format(xml_file, dst))
shutil.copyfile(xml_file, dst)
set_snap_server_variables(find_local_ip(), port, path=get_snap_user_projects_directory())
@self.app.get('/motors/<alias>')
def get_motors(alias):
return '/'.join(rr.get_motors_list(alias))
@self.app.get('/motor/<motor>/get/<register>')
def get_motor_register(motor, register):
return str(rr.get_motor_register_value(motor, register))
@self.app.get('/motors/get/positions')
def get_motors_positions():
            get_pos = lambda m: rr.get_motor_register_value(
                m, 'present_position')
            msg = ';'.join('{}'.format(get_pos(m))
                           for m in rr.get_motors_list())
            return msg
@self.app.get('/motors/alias')
def get_robot_aliases():
return '/'.join('{}'.format(alias) for alias in rr.get_motors_alias())
@self.app.get('/motors/set/goto/<motors_position_duration>')
def set_motors_goto(motors_position_duration):
""" Allow lot of motors position settings with a single http request
Be carefull: with lot of motors, it could overlap the GET max
lentgh of your web browser
"""
for m_settings in motors_position_duration.split(';'):
settings = m_settings.split(':')
rr.set_goto_position_for_motor(settings[0], float(settings[1]), float(settings[2]))
return 'Done!'
@self.app.get('/motors/set/registers/<motors_register_value>')
def set_motors_registers(motors_register_value):
""" Allow lot of motors register settings with a single http request
Be carefull: with lot of motors, it could overlap the GET max
lentgh of your web browser
"""
for m_settings in motors_register_value.split(';'):
settings = m_settings.split(':')
rr.set_motor_register_value(settings[0], settings[1], make_tuple(settings[2]))
return 'Done!'
# TODO : delete ?
@self.app.get('/motors/set/positions/<positions>')
def set_motors_positions(positions):
positions = map(lambda s: float(s), positions[:-1].split(';'))
for m, p in zip(rr.get_motors_list(), positions):
rr.set_motor_register_value(m, 'goal_position', p)
return 'Done!'
@self.app.get('/motor/<motor>/set/<register>/<value>')
def set_reg(motor, register, value):
rr.set_motor_register_value(motor, register, float(value))
return 'Done!'
@self.app.get('/motor/<motor>/goto/<position>/<duration>')
def set_goto(motor, position, duration):
rr.set_goto_position_for_motor(
motor, float(position), float(duration))
return 'Done!'
@self.app.get('/snap-blocks.xml')
def get_pypot_snap_blocks():
with open(os.path.join(get_snap_user_projects_directory(), 'pypot-snap-blocks.xml')) as f:
return f.read()
@self.app.get('/ip')
def get_ip():
return socket.gethostbyname(socket.gethostname())
@self.app.get('/reset-simulation')
def reset_simulation():
if hasattr(robot, 'reset_simulation'):
robot.reset_simulation()
return 'Done!'
@self.app.get('/primitives')
def get_primitives():
return '/'.join(rr.get_primitives_list())
@self.app.get('/primitives/running')
def get_running_primitives():
return '/'.join(rr.get_running_primitives_list())
@self.app.get('/primitive/<primitive>/start')
def start_primitive(primitive):
rr.start_primitive(primitive)
return 'Done!'
@self.app.get('/primitive/<primitive>/stop')
def stop_primitive(primitive):
rr.stop_primitive(primitive)
return 'Done!'
@self.app.get('/primitive/<primitive>/pause')
def pause_primitive(primitive):
rr.pause_primitive(primitive)
return 'Done!'
@self.app.get('/primitive/<primitive>/resume')
def resume_primitive(primitive):
rr.resume_primitive(primitive)
return 'Done!'
@self.app.get('/primitive/<primitive>/properties')
def get_primitive_properties_list(primitive):
return '/'.join(rr.get_primitive_properties_list(primitive))
@self.app.get('/primitive/<primitive>/get/<property>')
def get_primitive_property(primitive, property):
return rr.get_primitive_property(primitive, property)
@self.app.get('/primitive/<primitive>/set/<property>/<value>')
def set_primitive_property(primitive, property, value):
return rr.set_primitive_property(primitive, property, value)
@self.app.get('/primitive/<primitive>/methodes')
def get_primitive_methodes_list(primitive):
return '/'.join(rr.get_primitive_methods_list(primitive))
@self.app.get('/primitive/<primitive>/call/<method>/<args>')
def call_primitive_methode(primitive, method, args):
kwargs = dict(item.split(":") for item in args.split(";"))
return rr._call_primitive_method(primitive, method, **kwargs)
        # Hacks (not RESTful) to record movements
@self.app.get('/primitive/MoveRecorder/<move_name>/start')
def start_move_recorder(move_name):
rr.start_move_recorder(move_name)
return 'Done!'
@self.app.get('/primitive/MoveRecorder/<move_name>/stop')
def stop_move_recorder(move_name):
rr.stop_move_recorder(move_name)
return 'Done!'
@self.app.get('/primitive/MoveRecorder/<move_name>/attach/<motors>')
def attach_move_recorder(move_name, motors):
rr.attach_move_recorder(move_name, motors.split(';'))
return 'Done!'
@self.app.get('/primitive/MoveRecorder/<move_name>/get_motors')
def get_move_recorder_motors(move_name):
motors = rr.get_move_recorder_motors(move_name)
return '/'.join(motors) if motors is not None else 'None'
# Obsolete ?
@self.app.get('/primitive/MoveRecorder/<move_name>/start/<motors>')
def start_move_recorder_with_motors(move_name, motors):
            raise DeprecationWarning  # everything below is unreachable, kept for reference
rr.start_move_recorder(move_name, motors.split(';'))
return 'Done!'
@self.app.get('/primitive/MoveRecorder/<move_name>/remove')
def remove_move_record(move_name):
rr.remove_move_record(move_name)
return 'Done!'
@self.app.get('/primitive/MoveRecorder')
def get_available_records():
return '/'.join(rr.get_available_record_list())
@self.app.get('/primitive/MovePlayer')
def get_available_records2():
return '/'.join(rr.get_available_record_list())
@self.app.get('/primitive/MovePlayer/<move_name>/start')
def start_move_player(move_name):
return str(rr.start_move_player(move_name))
@self.app.get('/primitive/MovePlayer/<move_name>/start/<move_speed>')
def start_move_player_with_speed(move_name, move_speed):
return str(rr.start_move_player(move_name, float(move_speed)))
@self.app.get('/primitive/MovePlayer/<move_name>/start/<move_speed>/backwards')
def start_move_player_backwards_with_speed(move_name, move_speed):
return str(rr.start_move_player(move_name, float(move_speed), backwards=True))
@self.app.get('/primitive/MovePlayer/<move_name>/stop')
def stop_move_player(move_name):
rr.stop_primitive('_{}_player'.format(move_name))
return 'Done!'
def run(self):
bottle.run(self.app, host=self.host, port=self.port, quiet=self.quiet)
| gpl-3.0 | 4,437,873,530,799,601,700 | 40.727626 | 114 | 0.596512 | false | 3.79745 | false | false | false |
MaterialsDiscovery/PyChemia | pychemia/analysis/matching.py | 1 | 5441 | import numpy as np
from pychemia.utils.mathematics import lcm, shortest_triple_set
from pychemia import Structure
import itertools
class StructureMatch:
def __init__(self, structure1, structure2):
"""
Creates a structure match between 2 structures
        The structures will be changed to match their number of
        atoms and their internal atom ordering such that the distances
        between atoms in equivalent positions in both structures
        are minimized.
:param structure1: (Structure)
:param structure2: (Structure)
"""
assert (isinstance(structure1, Structure))
assert (isinstance(structure2, Structure))
assert structure1.is_perfect
assert structure2.is_perfect
self.structure1 = structure1.copy()
self.structure2 = structure2.copy()
self.base_lattice = self.structure1.lattice
def match_size(self):
assert self.structure1.is_crystal
assert self.structure2.is_crystal
gcd1 = self.structure1.get_composition().gcd
gcd2 = self.structure2.get_composition().gcd
sts = np.array(shortest_triple_set(lcm(gcd1, gcd2) / gcd1)).astype(int)
supercell_multiples = sts[self.structure1.lattice.lengths.argsort()[::-1]]
self.structure1 = self.structure1.supercell(supercell_multiples)
sts = np.array(shortest_triple_set(lcm(gcd1, gcd2) / gcd2))
supercell_multiples = sts[self.structure2.lattice.lengths.argsort()[::-1]]
self.structure2 = self.structure2.supercell(supercell_multiples)
def match_shape(self):
self.structure1.canonical_form()
self.structure2.canonical_form()
assert (self.structure1.symbols == self.structure2.symbols)
def match_atoms(self):
if self.structure1.natom != self.structure2.natom:
raise ValueError('Match the size first')
best = {}
for specie in self.structure1.species:
selection = np.array(self.structure1.symbols) == specie
distance_matrix, close_images = self.base_lattice.minimal_distances(self.structure1.reduced[selection],
self.structure2.reduced[selection])
min_trace = 1E10
best[specie] = None
if self.structure1.natom < 7:
for i in itertools.permutations(range(len(distance_matrix))):
if distance_matrix[:, np.array(i)].trace() < min_trace:
min_trace = distance_matrix[:, np.array(i)].trace()
best[specie] = i
else:
                # Only consider permutations swapping 2 positions
                # (then double swaps of 4 positions below)
if len(distance_matrix) > 1:
for ipar in itertools.permutations(range(len(distance_matrix)), 2):
i = list(range(len(distance_matrix)))
i[ipar[0]] = ipar[1]
i[ipar[1]] = ipar[0]
if distance_matrix[:, np.array(i)].trace() < min_trace:
min_trace = distance_matrix[:, np.array(i)].trace()
best[specie] = i
for ipar in itertools.permutations(range(len(distance_matrix)), 4):
i = list(range(len(distance_matrix)))
i[ipar[0]] = ipar[1]
i[ipar[1]] = ipar[0]
i[ipar[2]] = ipar[3]
i[ipar[3]] = ipar[2]
if distance_matrix[:, np.array(i)].trace() < min_trace:
min_trace = distance_matrix[:, np.array(i)].trace()
best[specie] = i
else:
best[specie] = [0]
print('For specie %s best permutation is %s' % (specie, str(best[specie])))
best_permutation = np.zeros(self.structure1.natom, dtype=int)
index = 0
while index < self.structure1.natom:
specie = self.structure1.symbols[index]
selection = np.array(self.structure1.symbols) == specie
best_permutation[selection] = index + np.array(best[specie])
index += len(best[specie])
self.structure2.sort_sites_using_list(best_permutation)
def reduced_displacement(self):
assert (self.structure1.symbols == self.structure2.symbols)
assert (self.structure1.nsites == self.structure2.nsites)
assert (self.structure1.natom == self.structure2.natom)
ret = np.zeros((self.structure1.nsites, 3))
distance_matrix, close_images = self.base_lattice.minimal_distances(self.structure1.reduced,
self.structure2.reduced)
for i in range(self.structure1.nsites):
x1 = self.structure1.reduced[i]
x2 = self.structure2.reduced[i] + close_images[i, i]
ret[i] = x2 - x1
return ret
def cell_displacement(self):
return np.dot(self.structure1.cell, np.linalg.inv(self.structure2.cell))
def cartesian_distances(self):
rd = self.reduced_displacement()
ret = np.zeros(self.structure1.nsites)
for i in range(self.structure1.nsites):
ret[i] = np.dot(np.dot(rd[i], self.base_lattice.metric), rd[i])
ret[i] = np.sqrt(ret[i])
return ret
| mit | -5,938,888,186,492,161,000 | 40.853846 | 115 | 0.570851 | false | 3.986081 | false | false | false |
rosswhitfield/mantid | scripts/Diffraction/isis_powder/hrpd.py | 3 | 9155 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
from isis_powder.abstract_inst import AbstractInst
from isis_powder.routines import absorb_corrections, common, instrument_settings
from isis_powder.hrpd_routines import hrpd_advanced_config, hrpd_algs, hrpd_param_mapping
import mantid.simpleapi as mantid
# A bug on the instrument when recording historic NeXus files (< 2015) caused
# corrupted data. Use raw files for now until sufficient time has passed and old
# data is unlikely to be reanalysed.
RAW_DATA_EXT = '.raw'
# Constants
PROMPT_PULSE_INTERVAL = 20000.0
PROMPT_PULSE_RIGHT_WIDTH = 140.0
PROMPT_PULSE_LEFT_WIDTH = 30.0
class HRPD(AbstractInst):
def __init__(self, **kwargs):
self._inst_settings = instrument_settings.InstrumentSettings(
param_map=hrpd_param_mapping.attr_mapping, kwargs=kwargs,
adv_conf_dict=hrpd_advanced_config.get_all_adv_variables())
super(HRPD, self).__init__(user_name=self._inst_settings.user_name,
calibration_dir=self._inst_settings.calibration_dir,
output_dir=self._inst_settings.output_dir,
inst_prefix="HRPD")
self._cached_run_details = {}
self._sample_details = None
def focus(self, **kwargs):
self._switch_tof_window_inst_settings(kwargs.get("window"))
self._inst_settings.update_attributes(kwargs=kwargs)
return self._focus(
run_number_string=self._inst_settings.run_number, do_van_normalisation=self._inst_settings.do_van_norm,
do_absorb_corrections=self._inst_settings.do_absorb_corrections)
def create_vanadium(self, **kwargs):
self._switch_tof_window_inst_settings(kwargs.get("window"))
self._inst_settings.update_attributes(kwargs=kwargs)
return self._create_vanadium(run_number_string=self._inst_settings.run_in_range,
do_absorb_corrections=self._inst_settings.do_absorb_corrections)
def set_sample_details(self, **kwargs):
kwarg_name = "sample"
sample_details_obj = common.dictionary_key_helper(
dictionary=kwargs, key=kwarg_name,
exception_msg="The argument containing sample details was not found. Please"
" set the following argument: {}".format(kwarg_name))
self._sample_details = sample_details_obj
def mask_prompt_pulses_if_necessary(self, ws_list):
for ws in ws_list:
self._mask_prompt_pulses(ws)
def should_subtract_empty_inst(self):
return self._inst_settings.subtract_empty_inst
def create_solid_angle_corrections(self, vanadium, run_details):
"""
        Creates the solid angle corrections from a vanadium run.
        Only applicable on HRPD; otherwise returns None.
        :param vanadium: the vanadium workspace used to create the corrections
        :param run_details: the run details to use
"""
settings = self._inst_settings
if not settings.do_solid_angle:
return
solid_angle = mantid.SolidAngle(InputWorkspace=vanadium)
solid_angle = mantid.Scale(InputWorkspace=solid_angle, Factor=100, Operation='Multiply')
eff = mantid.Divide(LHSWorkspace=vanadium, RHSWorkspace=solid_angle)
eff = mantid.ConvertUnits(InputWorkspace=eff, Target='Wavelength')
integration_range = settings.eff_integration_range
# use full range if no range is supplied
integration_range = integration_range if integration_range is not None else (None, None)
eff = mantid.Integration(InputWorkspace=eff,
RangeLower=integration_range[0],
RangeUpper=integration_range[1])
correction = mantid.Multiply(LHSWorkspace=solid_angle, RHSWorkspace=eff)
correction = mantid.Scale(InputWorkspace=correction, Factor=1e-5,
Operation='Multiply')
name = "sac" + common.generate_splined_name(run_details.run_number, [])
path = run_details.van_paths
mantid.SaveNexus(InputWorkspace=correction, Filename=os.path.join(path, name))
common.remove_intermediate_workspace(eff)
common.remove_intermediate_workspace(correction)
def get_solid_angle_corrections(self, vanadium, run_details):
if not self._inst_settings.do_solid_angle:
return
name = "sac" + common.generate_splined_name(vanadium, [])
path = run_details.van_paths
try:
            solid_angle = mantid.Load(Filename=os.path.join(path, name))
return solid_angle
except ValueError:
raise RuntimeError("Could not find " + os.path.join(path, name)+" please run create_vanadium with "
"\"do_solid_angle_corrections=True\"")
def _generate_input_file_name(self, run_number, file_ext=""):
"""
Generates a name which Mantid uses within Load to find the file.
:param run_number: The run number to convert into a valid format for Mantid
:param file_ext: An optional file extension to add to force a particular format
:return: A filename that will allow Mantid to find the correct run for that instrument.
"""
if not file_ext:
file_ext = RAW_DATA_EXT
return self._generate_inst_filename(run_number=run_number, file_ext=file_ext)
def _apply_absorb_corrections(self, run_details, ws_to_correct):
if self._is_vanadium:
return hrpd_algs.calculate_van_absorb_corrections(
ws_to_correct=ws_to_correct, multiple_scattering=self._inst_settings.multiple_scattering)
elif self._sample_details is None:
raise RuntimeError("Absorption corrections cannot be run without sample details."
" Please set sample details using set_sample before running absorption corrections.")
elif self._sample_details.shape_type() == "slab":
return hrpd_algs.calculate_slab_absorb_corrections(ws_to_correct=ws_to_correct,
sample_details_obj=self._sample_details)
else:
return absorb_corrections.run_cylinder_absorb_corrections(
ws_to_correct=ws_to_correct, multiple_scattering=self._inst_settings.multiple_scattering,
sample_details_obj=self._sample_details, is_vanadium=self._is_vanadium)
def _crop_banks_to_user_tof(self, focused_banks):
return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.tof_cropping_values)
def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
return common.crop_in_tof(ws_to_crop=van_ws_to_crop, x_min=self._inst_settings.van_tof_cropping[0],
x_max=self._inst_settings.van_tof_cropping[-1])
def _get_instrument_bin_widths(self):
return self._inst_settings.focused_bin_widths
def _get_run_details(self, run_number_string):
run_number_string_key = self._generate_run_details_fingerprint(run_number_string,
self._inst_settings.file_extension)
if run_number_string_key in self._cached_run_details:
return self._cached_run_details[run_number_string_key]
self._cached_run_details[run_number_string_key] = hrpd_algs.get_run_details(
run_number_string=run_number_string, inst_settings=self._inst_settings, is_vanadium=self._is_vanadium)
return self._cached_run_details[run_number_string_key]
def _mask_prompt_pulses(self, ws):
"""
HRPD has a long flight path from the moderator resulting
in sharp peaks from the proton pulse that maintain their
sharp resolution. Here we mask these pulses out that occur
at 20ms intervals.
:param ws: The workspace containing the pulses. It is
masked in place.
"""
        # The number of pulses can vary depending on the data range.
        # Compute which 20 ms multiples fall within the data's TOF range.
x_data = ws.readX(0)
pulse_min = int(round(x_data[0]) / PROMPT_PULSE_INTERVAL) + 1
pulse_max = int(round(x_data[-1]) / PROMPT_PULSE_INTERVAL) + 1
for i in range(pulse_min, pulse_max):
centre = PROMPT_PULSE_INTERVAL * float(i)
mantid.MaskBins(InputWorkspace=ws, OutputWorkspace=ws,
XMin=centre - PROMPT_PULSE_LEFT_WIDTH,
XMax=centre + PROMPT_PULSE_RIGHT_WIDTH)
def _switch_tof_window_inst_settings(self, tof_window):
self._inst_settings.update_attributes(
advanced_config=hrpd_advanced_config.get_tof_window_dict(tof_window=tof_window))
| gpl-3.0 | -5,944,348,338,016,820,000 | 48.755435 | 116 | 0.64391 | false | 3.744376 | false | false | false |
fengyuanjs/catawampus | dm/storage.py | 6 | 15462 | #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TR-069 has mandatory attribute names that don't comply with policy
#pylint: disable-msg=C6409
"""Implementation of tr-140 Storage Services objects."""
__author__ = '[email protected] (Denton Gentry)'
import ctypes
import fcntl
import os
import os.path
import re
import subprocess
import tr.core
import tr.tr140_v1_1
import tr.x_catawampus_storage_1_0
BASESTORAGE = tr.x_catawampus_storage_1_0.X_CATAWAMPUS_ORG_Storage_v1_0.StorageService
class MtdEccStats(ctypes.Structure):
"""<mtd/mtd-abi.h> struct mtd_ecc_stats."""
_fields_ = [('corrected', ctypes.c_uint32),
('failed', ctypes.c_uint32),
('badblocks', ctypes.c_uint32),
('bbtblocks', ctypes.c_uint32)]
def _GetMtdStats(mtddev):
"""Return the MtdEccStats for the given mtd device.
Arguments:
mtddev: the string path to the device, ex: '/dev/mtd14'
Raises:
IOError: if the ioctl fails.
Returns:
an MtdEccStats.
"""
ECCGETSTATS = 0x40104d12 # ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
with open(mtddev, 'r') as f:
ecc = MtdEccStats()
if fcntl.ioctl(f, ECCGETSTATS, ctypes.addressof(ecc)) != 0:
raise IOError('ECCGETSTATS failed')
return ecc
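# Illustrative usage (mtd14 is the example device from the docstring above;
# any /dev/mtdN node works the same way):
#
#   stats = GETMTDSTATS('/dev/mtd14')
#   print stats.corrected, stats.failed  # ECC corrections and failures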
# Unit tests can override these
GETMTDSTATS = _GetMtdStats
PROC_FILESYSTEMS = '/proc/filesystems'
PROC_MOUNTS = '/proc/mounts'
SLASHDEV = '/dev/'
SMARTCTL = '/usr/sbin/smartctl'
STATVFS = os.statvfs
SYS_BLOCK = '/sys/block/'
SYS_UBI = '/sys/class/ubi/'
def _FsType(fstype):
supported = {'vfat': 'FAT32', 'ext2': 'ext2', 'ext3': 'ext3',
'ext4': 'ext4', 'msdos': 'FAT32', 'xfs': 'xfs',
'reiserfs': 'REISER'}
if fstype in supported:
return supported[fstype]
else:
return 'X_CATAWAMPUS-ORG_' + fstype
def _IsSillyFilesystem(fstype):
"""Filesystems which are not interesting to export to the ACS."""
SILLY = frozenset(['devtmpfs', 'proc', 'sysfs', 'usbfs', 'devpts',
'rpc_pipefs', 'autofs', 'nfsd', 'binfmt_misc', 'fuseblk'])
return fstype in SILLY
def _GetFieldFromOutput(prefix, output, default=''):
"""Search output for line of the form 'Foo: Bar', return 'Bar'."""
  field_re = re.compile(prefix + r'\s*(\S+)')
for line in output.splitlines():
result = field_re.search(line)
if result is not None:
return result.group(1).strip()
return default
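# For example (illustrative): given smartctl output containing the line
# 'Serial Number: WD-1234', _GetFieldFromOutput('Serial Number:', output)
# returns 'WD-1234'.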
def _ReadOneLine(filename, default):
"""Read one line from a file. Return default if anything fails."""
try:
f = open(filename, 'r')
return f.readline().strip()
except IOError:
return default
def IntFromFile(filename):
"""Read one line from a file and return an int, or zero if an error occurs."""
try:
buf = _ReadOneLine(filename, '0')
return int(buf)
except ValueError:
return 0
class LogicalVolumeLinux26(BASESTORAGE.LogicalVolume):
"""Implementation of tr-140 StorageService.LogicalVolume for Linux FS."""
def __init__(self, rootpath, fstype):
BASESTORAGE.LogicalVolume.__init__(self)
self.rootpath = rootpath
self.fstype = fstype
self.Unexport('Alias')
self.Unexport('Encrypted')
self.Unexport('ThresholdReached')
self.Unexport('PhysicalReference')
self.FolderList = {}
self.ThresholdLimit = 0
@property
def Name(self):
return self.rootpath
@property
def Status(self):
return 'Online'
@property
def Enable(self):
return True
@property
def FileSystem(self):
return self.fstype
# TODO(dgentry) need @sessioncache decorator
def _GetStatVfs(self):
return STATVFS(self.rootpath)
@property
def Capacity(self):
vfs = self._GetStatVfs()
return int(vfs.f_blocks * vfs.f_bsize / 1024 / 1024)
@property
def ThresholdReached(self):
vfs = self._GetStatVfs()
require = self.ThresholdLimit * 1024 * 1024
avail = vfs.f_bavail * vfs.f_bsize
return True if avail < require else False
@property
def UsedSpace(self):
vfs = self._GetStatVfs()
b_used = vfs.f_blocks - vfs.f_bavail
return int(b_used * vfs.f_bsize / 1024 / 1024)
@property
def X_CATAWAMPUS_ORG_ReadOnly(self):
ST_RDONLY = 0x0001
vfs = self._GetStatVfs()
return True if vfs.f_flag & ST_RDONLY else False
@property
def FolderNumberOfEntries(self):
return len(self.FolderList)
class PhysicalMediumDiskLinux26(BASESTORAGE.PhysicalMedium):
"""tr-140 PhysicalMedium implementation for non-removable disks."""
CONNECTION_TYPES = frozenset(
['USB 1.1', 'USB 2.0', 'IEEE1394', 'IEEE1394b', 'IDE', 'EIDE',
'ATA/33', 'ATA/66', 'ATA/100', 'ATA/133', 'SATA/150', 'SATA/300',
'SCSI-1', 'Fast SCSI', 'Fast-Wide SCSI', 'Ultra SCSI', 'Ultra Wide SCSI',
'Ultra2 SCSI', 'Ultra2 Wide SCSI', 'Ultra3 SCSI', 'Ultra-320 SCSI',
'Ultra-640 SCSI', 'SSA', 'SSA-40', 'Fibre Channel'])
def __init__(self, dev, conn_type=None):
BASESTORAGE.PhysicalMedium.__init__(self)
self.dev = dev
self.name = dev
self.Unexport('Alias')
# TODO(dgentry) read SMART attribute for PowerOnHours
self.Unexport('Uptime')
# TODO(dgentry) What does 'Standby' or 'Offline' mean?
self.Unexport('Status')
if conn_type is None:
      # transport is really, really hard to infer programmatically.
# If platform code doesn't provide it, don't try to guess.
self.Unexport('ConnectionType')
else:
# Provide a hint to the platform code: use a valid enumerated string,
# or define a vendor extension. Don't just make something up.
      assert conn_type[0:2] == 'X_' or conn_type in self.CONNECTION_TYPES
self.conn_type = conn_type
# TODO(dgentry) need @sessioncache decorator
def _GetSmartctlOutput(self):
"""Return smartctl info and health output."""
dev = SLASHDEV + self.dev
smart = subprocess.Popen([SMARTCTL, '--info', '--health', dev],
stdout=subprocess.PIPE)
out, _ = smart.communicate(None)
return out
def GetName(self):
return self.name
def SetName(self, value):
self.name = value
Name = property(GetName, SetName, None, 'PhysicalMedium.Name')
@property
def Vendor(self):
filename = SYS_BLOCK + '/' + self.dev + '/device/vendor'
vendor = _ReadOneLine(filename=filename, default='')
# /sys/block/?da/device/vendor is often 'ATA'. Not useful.
return '' if vendor == 'ATA' else vendor
@property
def Model(self):
filename = SYS_BLOCK + '/' + self.dev + '/device/model'
return _ReadOneLine(filename=filename, default='')
@property
def SerialNumber(self):
return _GetFieldFromOutput(prefix='Serial Number:',
output=self._GetSmartctlOutput(),
default='')
@property
def FirmwareVersion(self):
return _GetFieldFromOutput(prefix='Firmware Version:',
output=self._GetSmartctlOutput(),
default='')
@property
def ConnectionType(self):
return self.conn_type
@property
def Removable(self):
return False
@property
def Capacity(self):
"""Return capacity in Megabytes."""
filename = SYS_BLOCK + '/' + self.dev + '/size'
size = _ReadOneLine(filename=filename, default='0')
try:
# TODO(dgentry) Do 4k sector drives populate size in 512 byte blocks?
return int(size) * 512 / 1048576
except ValueError:
return 0
@property
def SMARTCapable(self):
capable = _GetFieldFromOutput(prefix='SMART support is: Enab',
output=self._GetSmartctlOutput(),
default=None)
return True if capable else False
@property
def Health(self):
health = _GetFieldFromOutput(
prefix='SMART overall-health self-assessment test result:',
output=self._GetSmartctlOutput(),
default='')
if health == 'PASSED':
return 'OK'
elif health.find('FAIL') >= 0:
return 'Failing'
else:
return 'Error'
@property
def HotSwappable(self):
filename = SYS_BLOCK + '/' + self.dev + '/removable'
removable = _ReadOneLine(filename=filename, default='0').strip()
return False if removable == '0' else True
class FlashSubVolUbiLinux26(BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.SubVolume):
"""Catawampus Storage Flash SubVolume implementation for UBI volumes."""
def __init__(self, ubivol):
BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.SubVolume.__init__(self)
self.ubivol = ubivol
@property
def DataMBytes(self):
bytesiz = IntFromFile(os.path.join(SYS_UBI, self.ubivol, 'data_bytes'))
return int(bytesiz / 1024 / 1024)
@property
def Name(self):
return _ReadOneLine(os.path.join(SYS_UBI, self.ubivol, 'name'), self.ubivol)
@property
def Status(self):
corr = IntFromFile(os.path.join(SYS_UBI, self.ubivol, 'corrupted'))
return 'OK' if corr == 0 else 'Corrupted'
class FlashMediumUbiLinux26(BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia):
"""Catawampus Storage FlashMedium implementation for UBI volumes."""
def __init__(self, ubiname):
BASESTORAGE.X_CATAWAMPUS_ORG_FlashMedia.__init__(self)
self.ubiname = ubiname
self.SubVolumeList = {}
num = 0
for i in range(128):
subvolname = ubiname + '_' + str(i)
try:
if os.stat(os.path.join(SYS_UBI, self.ubiname, subvolname)):
self.SubVolumeList[str(num)] = FlashSubVolUbiLinux26(subvolname)
num += 1
except OSError:
pass
@property
def BadEraseBlocks(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'bad_peb_count'))
@property
def CorrectedErrors(self):
mtdnum = IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'mtd_num'))
ecc = GETMTDSTATS(os.path.join(SLASHDEV, 'mtd' + str(mtdnum)))
return ecc.corrected
@property
def EraseBlockSize(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'eraseblock_size'))
@property
def IOSize(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'min_io_size'))
@property
def MaxEraseCount(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'max_ec'))
@property
def SubVolumeNumberOfEntries(self):
return len(self.SubVolumeList)
@property
def Name(self):
return self.ubiname
@property
def ReservedEraseBlocks(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'reserved_for_bad'))
@property
def TotalEraseBlocks(self):
return IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'total_eraseblocks'))
@property
def UncorrectedErrors(self):
mtdnum = IntFromFile(os.path.join(SYS_UBI, self.ubiname, 'mtd_num'))
ecc = GETMTDSTATS(os.path.join(SLASHDEV, 'mtd' + str(mtdnum)))
return ecc.failed
class CapabilitiesNoneLinux26(BASESTORAGE.Capabilities):
"""Trivial tr-140 StorageService.Capabilities, all False."""
def __init__(self):
BASESTORAGE.Capabilities.__init__(self)
@property
def FTPCapable(self):
return False
@property
def HTTPCapable(self):
return False
@property
def HTTPSCapable(self):
return False
@property
def HTTPWritable(self):
return False
@property
def SFTPCapable(self):
return False
@property
def SupportedFileSystemTypes(self):
"""Returns possible filesystems.
    Parses /proc/filesystems, omits any defined as uninteresting in
    _IsSillyFilesystem(), and returns the rest.
Returns:
a string of comma-separated filesystem types.
"""
    fslist = set()
    with open(PROC_FILESYSTEMS) as f:
      for line in f:
        if line.find('nodev') >= 0:
          # rule of thumb to skip internal, non-interesting filesystems
          continue
        fstype = line.strip()
        if _IsSillyFilesystem(fstype):
          continue
        fslist.add(_FsType(fstype))
    return ','.join(sorted(fslist, key=str.lower))
@property
def SupportedNetworkProtocols(self):
return ''
@property
def SupportedRaidTypes(self):
return ''
@property
def VolumeEncryptionCapable(self):
return False
class StorageServiceLinux26(BASESTORAGE):
"""Implements a basic tr-140 for Linux 2.6-ish systems.
This class implements no network file services, it only exports
the LogicalVolume information.
"""
def __init__(self):
BASESTORAGE.__init__(self)
self.Capabilities = CapabilitiesNoneLinux26()
self.Unexport('Alias')
self.Unexport(objects='NetInfo')
self.Unexport(objects='NetworkServer')
self.Unexport(objects='FTPServer')
self.Unexport(objects='SFTPServer')
self.Unexport(objects='HTTPServer')
self.Unexport(objects='HTTPSServer')
self.PhysicalMediumList = {}
self.StorageArrayList = {}
self.LogicalVolumeList = tr.core.AutoDict(
'LogicalVolumeList', iteritems=self.IterLogicalVolumes,
getitem=self.GetLogicalVolumeByIndex)
self.UserAccountList = {}
self.UserGroupList = {}
self.X_CATAWAMPUS_ORG_FlashMediaList = {}
@property
def Enable(self):
# TODO(dgentry): tr-140 says this is supposed to be writable
return True
@property
def PhysicalMediumNumberOfEntries(self):
return len(self.PhysicalMediumList)
@property
def StorageArrayNumberOfEntries(self):
return len(self.StorageArrayList)
@property
def LogicalVolumeNumberOfEntries(self):
return len(self.LogicalVolumeList)
@property
def UserAccountNumberOfEntries(self):
return len(self.UserAccountList)
@property
def UserGroupNumberOfEntries(self):
return len(self.UserGroupList)
@property
def X_CATAWAMPUS_ORG_FlashMediaNumberOfEntries(self):
return len(self.X_CATAWAMPUS_ORG_FlashMediaList)
def _ParseProcMounts(self):
"""Return list of (mount point, filesystem type) tuples."""
mounts = dict()
try:
f = open(PROC_MOUNTS)
except IOError:
return []
    with f:
      for line in f:
        fields = line.split()
        # ex: /dev/mtdblock9 / squashfs ro,relatime 0 0
        if len(fields) < 6:
          continue
        fsname = fields[0]
        mountpoint = fields[1]
        fstype = fields[2]
        if fsname == 'none' or _IsSillyFilesystem(fstype):
          continue
        mounts[mountpoint] = _FsType(fstype)
    return sorted(mounts.items())
def GetLogicalVolume(self, fstuple):
"""Get an LogicalVolume object for a mounted filesystem."""
(mountpoint, fstype) = fstuple
return LogicalVolumeLinux26(mountpoint, fstype)
def IterLogicalVolumes(self):
"""Retrieves a list of all mounted filesystems."""
fstuples = self._ParseProcMounts()
for idx, fstuple in enumerate(fstuples):
yield idx, self.GetLogicalVolume(fstuple)
def GetLogicalVolumeByIndex(self, index):
fstuples = self._ParseProcMounts()
if index >= len(fstuples):
raise IndexError('No such object LogicalVolume.{0}'.format(index))
return self.GetLogicalVolume(fstuples[index])
def main():
pass
if __name__ == '__main__':
main()
| apache-2.0 | -1,964,247,422,968,195,000 | 27.475138 | 86 | 0.667895 | false | 3.429142 | false | false | false |
polyaxon/polyaxon | core/polyaxon/polytune/matrix/utils.py | 1 | 7553 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
from marshmallow import ValidationError
from polyaxon.polyflow import (
V1HpChoice,
V1HpGeomSpace,
V1HpLinSpace,
V1HpLogNormal,
V1HpLogSpace,
V1HpLogUniform,
V1HpNormal,
V1HpPChoice,
V1HpQLogNormal,
V1HpQLogUniform,
V1HpQNormal,
V1HpQUniform,
V1HpRange,
V1HpUniform,
)
from polyaxon.polyflow.matrix.params import pchoice
from polyaxon.polytune.matrix import dist
def space_sample(value, size, rand_generator):
size = None if size == 1 else size
rand_generator = rand_generator or np.random
try:
return rand_generator.choice(value, size=size)
except ValueError:
idx = rand_generator.randint(0, len(value))
return value[idx]
def dist_sample(fct, value, size, rand_generator):
size = None if size == 1 else size
rand_generator = rand_generator or np.random
value = copy.deepcopy(value)
value["size"] = size
value["rand_generator"] = rand_generator
return fct(**value)
def get_length(matrix):
if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
return len(matrix.value)
if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
return len(matrix.value)
if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
return len(np.arange(**matrix.value))
if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
return len(np.linspace(**matrix.value))
if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
return len(np.logspace(**matrix.value))
if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
return len(np.geomspace(**matrix.value))
if matrix.IDENTIFIER in {
V1HpUniform.IDENTIFIER,
V1HpQUniform.IDENTIFIER,
V1HpLogUniform.IDENTIFIER,
V1HpQLogUniform.IDENTIFIER,
V1HpNormal.IDENTIFIER,
V1HpQNormal.IDENTIFIER,
V1HpLogNormal.IDENTIFIER,
V1HpQLogNormal.IDENTIFIER,
}:
raise ValidationError("Distribution should not call `length`")
def get_min(matrix):
if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
if matrix.is_categorical:
return None
return min(to_numpy(matrix))
if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
return None
if matrix.IDENTIFIER in {
V1HpRange.IDENTIFIER,
V1HpLinSpace.IDENTIFIER,
V1HpLogSpace.IDENTIFIER,
V1HpGeomSpace.IDENTIFIER,
}:
return matrix.value.get("start")
if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
return matrix.value.get("low")
if matrix.IDENTIFIER in {
V1HpQUniform.IDENTIFIER,
V1HpLogUniform.IDENTIFIER,
V1HpQLogUniform.IDENTIFIER,
V1HpNormal.IDENTIFIER,
V1HpQNormal.IDENTIFIER,
V1HpLogNormal.IDENTIFIER,
V1HpQLogNormal.IDENTIFIER,
}:
return None
def get_max(matrix):
if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
if matrix.is_categorical:
return None
return max(to_numpy(matrix))
if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
return None
if matrix.IDENTIFIER in {
V1HpRange.IDENTIFIER,
V1HpLinSpace.IDENTIFIER,
V1HpLogSpace.IDENTIFIER,
V1HpGeomSpace.IDENTIFIER,
}:
return matrix.value.get("stop")
if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
return matrix.value.get("high")
if matrix.IDENTIFIER in {
V1HpQUniform.IDENTIFIER,
V1HpLogUniform.IDENTIFIER,
V1HpQLogUniform.IDENTIFIER,
V1HpNormal.IDENTIFIER,
V1HpQNormal.IDENTIFIER,
V1HpLogNormal.IDENTIFIER,
V1HpQLogNormal.IDENTIFIER,
}:
return None
def to_numpy(matrix):
if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
return matrix.value
if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
raise ValidationError(
"Distribution should not call `to_numpy`, "
"instead it should call `sample`."
)
if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
return np.arange(**matrix.value)
if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
return np.linspace(**matrix.value)
if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
return np.logspace(**matrix.value)
if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
return np.geomspace(**matrix.value)
if matrix.IDENTIFIER in {
V1HpUniform.IDENTIFIER,
V1HpQUniform.IDENTIFIER,
V1HpLogUniform.IDENTIFIER,
V1HpQLogUniform.IDENTIFIER,
V1HpNormal.IDENTIFIER,
V1HpQNormal.IDENTIFIER,
V1HpLogNormal.IDENTIFIER,
V1HpQLogNormal.IDENTIFIER,
}:
raise ValidationError(
"Distribution should not call `to_numpy`, "
"instead it should call `sample`."
)
def sample(matrix, size=None, rand_generator=None):
size = None if size == 1 else size
if matrix.IDENTIFIER == V1HpChoice.IDENTIFIER:
return space_sample(
value=to_numpy(matrix), size=size, rand_generator=rand_generator
)
if matrix.IDENTIFIER == V1HpPChoice.IDENTIFIER:
return pchoice(values=matrix.value, size=size, rand_generator=rand_generator)
if matrix.IDENTIFIER == V1HpRange.IDENTIFIER:
return space_sample(
value=to_numpy(matrix), size=size, rand_generator=rand_generator
)
if matrix.IDENTIFIER == V1HpLinSpace.IDENTIFIER:
return space_sample(
value=to_numpy(matrix), size=size, rand_generator=rand_generator
)
if matrix.IDENTIFIER == V1HpLogSpace.IDENTIFIER:
return space_sample(
value=to_numpy(matrix), size=size, rand_generator=rand_generator
)
if matrix.IDENTIFIER == V1HpGeomSpace.IDENTIFIER:
return space_sample(
value=to_numpy(matrix), size=size, rand_generator=rand_generator
)
if matrix.IDENTIFIER == V1HpUniform.IDENTIFIER:
return dist_sample(dist.uniform, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpQUniform.IDENTIFIER:
return dist_sample(dist.quniform, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpLogUniform.IDENTIFIER:
return dist_sample(dist.loguniform, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpQLogUniform.IDENTIFIER:
return dist_sample(dist.qloguniform, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpNormal.IDENTIFIER:
return dist_sample(dist.normal, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpQNormal.IDENTIFIER:
return dist_sample(dist.qnormal, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpLogNormal.IDENTIFIER:
return dist_sample(dist.lognormal, matrix.value, size, rand_generator)
if matrix.IDENTIFIER == V1HpQLogNormal.IDENTIFIER:
return dist_sample(dist.qlognormal, matrix.value, size, rand_generator)
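# Usage sketch (assumption: V1HpChoice accepts its `value` field as a kwarg,
# matching how `matrix.value` is read throughout this module):
#
#   hp = V1HpChoice(value=[0.01, 0.1, 1.0])
#   get_length(hp)       # -> 3
#   sample(hp, size=2)   # -> two entries drawn uniformly from the list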
| apache-2.0 | -4,968,710,938,983,054,000 | 29.828571 | 85 | 0.677214 | false | 3.668286 | false | false | false |
cocreature/pytracer | color.py | 1 | 2055 | def dot(a, b):
    ''' Component-wise color multiplication
    :param RaytracerColor a: color triple
    :param RaytracerColor b: color triple
    :returns component-wise product of a and b
'''
return RaytracerColor(a.r * b.r, a.g * b.g, a.b * b.b)
class RaytracerColor:
''' represents a color
'''
def __init__(self, r=0.0, g=0.0, b=0.0):
''' :param float r: red color component between 0 and 1
:param float g: green color component between 0 and 1
:param float b: blue color component between 0 and 1
:var float r: red color component between 0 and 1
:var float g: green color component between 0 and 1
:var float b: blue color component between 0 and 1
'''
self._r = r
self._g = g
self._b = b
@property
def r(self):
return self._r
@property
def g(self):
return self._g
@property
def b(self):
return self._b
def __add__(self, a):
        ''' Component-wise color addition
        :param RaytracerColor a: color triple
        :returns component-wise sum of self and a
'''
return RaytracerColor(self.r + a.r, self.g + a.g, self.b + a.b)
def __mul__(self, a):
        ''' Scalar multiplication
        :param float a: scalar
        :returns each channel of self scaled by a
'''
return RaytracerColor(self.r * a, self.g * a, self.b * a)
def __imul__(self, a):
self._r *= a
self._g *= a
self._b *= a
return self
def __ne__(self, a):
return not (self._r == a.r and self._g == a.g and self._b == a.b)
def get_color(self):
return (int(self._r*255), int(self._g*255), int(self._b*255))
@property
def sanitized(self):
return RaytracerColor(min(max(0.0, self._r), 1),
min(max(0.0, self._g), 1),
min(max(0.0, self._b), 1))
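# Usage sketch:
#
#   c = RaytracerColor(0.5, 0.25, 1.0) * 0.5
#   c.get_color()                    # -> (63, 31, 127)
#   (c + c).sanitized.get_color()    # channels clamped to [0, 1] first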
| mit | -8,807,139,461,773,170,000 | 28.782609 | 73 | 0.526521 | false | 3.637168 | false | false | false |
boxcontrol/lowendspirit | lowendspirit/solusvmAPI.py | 1 | 2282 | import requests
class Solus_Enduser_API:
def __init__(self, url, api_hash, api_key):
self.url = url
self.api_hash = api_hash
self.api_key = api_key
self.values = ({'rdtype': 'json', 'hash': self.api_hash, 'key': self.api_key})
def to_json(self, data):
data=data.replace('><', '>...<')
data = data.split('>')
result = []
for i in data:
i = i.replace('<', '')
i = i.replace('...', '')
i = i.split('/')[0]
result.append(i)
if len(result) % 2 == 0:
result.pop()
result = {result[i]: result[i+1] for i in range(0, len(result) - 1, 2)}
return result
def sQuery(self, url, api_hash, api_key, values, action, extra=''):
if not extra:
values.update({'rdtype': 'json', 'hash': api_hash, 'key': api_key, 'action': action})
response = requests.get('https://'+url+'/api/client/command.php', params=values, timeout=50)
else:
response = requests.get('https://'+url+'/api/client/command.php?key=' +
api_key + "&hash=" + api_hash +
"&action=info&" + extra, timeout=50)
return response.text
def get_status(self):
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='status')
return self.to_json(data)
def get_info(self):
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='info')
return self.to_json(data)
def get_full_info(self):
extra = 'ipaddr=true&hdd=true&mem=true&bw=true'
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='info', extra=extra)
return self.to_json(data)
def server_reboot(self):
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='reboot')
return self.to_json(data)
def server_shutdown(self):
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='shutdown')
return self.to_json(data)
def server_boot(self):
data = self.sQuery(self.url, self.api_hash, self.api_key, self.values, action='boot')
return self.to_json(data)
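# Usage sketch (host and credentials are placeholders):
#
#   api = Solus_Enduser_API('solusvm.example.com', 'API_HASH', 'API_KEY')
#   api.get_full_info()    # dict including ipaddr/hdd/mem/bw fields
#   api.server_reboot()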
| mit | -4,944,491,562,993,256,000 | 36.409836 | 106 | 0.556529 | false | 3.346041 | false | false | false |
endlessm/chromium-browser | third_party/grpc/src/test/cpp/naming/utils/run_dns_server_for_lb_interop_tests.py | 5 | 4060 | #!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import os
import tempfile
import sys
import time
import signal
import yaml
argp = argparse.ArgumentParser(description='Runs a DNS server for LB interop tests')
argp.add_argument('-l', '--grpclb_ips', default=None, type=str,
help='Comma-separated list of IP addresses of balancers')
argp.add_argument('-f', '--fallback_ips', default=None, type=str,
help='Comma-separated list of IP addresses of fallback servers')
argp.add_argument('-c', '--cause_no_error_no_data_for_balancer_a_record',
default=False, action='store_const', const=True,
help=('Used for testing the case in which the grpclb '
'balancer A record lookup results in a DNS NOERROR response '
'but with no ANSWER section i.e. no addresses'))
args = argp.parse_args()
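# Example invocation (IP addresses are placeholders):
#   ./run_dns_server_for_lb_interop_tests.py \
#       --grpclb_ips=10.0.0.1,10.0.0.2 --fallback_ips=10.0.0.3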
balancer_records = []
grpclb_ips = args.grpclb_ips.split(',')
if grpclb_ips[0]:
for ip in grpclb_ips:
balancer_records.append({
'TTL': '2100',
'data': ip,
'type': 'A',
})
fallback_records = []
fallback_ips = args.fallback_ips.split(',')
if fallback_ips[0]:
for ip in fallback_ips:
fallback_records.append({
'TTL': '2100',
'data': ip,
'type': 'A',
})
records_config_yaml = {
'resolver_tests_common_zone_name':
'test.google.fr.',
'resolver_component_tests': [{
'records': {
'_grpclb._tcp.server': [
{
'TTL': '2100',
'data': '0 0 12000 balancer',
'type': 'SRV'
},
],
'balancer':
balancer_records,
'server':
fallback_records,
}
}]
}
if args.cause_no_error_no_data_for_balancer_a_record:
balancer_records = records_config_yaml[
'resolver_component_tests'][0]['records']['balancer']
assert not balancer_records
# Insert a TXT record at the balancer.test.google.fr. domain.
# This TXT record won't actually be resolved or used by gRPC clients;
    # inserting this record is just a way to get the balancer.test.google.fr.
# A record queries to return NOERROR DNS responses that also have no
# ANSWER section, in order to simulate this failure case.
balancer_records.append({
'TTL': '2100',
'data': 'arbitrary string that wont actually be resolved',
'type': 'TXT',
})
# Generate the actual DNS server records config file
records_config_path = tempfile.mktemp()
with open(records_config_path, 'w') as records_config_generated:
records_config_generated.write(yaml.dump(records_config_yaml))
with open(records_config_path, 'r') as records_config_generated:
sys.stderr.write('===== DNS server records config: =====\n')
sys.stderr.write(records_config_generated.read())
sys.stderr.write('======================================\n')
# Run the DNS server
# Note that we need to add the extra
# A record for metadata.google.internal in order for compute engine
# OAuth creds and ALTS creds to work.
# TODO(apolcyn): should metadata.google.internal always resolve
# to 169.254.169.254?
subprocess.check_output([
'/var/local/git/grpc/test/cpp/naming/utils/dns_server.py', '--port=53',
'--records_config_path', records_config_path,
'--add_a_record=metadata.google.internal:169.254.169.254',
])
| bsd-3-clause | -3,485,082,946,779,672,600 | 36.247706 | 85 | 0.632512 | false | 3.80863 | true | false | false |
jackzhao-mj/ok-client | client/utils/timer.py | 1 | 1403 | """Timeout mechanism."""
from client import exceptions
import threading
import traceback
def timed(timeout, fn, args=(), kargs={}):
"""Evaluates expr in the given frame.
PARAMETERS:
fn -- function; Python function to be evaluated
args -- tuple; positional arguments for fn
kargs -- dict; keyword arguments for fn
timeout -- int; number of seconds before timer interrupt
RETURN:
Result of calling fn(*args, **kargs).
RAISES:
Timeout -- if thread takes longer than timeout to execute
Error -- if calling fn raises an error, raise it
"""
submission = __ReturningThread(fn, args, kargs)
submission.start()
submission.join(timeout)
if submission.is_alive():
raise exceptions.Timeout(timeout)
if submission.error is not None:
raise submission.error
return submission.result
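# Usage sketch (the second call assumes `import time`):
#
#   timed(1, pow, args=(2, 10))        # -> 1024
#   timed(1, time.sleep, args=(5,))    # raises exceptions.Timeout(1)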
class __ReturningThread(threading.Thread):
"""Creates a daemon Thread with a result variable."""
def __init__(self, fn, args, kargs):
super().__init__()
self.daemon = True
self.result = None
self.error = None
self.fn = fn
self.args = args
self.kargs = kargs
def run(self):
try:
self.result = self.fn(*self.args, **self.kargs)
except Exception as e:
e._message = traceback.format_exc(limit=2)
self.error = e
| apache-2.0 | -3,628,445,723,920,096,000 | 27.632653 | 61 | 0.623664 | false | 4.277439 | false | false | false |
ekholabs/ekholabs-es | service/ElasticsearchConnection.py | 1 | 1129 | import pkg_resources
from requests_aws4auth import AWS4Auth
from elasticsearch import Elasticsearch, RequestsHttpConnection
import json
class Resource:
__instance = None
__elasticsearch = None
def __new__(cls, val):
if Resource.__instance is None:
Resource.__instance = object.__new__(cls)
Resource.__instance.val = val
return Resource.__instance
def connect(self):
if self.__elasticsearch is not None:
return self.__elasticsearch
config_package = 'config'
file_path = 'properties.json'
properties = pkg_resources.resource_string(config_package, file_path)
configuration = json.loads(properties)
awsauth = AWS4Auth(configuration['access_key'], configuration['secret_key'], configuration['region'], 'es')
self.__elasticsearch = Elasticsearch(
hosts=[{'host': configuration['host'], 'port': 443}],
http_auth=awsauth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection
)
return self.__elasticsearch
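# Usage sketch: Resource is a singleton, so repeated construction returns the
# same instance and connect() reuses the cached client.
#
#   es = Resource('aws-es').connect()
#   es.info()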
| mit | -8,098,267,506,412,627,000 | 27.948718 | 115 | 0.627104 | false | 4.704167 | true | false | false |
gitcoinco/web | app/grants/sync/zil.py | 1 | 2614 |
import time
from django.conf import settings
import requests
from grants.sync.helpers import is_txn_done_recently, record_contribution_activity, txn_already_used
headers = {
"X-APIKEY" : settings.VIEW_BLOCK_API_KEY
}
DECIMALS = 12
def find_txn_on_zil_explorer(contribution):
subscription = contribution.subscription
grant = subscription.grant
token_symbol = subscription.token_symbol
if subscription.tenant != 'ZIL':
return None
if token_symbol != 'ZIL':
return None
to_address = grant.zil_payout_address
from_address = subscription.contributor_address
amount = subscription.amount_per_period
url = f'https://api.viewblock.io/v1/zilliqa/addresses/{to_address}/txs?network=mainnet'
response = requests.get(url, headers=headers).json()
if len(response):
for txn in response:
if (
txn['from'] == from_address.lower() and
txn['to'] == to_address.lower() and
txn['direction'] == 'in' and
float(txn['value']) / 10 ** DECIMALS == float(amount) and
is_txn_done_recently(txn['timestamp']/1000) and
not txn_already_used(txn['hash'], token_symbol)
):
return txn['hash']
return None
def get_zil_txn_status(txnid, network='mainnet'):
if not txnid or txnid == "0x0":
return None
url = f'https://api.viewblock.io/v1/zilliqa/txs/{txnid}?network={network}'
view_block_response = requests.get(url, headers=headers).json()
if view_block_response:
response = {
'blockHeight': int(view_block_response['blockHeight']),
'receiptSuccess': view_block_response['receiptSuccess']
}
if response['receiptSuccess']:
response['has_mined'] = True
else:
response['has_mined'] = False
return response
return None
def sync_zil_payout(contribution):
time.sleep(0.5) # to avoid rate limit
if not contribution.tx_id or contribution.tx_id == '0x0':
txn = find_txn_on_zil_explorer(contribution)
if txn:
contribution.tx_id = txn
contribution.save()
if contribution.tx_id and contribution.tx_id != '0x0':
txn_status = get_zil_txn_status(contribution.tx_id)
if txn_status and txn_status.get('has_mined'):
contribution.success = True
contribution.tx_cleared = True
contribution.checkout_type = 'zil_std'
record_contribution_activity(contribution)
contribution.save()
| agpl-3.0 | 526,148,667,346,131,840 | 29.045977 | 100 | 0.612854 | false | 3.728959 | false | false | false |
ikben/troposphere | tests/test_logs.py | 2 | 1275 | import unittest
from troposphere import Retain
from troposphere.logs import LogGroup, Destination
class TestLogs(unittest.TestCase):
def test_loggroup_deletionpolicy_is_preserved(self):
log_group = LogGroup(
"LogGroupWithDeletionPolicy",
DeletionPolicy=Retain
)
self.assertIn('DeletionPolicy', log_group.to_dict())
def test_loggroup_retention(self):
for days in [7, "7"]:
LogGroup(
"LogGroupWithDeletionPolicy",
RetentionInDays=days,
)
for days in [6, "6"]:
with self.assertRaises(ValueError):
LogGroup(
"LogGroupWithDeletionPolicy",
RetentionInDays=days,
)
def test_log_destination(self):
log_destination = Destination(
'MyLogDestination',
DestinationName='destination-name',
RoleArn='role-arn',
TargetArn='target-arn',
DestinationPolicy='destination-policy'
)
log_destination_json = log_destination.to_dict()
self.assertIn('Type', log_destination_json)
self.assertIn('Properties', log_destination_json)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -4,810,851,735,974,841,000 | 29.357143 | 60 | 0.581961 | false | 4.396552 | true | false | false |
NuSTAR/nustar_lunar_pointing | nustar_lunar_pointing/filter.py | 1 | 3932 | import numpy as np
def bad_pix(evtdata, fpm='A'):
"""Do some basic filtering on known bad pixels.
Parameters
----------
evtdata: FITS data class
This should be an hdu.data structure from a NuSTAR FITS file.
fpm: {"FPMA" | "FPMB"}
Which FPM you're filtering on. Assumes A if not set.
Returns
-------
goodinds: iterable
Index of evtdata that passes the filtering.
"""
# Hot pixel filters
# FPMA or FPMB
    if fpm.find('B') == -1:
pix_filter = np.invert( ( (evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 16) & (evtdata['RAWY'] == 5) |
(evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 22) |
(evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 27) & (evtdata['RAWY'] == 6) |
(evtdata['DET_ID'] == 2) & (evtdata['RAWX'] == 27) & (evtdata['RAWY'] == 21) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 22) & (evtdata['RAWY'] == 1) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 15) & (evtdata['RAWY'] == 3) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 5) & (evtdata['RAWY'] == 5) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 22) & (evtdata['RAWY'] == 7) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 16) & (evtdata['RAWY'] == 11) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 18) & (evtdata['RAWY'] == 3) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 4) |
(evtdata['DET_ID'] == 3) & (evtdata['RAWX'] == 25) & (evtdata['RAWY'] == 5) ) )
else:
pix_filter = np.invert( ( (evtdata['DET_ID'] == 0) & (evtdata['RAWX'] == 24) & (evtdata['RAWY'] == 24)) )
inds = (pix_filter).nonzero()
goodinds=inds[0]
return goodinds
def by_energy(evtdata, energy_low=2.5, energy_high=10.):
""" Apply energy filtering to the data.
Parameters
----------
evtdata: FITS data class
This should be an hdu.data structure from a NuSTAR FITS file.
energy_low: float
Low-side energy bound for the map you want to produce (in keV).
Defaults to 2.5 keV.
energy_high: float
High-side energy bound for the map you want to produce (in keV).
Defaults to 10 keV.
"""
pilow = (energy_low - 1.6) / 0.04
pihigh = (energy_high - 1.6) / 0.04
pi_filter = ( ( evtdata['PI']>pilow ) & ( evtdata['PI']<pihigh))
inds = (pi_filter).nonzero()
goodinds=inds[0]
return goodinds
def gradezero(evtdata):
""" Only accept counts with GRADE==0.
Parameters
----------
evtdata: FITS data class
This should be an hdu.data structure from a NuSTAR FITS file.
Returns
-------
goodinds: iterable
Index of evtdata that passes the filtering.
"""
# Grade filter
grade_filter = ( evtdata['GRADE'] == 0)
inds = (grade_filter).nonzero()
goodinds = inds[0]
return goodinds
def event_filter(evtdata, fpm='FPMA',
energy_low=2.5, energy_high=10):
""" All in one filter module. By default applies an energy cut,
selects only events with grade == 0, and removes known hot pixel.
Note that this module returns a cleaned eventlist rather than
the indices to the cleaned events.
Parameters
----------
evtdata: FITS data structure
This should be an hdu.data structure from a NuSTAR FITS file.
fpm: {"FPMA" | "FPMB"}
Which FPM you're filtering on. Defaults to FPMA.
energy_low: float
Low-side energy bound for the map you want to produce (in keV).
Defaults to 2.5 keV.
energy_high: float
High-side energy bound for the map you want to produce (in keV).
Defaults to 10 keV.
Returns
-------
cleanevt: FITS data class.
This is the subset of evtdata that pass the data selection cuts.
"""
goodinds = bad_pix(evtdata, fpm=fpm)
evt_badfilter = evtdata[goodinds]
goodinds = by_energy(evt_badfilter,
energy_low=energy_low, energy_high = energy_high)
evt_energy = evt_badfilter[goodinds]
goodinds = gradezero(evt_energy)
cleanevt = evt_energy[goodinds]
return cleanevt
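# Usage sketch (assumes the event list is opened with astropy.io.fits; the
# filename below is hypothetical):
#
#   from astropy.io import fits
#   with fits.open('nu40101012002A01_cl.evt') as hdulist:
#       cleanevt = event_filter(hdulist[1].data, fpm='FPMA',
#                               energy_low=2.5, energy_high=10.)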
| mit | 262,317,687,391,917,280 | 27.492754 | 107 | 0.606053 | false | 2.814603 | false | false | false |
gorakhargosh/pyoauth | pyoauth/oauth1/client/twitter.py | 1 | 1860 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from mom.builtins import b
from pyoauth.oauth1.client import Client
from pyoauth.oauth1.client.mixins import OAuthMixin
class TwitterClient(Client):
"""
Creates an instance of a Twitter OAuth 1.0 client.
"""
_TEMP_URI = b("https://api.twitter.com/oauth/request_token")
_TOKEN_URI = b("https://api.twitter.com/oauth/access_token")
_AUTHORIZATION_URI = b("https://api.twitter.com/oauth/authorize")
_AUTHENTICATION_URI = b("https://api.twitter.com/oauth/authenticate")
def __init__(self,
http_client,
client_credentials,
use_authorization_header=True,
strict=False):
super(TwitterClient, self).__init__(
http_client,
client_credentials,
self._TEMP_URI,
self._TOKEN_URI,
self._AUTHORIZATION_URI,
self._AUTHENTICATION_URI,
use_authorization_header=use_authorization_header,
strict=strict
)
class TwitterMixin(OAuthMixin):
"""
OAuth handler mixin. Use with an HttpAdapterMixin for your framework.
"""
pass
| apache-2.0 | -3,775,858,283,001,651,000 | 31.068966 | 75 | 0.662903 | false | 4.025974 | false | false | false |
obriencj/python-javatools | javatools/dirutils.py | 2 | 5709 | # This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
"""
Utility module for discovering the differences between two directory
trees
:author: Christopher O'Brien <[email protected]>
:license: LGPL
"""
from filecmp import dircmp
from fnmatch import fnmatch
from os import makedirs, walk
from os.path import exists, isdir, join, relpath
from shutil import copy
LEFT = "left only"
RIGHT = "right only"
DIFF = "changed"
SAME = "same"
BOTH = SAME # meh, synonyms
def fnmatches(entry, *pattern_list):
"""
returns true if entry matches any of the glob patterns, false
otherwise
"""
for pattern in pattern_list:
if pattern and fnmatch(entry, pattern):
return True
return False
def makedirsp(dirname):
"""
create dirname if it doesn't exist
"""
if dirname and not exists(dirname):
makedirs(dirname)
def copydir(orig, dest):
"""
copies directory orig to dest. Returns a list of tuples of
relative filenames which were copied from orig to dest
"""
copied = list()
makedirsp(dest)
for root, dirs, files in walk(orig):
for d in dirs:
# ensure directories exist
makedirsp(join(dest, d))
for f in files:
root_f = join(root, f)
dest_f = join(dest, relpath(root_f, orig))
copy(root_f, dest_f)
copied.append((root_f, dest_f))
return copied
def compare(left, right):
"""
    generator emitting pairs indicating the contents of the left and
right directories. The pairs are in the form of (difference,
filename) where difference is one of the LEFT, RIGHT, DIFF, or
BOTH constants. This generator recursively walks both trees.
"""
dc = dircmp(left, right, ignore=[])
return _gen_from_dircmp(dc, left, right)
def _gen_from_dircmp(dc, lpath, rpath):
"""
do the work of comparing the dircmp
"""
left_only = dc.left_only
left_only.sort()
for f in left_only:
fp = join(dc.left, f)
if isdir(fp):
for r, _ds, fs in walk(fp):
r = relpath(r, lpath)
for f in fs:
yield(LEFT, join(r, f))
else:
yield (LEFT, relpath(fp, lpath))
right_only = dc.right_only
right_only.sort()
for f in right_only:
fp = join(dc.right, f)
if isdir(fp):
for r, _ds, fs in walk(fp):
r = relpath(r, rpath)
for f in fs:
yield(RIGHT, join(r, f))
else:
yield (RIGHT, relpath(fp, rpath))
diff_files = dc.diff_files
diff_files.sort()
for f in diff_files:
yield (DIFF, join(relpath(dc.right, rpath), f))
same_files = dc.same_files
same_files.sort()
for f in same_files:
yield (BOTH, join(relpath(dc.left, lpath), f))
subdirs = dc.subdirs.values()
subdirs = sorted(subdirs)
for sub in subdirs:
for event in _gen_from_dircmp(sub, lpath, rpath):
yield event
def collect_compare(left, right):
"""
returns a tuple of four lists describing the file paths that have
been (in order) added, removed, altered, or left the same
"""
return collect_compare_into(left, right, [], [], [], [])
def collect_compare_into(left, right, added, removed, altered, same):
"""
collect the results of compare into the given lists (or None if
you do not wish to collect results of that type. Returns a tuple
of (added, removed, altered, same)
"""
for event, filename in compare(left, right):
if event == LEFT:
group = removed
elif event == RIGHT:
group = added
elif event == DIFF:
group = altered
elif event == BOTH:
group = same
else:
assert False
if group is not None:
group.append(filename)
return added, removed, altered, same
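# Usage sketch (directory paths are placeholders):
#
#   added, removed, altered, same = collect_compare('build/old', 'build/new')
#   for name in altered:
#       print('changed:', name)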
class ClosingContext(object):
# pylint: disable=R0903
# too few public methods (none)
"""
A simple context manager which is created with an object instance,
and will return that instance from __enter__ and call the close
method on the instance in __exit__
"""
def __init__(self, managed):
self.managed = managed
def __enter__(self):
return self.managed
def __exit__(self, exc_type, _exc_val, _exc_tb):
managed = self.managed
self.managed = None
if managed is not None and hasattr(managed, "close"):
managed.close()
return exc_type is None
def closing(managed):
"""
If the managed object already provides its own context management
via the __enter__ and __exit__ methods, it is returned
unchanged. However, if the instance does not, a ClosingContext
will be created to wrap it. When the ClosingContext exits, it will
call managed.close()
"""
if managed is None or hasattr(managed, "__enter__"):
return managed
else:
return ClosingContext(managed)
#
# The end.
| lgpl-3.0 | -9,175,640,514,125,958,000 | 23.930131 | 70 | 0.616745 | false | 3.945404 | false | false | false |
FederatedAI/FATE | python/federatedml/secureprotol/fixedpoint.py | 1 | 8060 | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import sys
import numpy as np
class FixedPointNumber(object):
"""Represents a float or int fixedpoit encoding;.
"""
BASE = 16
LOG2_BASE = math.log(BASE, 2)
FLOAT_MANTISSA_BITS = sys.float_info.mant_dig
Q = 293973345475167247070445277780365744413
def __init__(self, encoding, exponent, n=None, max_int=None):
self.n = n
self.max_int = max_int
if self.n is None:
self.n = self.Q
self.max_int = self.Q // 3 - 1
self.encoding = encoding
self.exponent = exponent
@classmethod
def encode(cls, scalar, n=None, max_int=None, precision=None, max_exponent=None):
"""return an encoding of an int or float.
"""
# Calculate the maximum exponent for desired precision
exponent = None
# Too low value preprocess;
# avoid "OverflowError: int too large to convert to float"
if np.abs(scalar) < 1e-200:
scalar = 0
if n is None:
n = cls.Q
max_int = cls.Q // 3 - 1
if precision is None:
if isinstance(scalar, int) or isinstance(scalar, np.int16) or \
isinstance(scalar, np.int32) or isinstance(scalar, np.int64):
exponent = 0
elif isinstance(scalar, float) or isinstance(scalar, np.float16) \
or isinstance(scalar, np.float32) or isinstance(scalar, np.float64):
flt_exponent = math.frexp(scalar)[1]
lsb_exponent = cls.FLOAT_MANTISSA_BITS - flt_exponent
exponent = math.floor(lsb_exponent / cls.LOG2_BASE)
else:
raise TypeError("Don't know the precision of type %s."
% type(scalar))
else:
exponent = math.floor(math.log(precision, cls.BASE))
if max_exponent is not None:
exponent = max(max_exponent, exponent)
int_fixpoint = int(round(scalar * pow(cls.BASE, exponent)))
if abs(int_fixpoint) > max_int:
raise ValueError('Integer needs to be within +/- %d but got %d'
% (max_int, int_fixpoint))
return cls(int_fixpoint % n, exponent, n, max_int)
def decode(self):
"""return decode plaintext.
"""
if self.encoding >= self.n:
# Should be mod n
raise ValueError('Attempted to decode corrupted number')
elif self.encoding <= self.max_int:
# Positive
mantissa = self.encoding
elif self.encoding >= self.n - self.max_int:
# Negative
mantissa = self.encoding - self.n
else:
raise OverflowError('Overflow detected in decode number')
return mantissa * pow(self.BASE, -self.exponent)
def increase_exponent_to(self, new_exponent):
"""return FixedPointNumber: new encoding with same value but having great exponent.
"""
if new_exponent < self.exponent:
raise ValueError('New exponent %i should be greater than'
'old exponent %i' % (new_exponent, self.exponent))
factor = pow(self.BASE, new_exponent - self.exponent)
new_encoding = self.encoding * factor % self.n
return FixedPointNumber(new_encoding, new_exponent, self.n, self.max_int)
def __align_exponent(self, x, y):
"""return x,y with same exponet
"""
if x.exponent < y.exponent:
x = x.increase_exponent_to(y.exponent)
elif x.exponent > y.exponent:
y = y.increase_exponent_to(x.exponent)
return x, y
def __truncate(self, a):
scalar = a.decode()
return FixedPointNumber.encode(scalar)
def __add__(self, other):
if isinstance(other, FixedPointNumber):
return self.__add_fixpointnumber(other)
else:
return self.__add_scalar(other)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, FixedPointNumber):
return self.__sub_fixpointnumber(other)
else:
return self.__sub_scalar(other)
def __rsub__(self, other):
x = self.__sub__(other)
x = -1 * x.decode()
return self.encode(x)
def __rmul__(self, other):
return self.__mul__(other)
def __mul__(self, other):
if isinstance(other, FixedPointNumber):
return self.__mul_fixpointnumber(other)
else:
return self.__mul_scalar(other)
def __truediv__(self, other):
if isinstance(other, FixedPointNumber):
scalar = other.decode()
else:
scalar = other
return self.__mul__(1 / scalar)
def __rtruediv__(self, other):
res = 1.0 / self.__truediv__(other).decode()
return FixedPointNumber.encode(res)
def __lt__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x < y:
return True
else:
return False
def __gt__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x > y:
return True
else:
return False
def __le__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x <= y:
return True
else:
return False
def __ge__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x >= y:
return True
else:
return False
def __eq__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x == y:
return True
else:
return False
def __ne__(self, other):
x = self.decode()
if isinstance(other, FixedPointNumber):
y = other.decode()
else:
y = other
if x != y:
return True
else:
return False
def __add_fixpointnumber(self, other):
x, y = self.__align_exponent(self, other)
encoding = (x.encoding + y.encoding) % self.Q
return FixedPointNumber(encoding, x.exponent)
def __add_scalar(self, scalar):
encoded = self.encode(scalar)
return self.__add_fixpointnumber(encoded)
def __sub_fixpointnumber(self, other):
scalar = -1 * other.decode()
return self.__add_scalar(scalar)
def __sub_scalar(self, scalar):
scalar = -1 * scalar
return self.__add_scalar(scalar)
def __mul_fixpointnumber(self, other):
encoding = (self.encoding * other.encoding) % self.Q
        exponent = self.exponent + other.exponent
        mul_fixedpoint = FixedPointNumber(encoding, exponent)
truncate_mul_fixedpoint = self.__truncate(mul_fixedpoint)
return truncate_mul_fixedpoint
def __mul_scalar(self, scalar):
encoded = self.encode(scalar)
return self.__mul_fixpointnumber(encoded)
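# Worked example: encode() stores round(scalar * BASE**exponent) mod n, so
# arithmetic on encodings mirrors arithmetic on the plaintexts.
#
#   x = FixedPointNumber.encode(3.25)
#   y = FixedPointNumber.encode(-1.5)
#   (x + y).decode()   # -> 1.75
#   (x * y).decode()   # -> -4.875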
| apache-2.0 | 7,395,149,713,591,440,000 | 29.646388 | 91 | 0.561538 | false | 4.08308 | false | false | false |
badele/pyRFXtrx | RFXtrx/lowlevel.py | 1 | 52885 | # This file is part of pyRFXtrx, a Python library to communicate with
# the RFXtrx family of devices from http://www.rfxcom.com/
# See https://github.com/woudt/pyRFXtrx for the latest version.
#
# Copyright (C) 2012 Edwin Woudt <[email protected]>
#
# pyRFXtrx is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyRFXtrx is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pyRFXtrx. See the file COPYING.txt in the distribution.
# If not, see <http://www.gnu.org/licenses/>.
"""
This module provides low level packet parsing and generation code for the
RFXtrx.
"""
# pylint: disable=C0302,R0902,R0903,R0911,R0913
def parse(data):
""" Parse a packet from a bytearray """
if data[0] == 0:
# null length packet - sometimes happens on initialization
return None
if data[1] == 0x01:
pkt = Status()
pkt.load_receive(data)
return pkt
if data[1] == 0x10:
pkt = Lighting1()
pkt.load_receive(data)
return pkt
if data[1] == 0x11:
pkt = Lighting2()
pkt.load_receive(data)
return pkt
if data[1] == 0x12:
pkt = Lighting3()
pkt.load_receive(data)
return pkt
if data[1] == 0x13:
pkt = Lighting4()
pkt.load_receive(data)
return pkt
if data[1] == 0x14:
pkt = Lighting5()
pkt.load_receive(data)
return pkt
if data[1] == 0x15:
pkt = Lighting6()
pkt.load_receive(data)
return pkt
if data[1] == 0x50:
pkt = Temp()
pkt.load_receive(data)
return pkt
if data[1] == 0x52:
pkt = TempHumid()
pkt.load_receive(data)
return pkt
if data[1] == 0x54:
pkt = TempHumidBaro()
pkt.load_receive(data)
return pkt
if data[1] == 0x55:
pkt = RainGauge()
pkt.load_receive(data)
return pkt
if data[1] == 0x56:
pkt = Wind()
pkt.load_receive(data)
return pkt
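# Usage example: parse a received Lighting1 frame (X10 'On' for device E5;
# the byte layout matches Lighting1.load_receive below):
#
#   pkt = parse(bytearray([0x07, 0x10, 0x00, 0x2a, 0x45, 0x05, 0x01, 0x70]))
#   pkt.id_string     # -> 'E5'
#   pkt.cmnd_string   # -> 'On'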
###############################################################################
# Packet class
###############################################################################
class Packet(object):
""" Abstract superclass for all low level packets """
_UNKNOWN_TYPE = "Unknown type ({0:#04x}/{1:#04x})"
_UNKNOWN_CMND = "Unknown command ({0:#04x})"
def __init__(self):
"""Constructor"""
self.data = None
self.packetlength = None
self.packettype = None
self.subtype = None
self.seqnbr = None
self.rssi = None
self.rssi_byte = None
self.type_string = None
self.id_string = None
def has_value(self, datatype):
"""Return True if the sensor supports the given data type.
sensor.has_value(RFXCOM_TEMPERATURE) is identical to calling
sensor.has_temperature().
"""
return hasattr(self, dataype)
def value(self, datatype):
"""Return the :class:`SensorValue` for the given data type.
sensor.value(RFXCOM_TEMPERATURE) is identical to calling
sensor.temperature().
"""
return getattr(self, datatype, None)
def __getattr__(self, name):
        typename = name.replace("has_", "", 1)
        if name != typename:
            return lambda: self.has_value(typename)
raise AttributeError(name)
def __eq__(self, other):
        if not isinstance(other, Packet):
return False
return self.id_string == other.id_string
def __repr__(self):
        # not every packet type carries a temperature; degrade to None
        return self.id_string + ": " + str(getattr(self, "temperature", None))
###############################################################################
# Status class
###############################################################################
def _decode_flags(v, words):
words = words.split()
s = set()
for w in words:
if v % 2:
s.add(w)
        v //= 2
return s
class Status(Packet):
"""
Data class for the Status packet type
"""
TYPES = {
0x50: '310MHz',
0x51: '315MHz',
0x53: '433.92MHz',
0x55: '868.00MHz',
0x56: '868.00MHz FSK',
0x57: '868.30MHz',
0x58: '868.30MHz FSK',
0x59: '868.35MHz',
0x5A: '868.35MHz FSK',
0x5B: '868.95MHz'
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Status [subtype={0}, firmware={1}, devices={2}]") \
.format(self.type_string, self.firmware_version, self.devices)
def __init__(self):
"""Constructor"""
super(Status, self).__init__()
self.tranceiver_type = None
self.firmware_version = None
self.devices = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.tranceiver_type = data[5]
self.firmware_version = data[6]
devs = set()
        devs.update(_decode_flags(data[7] // 0x80, 'undecoded'))
devs.update(_decode_flags(data[8],
'mertik lightwarerf hideki lacrosse fs20 proguard'))
devs.update(_decode_flags(data[9],
'x10 arc ac homeeasy ikeakoppla oregon ati visonic'))
self.devices = sorted(devs)
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
if self.tranceiver_type in self.TYPES:
self.type_string = self.TYPES[self.tranceiver_type]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = 'Unknown'
###############################################################################
# Lighting1 class
###############################################################################
class Lighting1(Packet):
"""
Data class for the Lighting1 packet type
"""
TYPES = {0x00: 'X10 lighting',
0x01: 'ARC',
0x02: 'ELRO AB400D',
0x03: 'Waveman',
0x04: 'Chacon EMW200',
0x05: 'IMPULS',
0x06: 'RisingSun',
0x07: 'Philips SBC',
0x08: 'Energenie',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
ALIAS_TYPES = {'KlikAanKlikUit code wheel': 0x01,
'NEXA code wheel': 0x01,
'CHACON code wheel': 0x01,
'HomeEasy code wheel': 0x01,
'Proove': 0x01,
'DomiaLite': 0x01,
'InterTechno': 0x01,
'AB600': 0x01,
}
"""
Mapping of subtype aliases to the corresponding subtype value
"""
HOUSECODES = {0x41: 'A', 0x42: 'B', 0x43: 'C', 0x44: 'D',
0x45: 'E', 0x46: 'F', 0x47: 'G', 0x48: 'H',
0x49: 'I', 0x4A: 'J', 0x4B: 'K', 0x4C: 'L',
0x4D: 'M', 0x4E: 'N', 0x4F: 'O', 0x50: 'P'}
"""
Mapping of housecode numeric values to strings, used in id_string
"""
COMMANDS = {0x00: 'Off',
0x01: 'On',
0x02: 'Dim',
0x03: 'Bright',
0x05: 'All/group Off',
0x06: 'All/group On',
0x07: 'Chime',
0xFF: 'Illegal command'}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
def __str__(self):
return ("Lighting1 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " +
"rssi={4}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.cmnd_string, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting1, self).__init__()
self.housecode = None
self.unitcode = None
self.cmnd = None
self.cmnd_string = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x10
self.subtype = subtype
hcode = id_string[0:1]
for hcode_num in self.HOUSECODES:
if self.HOUSECODES[hcode_num] == hcode:
self.housecode = hcode_num
self.unitcode = int(id_string[1:])
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.housecode = data[4]
self.unitcode = data[5]
self.cmnd = data[6]
self.rssi_byte = data[7]
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, housecode, unitcode, cmnd):
"""Load data from individual data fields"""
self.packetlength = 7
self.packettype = 0x10
self.subtype = subtype
self.seqnbr = seqnbr
self.housecode = housecode
self.unitcode = unitcode
self.cmnd = cmnd
self.rssi_byte = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr, self.housecode,
self.unitcode, self.cmnd, self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = self.HOUSECODES[self.housecode] + str(self.unitcode)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.cmnd is not None:
if self.cmnd in self.COMMANDS:
self.cmnd_string = self.COMMANDS[self.cmnd]
else:
self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd)
###############################################################################
# Lighting2 class
###############################################################################
class Lighting2(Packet):
"""
Data class for the Lighting2 packet type
"""
TYPES = {0x00: 'AC',
0x01: 'HomeEasy EU',
0x02: 'ANSLUT',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
ALIAS_TYPES = {'KlikAanKlikUit automatic': 0x00,
'NEXA automatic': 0x00,
                   'CHACON automatic': 0x00,
'HomeEasy UK': 0x00,
}
"""
Mapping of subtype aliases to the corresponding subtype value
"""
COMMANDS = {0x00: 'Off',
0x01: 'On',
0x02: 'Set level',
0x03: 'Group off',
0x04: 'Group on',
0x05: 'Set group level',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
def __str__(self):
return ("Lighting2 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " +
"level={4}, rssi={5}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.cmnd_string, self.level, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting2, self).__init__()
self.id1 = None
self.id2 = None
self.id3 = None
self.id4 = None
self.id_combined = None
self.unitcode = None
self.cmnd = None
self.level = None
self.cmnd_string = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x11
self.subtype = subtype
self.id_combined = int(id_string[:7], 16)
self.id1 = self.id_combined >> 24
self.id2 = self.id_combined >> 16 & 0xff
self.id3 = self.id_combined >> 8 & 0xff
self.id4 = self.id_combined & 0xff
self.unitcode = int(id_string[8:])
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.id3 = data[6]
self.id4 = data[7]
self.id_combined = (self.id1 << 24) + (self.id2 << 16) \
+ (self.id3 << 8) + self.id4
self.unitcode = data[8]
self.cmnd = data[9]
self.level = data[10]
self.rssi_byte = data[11]
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, id_combined, unitcode, cmnd,
level):
"""Load data from individual data fields"""
self.packetlength = 0x0b
self.packettype = 0x11
self.subtype = subtype
self.seqnbr = seqnbr
self.id_combined = id_combined
self.id1 = id_combined >> 24
self.id2 = id_combined >> 16 & 0xff
self.id3 = id_combined >> 8 & 0xff
self.id4 = id_combined & 0xff
self.unitcode = unitcode
self.cmnd = cmnd
self.level = level
self.rssi_byte = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr, self.id1, self.id2,
self.id3, self.id4, self.unitcode, self.cmnd,
self.level, self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:07x}:{1}".format(self.id_combined, self.unitcode)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.cmnd is not None:
if self.cmnd in self.COMMANDS:
self.cmnd_string = self.COMMANDS[self.cmnd]
else:
self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd)
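# Illustrative decode (a sketch, not part of the original module; the frame
# bytes below are invented for the example):
#
#     pkt = Lighting2()
#     pkt.load_receive(bytearray([0x0b, 0x11, 0x00, 0x2a, 0x01, 0x23, 0x45,
#                                 0x67, 0x05, 0x02, 0x08, 0x70]))
#     pkt.id_string    # -> '1234567:5'
#     pkt.cmnd_string  # -> 'Set level'
#     pkt.level        # -> 8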
###############################################################################
# Lighting3 class
###############################################################################
class Lighting3(Packet):
"""
Data class for the Lighting3 packet type
"""
TYPES = {0x00: 'Ikea Koppla',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
COMMANDS = {0x00: 'Bright',
0x08: 'Dim',
0x10: 'On',
0x11: 'Level 1',
0x12: 'Level 2',
0x13: 'Level 3',
0x14: 'Level 4',
0x15: 'Level 5',
0x16: 'Level 6',
0x17: 'Level 7',
0x18: 'Level 8',
0x19: 'Level 9',
0x1a: 'Off',
0x1c: 'Program',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
def __str__(self):
return ("Lighting3 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " +
"battery={4}, rssi={5}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.cmnd_string, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting3, self).__init__()
self.system = None
self.channel1 = None
self.channel2 = None
self.channel = None
self.cmnd = None
self.battery = None
self.cmnd_string = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x12
self.subtype = subtype
self.system = int(id_string[:1], 16)
self.channel = int(id_string[2:], 16)
self.channel1 = self.channel & 0xff
self.channel2 = self.channel >> 8
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.system = data[4]
self.channel1 = data[5]
self.channel2 = data[6]
self.channel = (self.channel2 << 8) + self.channel1
self.cmnd = data[7]
self.rssi_byte = data[8]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, system, channel, cmnd):
"""Load data from individual data fields"""
self.packetlength = 0x08
self.packettype = 0x12
self.subtype = subtype
self.seqnbr = seqnbr
self.system = system
self.channel = channel
self.channel1 = channel & 0xff
self.channel2 = channel >> 8
self.cmnd = cmnd
self.rssi_byte = 0
self.battery = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr, self.system,
self.channel1, self.channel2, self.cmnd,
self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:1x}:{1:03x}".format(self.system, self.channel)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.cmnd is not None:
if self.cmnd in self.COMMANDS:
self.cmnd_string = self.COMMANDS[self.cmnd]
else:
self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd)
###############################################################################
# Lighting4 class
###############################################################################
class Lighting4(Packet):
"""
Data class for the Lighting4 packet type
"""
TYPES = {0x00: 'PT2262',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Lighting4 [subtype={0}, seqnbr={1}, cmd={2}, pulse={3}, " +
"rssi={4}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.pulse, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting4, self).__init__()
self.cmd1 = None
self.cmd2 = None
self.cmd3 = None
self.cmd = None
self.pulsehigh = None
self.pulselow = None
self.pulse = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x13
self.subtype = subtype
self.cmd = int(id_string, 16)
self.cmd1 = self.cmd >> 16
self.cmd2 = (self.cmd >> 8) & 0xff
self.cmd3 = self.cmd & 0xff
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.cmd1 = data[4]
self.cmd2 = data[5]
self.cmd3 = data[6]
self.cmd = (self.cmd1 << 16) + (self.cmd2 << 8) + self.cmd3
self.pulsehigh = data[7]
self.pulselow = data[8]
self.pulse = (self.pulsehigh << 8) + self.pulselow
self.rssi_byte = data[9]
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, cmd, pulse):
"""Load data from individual data fields"""
self.packetlength = 0x09
self.packettype = 0x13
self.subtype = subtype
self.seqnbr = seqnbr
self.cmd = cmd
self.cmd1 = self.cmd >> 16
self.cmd2 = (self.cmd >> 8) & 0xff
self.cmd3 = self.cmd & 0xff
self.pulse = pulse
self.pulsehigh = self.pulse >> 8
self.pulselow = self.pulse & 0xff
self.rssi_byte = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr,
self.cmd1, self.cmd2, self.cmd3,
self.pulsehigh, self.pulselow, self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:06x}".format(self.cmd)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
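# Illustrative round-trip (a sketch, not part of the original module):
# Lighting4 packs a 24-bit PT2262 code plus a 16-bit pulse width, so parse_id
# and set_transmit mirror each other:
#
#     pkt = Lighting4()
#     pkt.parse_id(0x00, "abc123")        # 24-bit code 0xabc123
#     pkt.set_transmit(0x00, 0x01, pkt.cmd, 350)
#     list(pkt.data)  # -> [0x09, 0x13, 0x00, 0x01, 0xab, 0xc1, 0x23, 0x01, 0x5e, 0x00]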
###############################################################################
# Lighting5 class
###############################################################################
class Lighting5(Packet):
"""
Data class for the Lighting5 packet type
"""
TYPES = {0x00: 'LightwaveRF, Siemens',
0x01: 'EMW100 GAO/Everflourish',
0x02: 'BBSB new types',
0x03: 'MDREMOTE LED dimmer',
0x04: 'Conrad RSL2',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
ALIAS_TYPES = {'LightwaveRF': 0x00,
'Siemens': 0x00,
'EMW100 GAO': 0x01,
'Everflourish': 0x01,
}
"""
Mapping of subtype aliases to the corresponding subtype value
"""
COMMANDS_00 = {0x00: 'Off',
0x01: 'On',
0x02: 'Group off',
0x03: 'Mood1',
0x04: 'Mood2',
0x05: 'Mood3',
0x06: 'Mood4',
0x07: 'Mood5',
0x0a: 'Unlock',
0x0b: 'Lock',
0x0c: 'All lock',
0x0d: 'Close (inline relay)',
0x0e: 'Stop (inline relay)',
0x0f: 'Open (inline relay)',
0x10: 'Set level',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
COMMANDS_01 = {0x00: 'Off',
0x01: 'On',
0x02: 'Learn',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
COMMANDS_02_04 = {0x00: 'Off',
0x01: 'On',
0x02: 'Group off',
0x03: 'Group on',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
COMMANDS_03 = {0x00: 'Power',
0x01: 'Light',
0x02: 'Bright',
0x03: 'Dim',
0x04: '100%',
0x05: '50%',
0x06: '25%',
0x07: 'Mode+',
0x08: 'Speed-',
0x09: 'Speed+',
0x0a: 'Mode-',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
COMMANDS_XX = {0x00: 'Off',
0x01: 'On',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
def __str__(self):
return ("Lighting5 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " +
"level={4}, rssi={5}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.cmnd_string, self.level, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting5, self).__init__()
self.id1 = None
self.id2 = None
self.id3 = None
self.id_combined = None
self.unitcode = None
self.cmnd = None
self.level = None
self.cmnd_string = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x14
self.subtype = subtype
self.id_combined = int(id_string[:6], 16)
self.id1 = self.id_combined >> 16
self.id2 = self.id_combined >> 8 & 0xff
self.id3 = self.id_combined & 0xff
self.unitcode = int(id_string[7:])
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.id3 = data[6]
self.id_combined = (self.id1 << 16) + (self.id2 << 8) + self.id3
self.unitcode = data[7]
self.cmnd = data[8]
self.level = data[9]
self.rssi_byte = data[10]
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, id_combined, unitcode, cmnd,
level):
"""Load data from individual data fields"""
self.packetlength = 0x0a
self.packettype = 0x14
self.subtype = subtype
self.seqnbr = seqnbr
self.id_combined = id_combined
self.id1 = id_combined >> 16
self.id2 = id_combined >> 8 & 0xff
self.id3 = id_combined & 0xff
self.unitcode = unitcode
self.cmnd = cmnd
self.level = level
self.rssi_byte = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr, self.id1, self.id2,
self.id3, self.unitcode, self.cmnd,
self.level, self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:06x}:{1}".format(self.id_combined, self.unitcode)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.cmnd is not None:
if self.subtype == 0x00 and self.cmnd in self.COMMANDS_00:
self.cmnd_string = self.COMMANDS_00[self.cmnd]
elif self.subtype == 0x01 and self.cmnd in self.COMMANDS_01:
self.cmnd_string = self.COMMANDS_01[self.cmnd]
elif self.subtype == 0x02 and self.cmnd in self.COMMANDS_02_04:
self.cmnd_string = self.COMMANDS_02_04[self.cmnd]
elif self.subtype == 0x03 and self.cmnd in self.COMMANDS_03:
self.cmnd_string = self.COMMANDS_03[self.cmnd]
elif self.subtype == 0x04 and self.cmnd in self.COMMANDS_02_04:
self.cmnd_string = self.COMMANDS_02_04[self.cmnd]
elif self.subtype >= 0x05 and self.cmnd in self.COMMANDS_XX:
self.cmnd_string = self.COMMANDS_XX[self.cmnd]
else:
self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd)
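# Illustrative note (a sketch, not part of the original module): the command
# string depends on the subtype, so the same numeric command renders
# differently per protocol family:
#
#     pkt = Lighting5()
#     pkt.set_transmit(0x00, 0x01, 0xf09ac8, 1, 0x03, 0x00)
#     pkt.cmnd_string  # -> 'Mood1'    (subtype 0x00, LightwaveRF)
#     pkt.set_transmit(0x02, 0x01, 0xf09ac8, 1, 0x03, 0x00)
#     pkt.cmnd_string  # -> 'Group on' (subtype 0x02, BBSB)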
###############################################################################
# Lighting6 class
###############################################################################
class Lighting6(Packet):
"""
Data class for the Lighting6 packet type
"""
TYPES = {0x00: 'Blyss',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
COMMANDS = {0x00: 'On',
0x01: 'Off',
0x02: 'Group on',
0x03: 'Group off',
}
"""
Mapping of command numeric values to strings, used for cmnd_string
"""
def __str__(self):
return ("Lighting6 [subtype={0}, seqnbr={1}, id={2}, cmnd={3}, " +
"cmndseqnbr={4}, rssi={5}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.cmnd_string, self.cmndseqnbr, self.rssi)
def __init__(self):
"""Constructor"""
super(Lighting6, self).__init__()
self.id1 = None
self.id2 = None
self.id_combined = None
self.groupcode = None
self.unitcode = None
self.cmnd = None
self.cmndseqnbr = None
self.rfu = None
self.level = None
self.cmnd_string = None
def parse_id(self, subtype, id_string):
"""Parse a string id into individual components"""
try:
self.packettype = 0x15
self.subtype = subtype
self.id_combined = int(id_string[:4], 16)
self.id1 = self.id_combined >> 8 & 0xff
self.id2 = self.id_combined & 0xff
self.groupcode = ord(id_string[5])
self.unitcode = int(id_string[6:])
self._set_strings()
        except Exception:
raise ValueError("Invalid id_string")
if self.id_string != id_string:
raise ValueError("Invalid id_string")
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.id_combined = (self.id1 << 8) + self.id2
self.groupcode = data[6]
self.unitcode = data[7]
self.cmnd = data[8]
self.cmndseqnbr = data[9]
self.rfu = data[10]
self.rssi_byte = data[11]
self.rssi = self.rssi_byte >> 4
self._set_strings()
def set_transmit(self, subtype, seqnbr, id_combined, groupcode, unitcode,
cmnd, cmndseqnbr):
"""Load data from individual data fields"""
self.packetlength = 0x0b
self.packettype = 0x15
self.subtype = subtype
self.seqnbr = seqnbr
self.id_combined = id_combined
self.id1 = id_combined >> 8 & 0xff
self.id2 = id_combined & 0xff
self.groupcode = groupcode
self.unitcode = unitcode
self.cmnd = cmnd
self.cmndseqnbr = cmndseqnbr
self.rfu = 0
self.rssi_byte = 0
self.rssi = 0
self.data = bytearray([self.packetlength, self.packettype,
self.subtype, self.seqnbr, self.id1, self.id2,
self.groupcode, self.unitcode, self.cmnd,
self.cmndseqnbr, self.rfu, self.rssi_byte])
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:04x}:{1}{2}".format(self.id_combined,
chr(self.groupcode),
self.unitcode)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.cmnd is not None:
if self.cmnd in self.COMMANDS:
self.cmnd_string = self.COMMANDS[self.cmnd]
else:
self.cmnd_string = self._UNKNOWN_CMND.format(self.cmnd)
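# Illustrative note (a sketch, not part of the original module): the Lighting6
# id string embeds the group code as a character, so parse_id uses ord() and
# _set_strings uses chr():
#
#     pkt = Lighting6()
#     pkt.parse_id(0x00, "f09a:B3")  # id 0xf09a, groupcode 'B' (66), unit 3
#     pkt.groupcode                  # -> 66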
###############################################################################
# SensorPacket class
###############################################################################
class SensorPacket(Packet):
"""
Abstract superclass for all sensor related packets
"""
HUMIDITY_TYPES = {0x00: 'dry',
0x01: 'comfort',
0x02: 'normal',
0x03: 'wet',
-1: 'unknown humidity'}
"""
Mapping of humidity types to string
"""
FORECAST_TYPES = {0x00: 'no forecast available',
0x01: 'sunny',
0x02: 'partly cloudy',
0x03: 'cloudy',
0x04: 'rain',
-1: 'unknown forecast'}
"""
Mapping of forecast types to string
"""
def __init__(self):
"""Constructor"""
super(SensorPacket, self).__init__()
###############################################################################
# Temp class
###############################################################################
class Temp(SensorPacket):
"""
    Data class for the Temp packet type
"""
TYPES = {0x01: 'THR128/138, THC138',
0x02: 'THC238/268,THN132,THWR288,THRN122,THN122,AW129/131',
0x03: 'THWR800',
0x04: 'RTHN318',
0x05: 'La Crosse TX2, TX3, TX4, TX17',
0x06: 'TS15C',
0x07: 'Viking 02811',
0x08: 'La Crosse WS2300',
0x09: 'RUBiCSON',
0x0a: 'TFA 30.3133',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Temp [subtype={0}, seqnbr={1}, id={2}, temp={3}, " +
"battery={4}, rssi={5}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.temp, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Temp, self).__init__()
self.id1 = None
self.id2 = None
self.temphigh = None
self.templow = None
self.temp = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.temphigh = data[6]
self.templow = data[7]
self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10
if self.temphigh >= 0x80:
self.temp = -self.temp
self.rssi_byte = data[8]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
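# Worked example (not part of the original module) of the sign handling in
# load_receive above: temphigh=0x80, templow=0xeb gives
# ((0x80 & 0x7f) << 8) + 0xeb = 235 -> 23.5, negated because 0x80 >= 0x80,
# i.e. -23.5 degrees.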
###############################################################################
# Humid class
###############################################################################
class Humid(SensorPacket):
"""
Data class for the Humid packet type
"""
TYPES = {0x01: 'LaCrosse TX3',
0x02: 'LaCrosse WS2300',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Humid [subtype={0}, seqnbr={1}, id={2}, " +
"humidity={3}, humidity_status={4}, battery={5}, rssi={6}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.humidity, self.humidity_status,
self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Humid, self).__init__()
self.id1 = None
self.id2 = None
self.humidity = None
self.humidity_status = None
self.humidity_status_string = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.humidity = data[6]
self.humidity_status = data[7]
self.rssi_byte = data[8]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.humidity_status in self.HUMIDITY_TYPES:
self.humidity_status_string = \
self.HUMIDITY_TYPES[self.humidity_status]
else:
self.humidity_status_string = self.HUMIDITY_TYPES[-1]
###############################################################################
# TempHumid class
###############################################################################
class TempHumid(SensorPacket):
"""
Data class for the TempHumid packet type
"""
TYPES = {0x01: 'THGN122/123, THGN132, THGR122/228/238/268',
0x02: 'THGR810, THGN800',
0x03: 'RTGR328',
0x04: 'THGR328',
0x05: 'WTGR800',
0x06: 'THGR918/928, THGRN228, THGN500',
0x07: 'TFA TS34C, Cresta',
0x08: 'WT260,WT260H,WT440H,WT450,WT450H',
0x09: 'Viking 02035,02038',
0x0a: 'Rubicson',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("TempHumid [subtype={0}, seqnbr={1}, id={2}, temp={3}, " +
"humidity={4}, humidity_status={5}, battery={6}, rssi={7}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.temp, self.humidity, self.humidity_status,
self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(TempHumid, self).__init__()
self.id1 = None
self.id2 = None
self.temphigh = None
self.templow = None
self.temp = None
self.humidity = None
self.humidity_status = None
self.humidity_status_string = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.temphigh = data[6]
self.templow = data[7]
self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10
if self.temphigh >= 0x80:
self.temp = -self.temp
self.humidity = data[8]
self.humidity_status = data[9]
self.rssi_byte = data[10]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.humidity_status in self.HUMIDITY_TYPES:
self.humidity_status_string = \
self.HUMIDITY_TYPES[self.humidity_status]
else:
self.humidity_status_string = self.HUMIDITY_TYPES[-1]
###############################################################################
# Baro class
###############################################################################
class Baro(SensorPacket):
"""
Data class for the Baro packet type
"""
TYPES = {}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Baro [subtype={0}, seqnbr={1}, id={2}, baro={3}, " +
"forecast={4}, battery={5}, rssi={6}]") \
.format(self.type_string, self.seqnbr, self.id_string, self.baro,
self.forecast, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Baro, self).__init__()
self.id1 = None
self.id2 = None
self.baro1 = None
self.baro2 = None
self.baro = None
self.forecast = None
self.forecast_string = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.baro1 = data[6]
self.baro2 = data[7]
self.baro = (self.baro1 << 8) + self.baro2
self.forecast = data[8]
self.rssi_byte = data[9]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.forecast in self.FORECAST_TYPES:
self.forecast_string = self.FORECAST_TYPES[self.forecast]
else:
self.forecast_string = self.FORECAST_TYPES[-1]
###############################################################################
# TempHumidBaro class
###############################################################################
class TempHumidBaro(SensorPacket):
"""
Data class for the TempHumidBaro packet type
"""
TYPES = {0x01: 'BTHR918',
0x02: 'BTHR918N, BTHR968',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("TempHumidBaro [subtype={0}, seqnbr={1}, id={2}, temp={3}, " +
"humidity={4}, humidity_status={5}, baro={6}, forecast={7}, " +
"battery={8}, rssi={9}]") \
.format(self.type_string, self.seqnbr, self.id_string, self.temp,
self.humidity, self.humidity_status, self.baro,
self.forecast, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(TempHumidBaro, self).__init__()
self.id1 = None
self.id2 = None
self.temphigh = None
self.templow = None
self.temp = None
self.humidity = None
self.humidity_status = None
self.humidity_status_string = None
self.baro1 = None
self.baro2 = None
self.baro = None
self.forecast = None
self.forecast_string = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.temphigh = data[6]
self.templow = data[7]
self.temp = float(((self.temphigh & 0x7f) << 8) + self.templow) / 10
if self.temphigh >= 0x80:
self.temp = -self.temp
self.humidity = data[8]
self.humidity_status = data[9]
self.baro1 = data[10]
self.baro2 = data[11]
self.baro = (self.baro1 << 8) + self.baro2
self.forecast = data[12]
self.rssi_byte = data[13]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
if self.humidity_status in self.HUMIDITY_TYPES:
self.humidity_status_string = \
self.HUMIDITY_TYPES[self.humidity_status]
else:
self.humidity_status_string = self.HUMIDITY_TYPES[-1]
if self.forecast in self.FORECAST_TYPES:
self.forecast_string = self.FORECAST_TYPES[self.forecast]
else:
self.forecast_string = self.FORECAST_TYPES[-1]
###############################################################################
# Rain class
###############################################################################
class Rain(SensorPacket):
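    """
    Data class for the Rain packet type
    """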
TYPES = {
0x01: "RGR126/682/918",
0x02: "PCR800",
0x03: "TFA",
0x04: "UPM RG700",
0x05: "WS2300",
0x06: "La Crosse TX5"
}
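    """
    Mapping of numeric subtype values to strings, used in type_string
    """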
def __str__(self):
return ("Rain [subtype={0}, seqnbr={1}, id={2}, rainrate={3}, " +
"raintotal={4}, battery={5}, rssi={6}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.rainrate, self.raintotal, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Rain, self).__init__()
self.id1 = None
self.id2 = None
self.rainrate1 = None
self.rainrate2 = None
self.rainrate = None
self.raintotal1 = None
self.raintotal2 = None
self.raintotal3 = None
self.raintotal = None
self.battery = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.rainrate1 = data[6]
self.rainrate2 = data[7]
self.rainrate = (self.rainrate1 << 8) + self.rainrate2
if self.subtype == 2:
self.rainrate = float(self.rainrate) / 100
self.raintotal1 = data[8]
self.raintotal2 = data[9]
self.raintotal3 = data[10]
self.raintotal = float((self.raintotal1 << 16) +
(self.raintotal2 << 8) +
self.raintotal3) / 10
self.rssi_byte = data[11]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
###############################################################################
# Wind class
###############################################################################
class Wind(SensorPacket):
"""
Data class for the Wind packet type
"""
TYPES = {0x01: 'WTGR800',
0x02: 'WGR800',
0x03: 'STR918, WGR918, WGR928',
0x04: 'TFA',
0x05: 'UPM WDS500',
0x06: 'WS2300',
}
"""
Mapping of numeric subtype values to strings, used in type_string
"""
def __str__(self):
return ("Wind [subtype={0}, seqnbr={1}, id={2}, direction={3}, " +
"average_speed={4}, gust={5}, temperature={6}, chill={7}, " +
"battery={8}, rssi={9}]") \
.format(self.type_string, self.seqnbr, self.id_string,
self.direction, self.average_speed, self.gust,
self.temperature, self.chill, self.battery, self.rssi)
def __init__(self):
"""Constructor"""
super(Wind, self).__init__()
self.id1 = None
self.id2 = None
self.direction = None
self.average_speed = None
self.gust = None
self.temperature = None
self.chill = None
self.battery = None
self.rssi = None
def load_receive(self, data):
"""Load data from a bytearray"""
self.data = data
self.packetlength = data[0]
self.packettype = data[1]
self.subtype = data[2]
self.seqnbr = data[3]
self.id1 = data[4]
self.id2 = data[5]
self.direction = data[6] * 256 + data[7]
        # Speeds are 16-bit values in tenths of a unit; the division applies
        # to the combined value, not just the low byte.
        self.average_speed = (data[8] * 256.0 + data[9]) / 10.0
        self.gust = (data[10] * 256.0 + data[11]) / 10.0
        # Temperatures are sign-and-magnitude: bit 7 of the high byte is the
        # sign, the remaining 15 bits are tenths of a degree.
        self.temperature = ((data[12] & 0x7f) * 256.0 + data[13]) / 10.0
        if data[12] & 0x80:
            self.temperature = -self.temperature
        self.chill = ((data[14] & 0x7f) * 256.0 + data[15]) / 10.0
        if data[14] & 0x80:
            self.chill = -self.chill
        if self.subtype == 0x03:
            # Battery nibble 0-9 scales to 10-100%.
            self.battery = (data[16] + 1) * 10
else:
self.rssi_byte = data[16]
self.battery = self.rssi_byte & 0x0f
self.rssi = self.rssi_byte >> 4
self._set_strings()
def _set_strings(self):
"""Translate loaded numeric values into convenience strings"""
self.id_string = "{0:02x}:{1:02x}".format(self.id1, self.id2)
if self.subtype in self.TYPES:
self.type_string = self.TYPES[self.subtype]
else:
#Degrade nicely for yet unknown subtypes
self.type_string = self._UNKNOWN_TYPE.format(self.packettype,
self.subtype)
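# Illustrative decode (a sketch, not part of the original module; the frame
# bytes are invented for the example):
#
#     pkt = Wind()
#     pkt.load_receive(bytearray([0x10, 0x56, 0x01, 0x03, 0x2f, 0x00, 0x00,
#                                 0xf7, 0x00, 0x20, 0x00, 0x24, 0x81, 0x60,
#                                 0x81, 0x60, 0x79]))
#     pkt.direction      # -> 247
#     pkt.average_speed  # -> 3.2   ((0x00 * 256 + 0x20) / 10)
#     pkt.temperature    # -> -35.2 (sign bit set on data[12])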
| gpl-3.0 | -4,641,288,713,968,356,000 | 33.251943 | 79 | 0.499593 | false | 3.802488 | false | false | false |
oknuutti/visnav-py | visnav/iotools/make-const-noise-shapemodel.py | 1 | 2362 | import math
import pickle
import sys
import numpy as np
import numba as nb
from visnav.algo import tools
from visnav.iotools import objloader
from visnav.settings import *
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-16k.obj data/ryugu+tex-d1-16k.nsm
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-4k.obj data/ryugu+tex-d1-4k.nsm
# data/ryugu+tex-d1-80k.obj data/ryugu+tex-d1-1k.obj data/ryugu+tex-d1-1k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-16k.obj data/ryugu+tex-d2-16k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-4k.obj data/ryugu+tex-d2-4k.nsm
# data/ryugu+tex-d2-80k.obj data/ryugu+tex-d2-1k.obj data/ryugu+tex-d2-1k.nsm
# data/67p+tex-80k.obj data/67p+tex-1k.obj data/67p+tex-1k.nsm
# data/67p+tex-80k.obj data/67p+tex-4k.obj data/67p+tex-4k.nsm
# data/67p+tex-80k.obj data/67p+tex-16k.obj data/67p+tex-16k.nsm
if __name__ == '__main__':
if False:
res = tools.poly_line_intersect(((0, 0, 1), (0, 1, 1), (1, 0, 1)), ((0, 0, 0), (.3, .7, 1)))
print('%s' % res)
quit()
assert len(sys.argv) == 4, 'USAGE: %s [full-res-model] [target-model] [output]' % sys.argv[0]
full_res_model = os.path.join(BASE_DIR, sys.argv[1])
infile = os.path.join(BASE_DIR, sys.argv[2])
outfile = os.path.join(BASE_DIR, sys.argv[3])
sc = 1000 # bennu in meters, ryugu & 67P in km
# load shape models
obj_fr = objloader.ShapeModel(fname=full_res_model)
obj = objloader.ShapeModel(fname=infile)
timer = tools.Stopwatch()
timer.start()
devs = tools.point_cloud_vs_model_err(np.array(obj_fr.vertices), obj)
timer.stop()
    # doesn't work: tools.intersections.parallel_diagnostics(level=4)
p50 = np.median(devs)
p68, p95, p99 = np.percentile(np.abs(devs-p50), (68, 95, 99.7))
idxs = np.abs(devs-p50) < p95
clean_devs = devs[idxs]
dev_mean = np.mean(clean_devs)
dev_std = np.std(clean_devs)
print('\n\n(%.2fms/vx) dev mean %.6fm/%.6fm, std %.6fm/%.6fm, 2s %.6fm/%.6fm, 3s %.6fm/%.6fm' % tuple(
map(lambda x: sc*x, (
timer.elapsed/len(obj_fr.vertices),
dev_mean, p50,
dev_std*1, p68,
dev_std*2, p95,
dev_std*3, p99,
))
))
with open(outfile, 'wb') as fh:
pickle.dump((obj.as_dict(), dev_mean), fh, -1)
| mit | 395,161,987,018,459,400 | 34.90625 | 106 | 0.612616 | false | 2.35964 | false | true | false |
heroku/django-heroku-connect | setup.py | 1 | 1282 | #!/usr/bin/env python
import os
import sys
import connect_client
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
packages = [
'connect_client',
'connect_client.tests',
]
requires = [
'shams==0.0.2',
'Django>=1.6',
]
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='connect_client',
version=connect_client.__version__,
description='Heroku Connect client django app',
long_description=readme,
author='David Gouldin',
author_email='[email protected]',
url='https://github.com/heroku/django-heroku-connect',
packages=packages,
package_data={'': ['LICENSE']},
package_dir={'connect_client': 'connect_client'},
include_package_data=True,
install_requires=requires,
license=license,
zip_safe=False,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
)
| mit | 4,392,016,882,430,501,400 | 22.309091 | 58 | 0.634945 | false | 3.591036 | false | true | false |
julienledem/arrow | python/pyarrow/parquet.py | 2 | 7044 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from pyarrow._parquet import (ParquetReader, FileMetaData, # noqa
RowGroupMetaData, Schema, ParquetWriter)
import pyarrow._parquet as _parquet # noqa
from pyarrow.table import concat_tables
EXCLUDED_PARQUET_PATHS = {'_metadata', '_common_metadata', '_SUCCESS'}
class ParquetFile(object):
"""
Open a Parquet binary file for reading
Parameters
----------
source : str or pyarrow.io.NativeFile
Readable source. For passing Python file objects or byte buffers,
see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
metadata : ParquetFileMetadata, default None
Use existing metadata object, rather than reading from file.
"""
def __init__(self, source, metadata=None):
self.reader = ParquetReader()
self.reader.open(source, metadata=metadata)
@property
def metadata(self):
return self.reader.metadata
@property
def schema(self):
return self.metadata.schema
def read(self, nrows=None, columns=None, nthreads=1):
"""
Read a Table from Parquet format
Parameters
        ----------
        nrows : int, default None
            Not yet implemented; must be left as None.
        columns : list
            If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. If > 1, requires that the
underlying file source is threadsafe
Returns
-------
pyarrow.table.Table
Content of the file as a table (of columns)
"""
if nrows is not None:
raise NotImplementedError("nrows argument")
if columns is None:
column_indices = None
else:
column_indices = [self.reader.column_name_idx(column)
for column in columns]
return self.reader.read(column_indices=column_indices,
nthreads=nthreads)
def read_table(source, columns=None, nthreads=1, metadata=None):
"""
Read a Table from Parquet format
Parameters
----------
source: str or pyarrow.io.NativeFile
Location of Parquet dataset. If a string passed, can be a single file
name or directory name. For passing Python file objects or byte
buffers, see pyarrow.io.PythonFileInterface or pyarrow.io.BufferReader.
columns: list
If not None, only these columns will be read from the file.
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
metadata : FileMetaData
If separately computed
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
from pyarrow.filesystem import LocalFilesystem
if isinstance(source, six.string_types):
fs = LocalFilesystem.get_instance()
if fs.isdir(source):
return fs.read_parquet(source, columns=columns,
metadata=metadata)
pf = ParquetFile(source, metadata=metadata)
return pf.read(columns=columns, nthreads=nthreads)
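# Illustrative usage (a sketch, not part of the original module;
# 'example.parquet' is a hypothetical file):
#
#     import pyarrow.parquet as pq
#     table = pq.read_table('example.parquet', columns=['a', 'b'], nthreads=4)
#     df = table.to_pandas()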
def read_multiple_files(paths, columns=None, filesystem=None, nthreads=1,
metadata=None, schema=None):
"""
Read multiple Parquet files as a single pyarrow.Table
Parameters
----------
paths : List[str]
List of file paths
columns : List[str]
Names of columns to read from the file
filesystem : Filesystem, default None
If nothing passed, paths assumed to be found in the local on-disk
filesystem
nthreads : int, default 1
Number of columns to read in parallel. Requires that the underlying
file source is threadsafe
metadata : pyarrow.parquet.FileMetaData
Use metadata obtained elsewhere to validate file schemas
schema : pyarrow.parquet.Schema
Use schema obtained elsewhere to validate file schemas. Alternative to
metadata parameter
Returns
-------
pyarrow.Table
Content of the file as a table (of columns)
"""
if filesystem is None:
def open_file(path, meta=None):
return ParquetFile(path, metadata=meta)
else:
def open_file(path, meta=None):
return ParquetFile(filesystem.open(path, mode='rb'), metadata=meta)
if len(paths) == 0:
raise ValueError('Must pass at least one file path')
if metadata is None and schema is None:
schema = open_file(paths[0]).schema
elif schema is None:
schema = metadata.schema
# Verify schemas are all equal
all_file_metadata = []
for path in paths:
file_metadata = open_file(path).metadata
if not schema.equals(file_metadata.schema):
raise ValueError('Schema in {0} was different. {1!s} vs {2!s}'
.format(path, file_metadata.schema, schema))
all_file_metadata.append(file_metadata)
# Read the tables
tables = []
for path, path_metadata in zip(paths, all_file_metadata):
reader = open_file(path, meta=path_metadata)
table = reader.read(columns=columns, nthreads=nthreads)
tables.append(table)
all_data = concat_tables(tables)
return all_data
def write_table(table, sink, chunk_size=None, version='1.0',
use_dictionary=True, compression='snappy'):
"""
Write a Table to Parquet format
Parameters
----------
table : pyarrow.Table
sink: string or pyarrow.io.NativeFile
chunk_size : int
The maximum number of rows in each Parquet RowGroup. As a default,
we will write a single RowGroup per file.
version : {"1.0", "2.0"}, default "1.0"
The Parquet format version, defaults to 1.0
use_dictionary : bool or list
Specify if we should use dictionary encoding in general or only for
some columns.
compression : str or dict
Specify the compression codec, either on a general basis or per-column.
"""
writer = ParquetWriter(sink, use_dictionary=use_dictionary,
compression=compression,
version=version)
writer.write_table(table, row_group_size=chunk_size)
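# Illustrative usage (a sketch, not part of the original module; 'df' and
# 'example.parquet' are hypothetical):
#
#     import pyarrow as pa
#     import pyarrow.parquet as pq
#     table = pa.Table.from_pandas(df)
#     pq.write_table(table, 'example.parquet', chunk_size=100000,
#                    compression='snappy')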
| apache-2.0 | 6,071,365,985,521,752,000 | 33.529412 | 79 | 0.644946 | false | 4.397004 | false | false | false |
xyuanmu/XX-Net | python3.8.2/Lib/site-packages/pip/_internal/commands/show.py | 14 | 6273 | from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""
Show information about one or more installed packages.
The output is in RFC-compliant mail header format.
"""
name = 'show'
usage = """
%prog [options] <package> ..."""
summary = 'Show information about installed packages.'
ignore_require_venv = True
def __init__(self, *args, **kw):
super(ShowCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = search_packages_info(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose):
return ERROR
return SUCCESS
def search_packages_info(query):
"""
Gather details from installed distributions. Print distribution name,
version, location, and installed files. Installed files requires a
pip generated 'installed-files.txt' in the distributions '.egg-info'
directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
}
file_list = None
metadata = None
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORDs should be part of .dist-info metadatas
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [l.split(',')[0] for l in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
package['installer'] = line.strip()
break
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser cannot deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
def print_results(distributions, list_files=False, verbose=False):
"""
    Print the information gathered from the installed distributions that were
    found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
logger.info("---")
name = dist.get('name', '')
required_by = [
pkg.project_name for pkg in pkg_resources.working_set
if name in [required.name for required in pkg.requires()]
]
logger.info("Name: %s", name)
logger.info("Version: %s", dist.get('version', ''))
logger.info("Summary: %s", dist.get('summary', ''))
logger.info("Home-page: %s", dist.get('home-page', ''))
logger.info("Author: %s", dist.get('author', ''))
logger.info("Author-email: %s", dist.get('author-email', ''))
logger.info("License: %s", dist.get('license', ''))
logger.info("Location: %s", dist.get('location', ''))
logger.info("Requires: %s", ', '.join(dist.get('requires', [])))
logger.info("Required-by: %s", ', '.join(required_by))
if verbose:
logger.info("Metadata-Version: %s",
dist.get('metadata-version', ''))
logger.info("Installer: %s", dist.get('installer', ''))
logger.info("Classifiers:")
for classifier in dist.get('classifiers', []):
logger.info(" %s", classifier)
logger.info("Entry-points:")
for entry in dist.get('entry_points', []):
logger.info(" %s", entry.strip())
if list_files:
logger.info("Files:")
for line in dist.get('files', []):
logger.info(" %s", line.strip())
if "files" not in dist:
logger.info("Cannot locate installed-files.txt")
return results_printed
| bsd-2-clause | -1,771,152,429,629,429,200 | 36.339286 | 78 | 0.575801 | false | 4.159814 | false | false | false |
medunigraz/outpost | src/outpost/django/campusonline/migrations/0047_finalthesis.py | 1 | 4551 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-25 06:50
from __future__ import unicode_literals
from django.db import migrations
from django.conf import settings
class Migration(migrations.Migration):
ops = [
(
'''
CREATE FOREIGN TABLE "campusonline"."abschlussarbeiten" (
ID numeric,
STUDIENBEZEICHNUNG varchar,
LETZTE_AENDERUNG timestamp,
AUTOR_ID numeric,
VORNAME_AUTOR varchar,
NACHNAME_AUTOR varchar,
AUTOREN varchar,
TITEL varchar,
KURZFASSUNG text,
SEITEN_ANZAHL numeric,
BETREUER_ID numeric,
BETREUER varchar,
ERSCHEINUNGSJAHR numeric,
LINK varchar,
WERK_TYP numeric,
PUBLIZIERT varchar,
ORGANISATION numeric,
ORGANISATIONS_ID numeric
)
SERVER sqlalchemy OPTIONS (
tablename 'F_ABSCHLUSSARBEIT',
db_url '{}'
);
'''.format(settings.MULTICORN.get('campusonline')),
'''
DROP FOREIGN TABLE IF EXISTS "campusonline"."abschlussarbeiten";
''',
),
(
            r'''
CREATE MATERIALIZED VIEW "public"."campusonline_finalthesis" AS SELECT
aa.ID::integer AS id,
aa.STUDIENBEZEICHNUNG AS study_designation,
aa.LETZTE_AENDERUNG::timestamptz AS modified,
s.STUD_NR::integer AS author_id,
regexp_split_to_array(trim(both ' ' from aa.AUTOREN), ';\s*') AS authors,
aa.TITEL AS title,
aa.KURZFASSUNG AS abstract,
aa.SEITEN_ANZAHL::integer AS pages,
p.PERS_NR::integer AS tutor_id,
aa.ERSCHEINUNGSJAHR::integer AS year,
aa.LINK AS url,
aa.PUBLIZIERT AS category,
o.NR::integer AS organization_id
FROM "campusonline"."abschlussarbeiten" aa
INNER JOIN "campusonline"."stud" s
ON aa.AUTOR_ID::integer = s.STUD_NR::integer
INNER JOIN "campusonline"."personen" p
ON aa.BETREUER_ID::integer = p.PERS_NR::integer
INNER JOIN "campusonline"."organisationen" o
ON aa.ORGANISATIONS_ID::integer = o.NR::integer
WITH DATA;
''',
'''
DROP MATERIALIZED VIEW IF EXISTS "public"."campusonline_finalthesis";
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_id_idx ON "public"."campusonline_finalthesis" ("id");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_id_idx;
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_modified_idx ON "public"."campusonline_finalthesis" ("modified");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_modified_idx;
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_author_id_idx ON "public"."campusonline_finalthesis" ("author_id");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_author_id_idx;
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_tutor_id_idx ON "public"."campusonline_finalthesis" ("tutor_id");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_tutor_id_idx;
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_year_idx ON "public"."campusonline_finalthesis" ("year");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_year_idx;
''',
),
(
'''
CREATE INDEX campusonline_finalthesis_organization_id_idx ON "public"."campusonline_finalthesis" ("organization_id");
''',
'''
DROP INDEX IF EXISTS campusonline_finalthesis_organization_id_idx;
''',
),
]
dependencies = [
('campusonline', '0046_student_username'),
]
operations = [
migrations.RunSQL(
[forward for forward, reverse in ops],
[reverse for forward, reverse in reversed(ops)]
)
]
| bsd-2-clause | 4,544,212,875,813,517,300 | 33.740458 | 129 | 0.504285 | false | 4.202216 | false | false | false |
pypa/warehouse | warehouse/sessions.py | 1 | 12344 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import time
import msgpack
import msgpack.exceptions
import redis
from pyramid import viewderivers
from pyramid.interfaces import ISession, ISessionFactory
from zope.interface import implementer
import warehouse.utils.otp as otp
import warehouse.utils.webauthn as webauthn
from warehouse.cache.http import add_vary
from warehouse.utils import crypto
from warehouse.utils.msgpack import object_encode
def _invalid_method(method):
@functools.wraps(method)
def wrapped(self, *args, **kwargs):
self._error_message()
return wrapped
@implementer(ISession)
class InvalidSession(dict):
__contains__ = _invalid_method(dict.__contains__)
__delitem__ = _invalid_method(dict.__delitem__)
__getitem__ = _invalid_method(dict.__getitem__)
__iter__ = _invalid_method(dict.__iter__)
__len__ = _invalid_method(dict.__len__)
__setitem__ = _invalid_method(dict.__setitem__)
clear = _invalid_method(dict.clear)
copy = _invalid_method(dict.copy)
fromkeys = _invalid_method(dict.fromkeys)
get = _invalid_method(dict.get)
items = _invalid_method(dict.items)
keys = _invalid_method(dict.keys)
pop = _invalid_method(dict.pop)
popitem = _invalid_method(dict.popitem)
setdefault = _invalid_method(dict.setdefault)
update = _invalid_method(dict.update)
values = _invalid_method(dict.values)
def _error_message(self):
raise RuntimeError(
"Cannot use request.session in a view without uses_session=True."
)
def __getattr__(self, name):
self._error_message()
@property
def created(self):
self._error_message()
def _changed_method(method):
@functools.wraps(method)
def wrapped(self, *args, **kwargs):
self.changed()
return method(self, *args, **kwargs)
return wrapped
@implementer(ISession)
class Session(dict):
time_to_reauth = 30 * 60 # 30 minutes
_csrf_token_key = "_csrf_token"
_flash_key = "_flash_messages"
_totp_secret_key = "_totp_secret"
_webauthn_challenge_key = "_webauthn_challenge"
_reauth_timestamp_key = "_reauth_timestamp"
# A number of our methods need to be decorated so that they also call
# self.changed()
__delitem__ = _changed_method(dict.__delitem__)
__setitem__ = _changed_method(dict.__setitem__)
clear = _changed_method(dict.clear)
pop = _changed_method(dict.pop)
popitem = _changed_method(dict.popitem)
setdefault = _changed_method(dict.setdefault)
update = _changed_method(dict.update)
def __init__(self, data=None, session_id=None, new=True):
# Brand new sessions don't have any data, so we'll just create an empty
# dictionary for them.
if data is None:
data = {}
# Initialize our actual dictionary here.
super().__init__(data)
# We need to track the state of our Session.
self._sid = session_id
self._changed = False
self.new = new
self.created = int(time.time())
# We'll track all of the IDs that have been invalidated here
self.invalidated = set()
@property
def sid(self):
if self._sid is None:
self._sid = crypto.random_token()
return self._sid
def changed(self):
self._changed = True
def invalidate(self):
self.clear()
self.new = True
self.created = int(time.time())
self._changed = False
# If the current session id isn't None we'll want to record it as one
# of the ones that have been invalidated.
if self._sid is not None:
self.invalidated.add(self._sid)
self._sid = None
def should_save(self):
return self._changed
def record_auth_timestamp(self):
self[self._reauth_timestamp_key] = datetime.datetime.now().timestamp()
self.changed()
def needs_reauthentication(self):
reauth_timestamp = self.get(self._reauth_timestamp_key, 0)
current_time = datetime.datetime.now().timestamp()
return current_time - reauth_timestamp >= self.time_to_reauth
# Flash Messages Methods
def _get_flash_queue_key(self, queue):
return ".".join(filter(None, [self._flash_key, queue]))
def flash(self, msg, queue="", allow_duplicate=True):
queue_key = self._get_flash_queue_key(queue)
        # If we're not allowing duplicates, check whether this message is
        # already in the queue and, if so, return immediately. Use .get() so
        # a not-yet-created queue doesn't raise a KeyError.
        if not allow_duplicate and msg in self.get(queue_key, []):
            return
self.setdefault(queue_key, []).append(msg)
def peek_flash(self, queue=""):
return self.get(self._get_flash_queue_key(queue), [])
def pop_flash(self, queue=""):
queue_key = self._get_flash_queue_key(queue)
messages = self.get(queue_key, [])
self.pop(queue_key, None)
return messages
# CSRF Methods
def new_csrf_token(self):
self[self._csrf_token_key] = crypto.random_token()
return self[self._csrf_token_key]
def get_csrf_token(self):
token = self.get(self._csrf_token_key)
if token is None:
token = self.new_csrf_token()
return token
def get_totp_secret(self):
totp_secret = self.get(self._totp_secret_key)
if totp_secret is None:
totp_secret = self[self._totp_secret_key] = otp.generate_totp_secret()
return totp_secret
def clear_totp_secret(self):
self[self._totp_secret_key] = None
def get_webauthn_challenge(self):
webauthn_challenge = self.get(self._webauthn_challenge_key)
if webauthn_challenge is None:
self[self._webauthn_challenge_key] = webauthn.generate_webauthn_challenge()
webauthn_challenge = self[self._webauthn_challenge_key]
return webauthn_challenge
def clear_webauthn_challenge(self):
self[self._webauthn_challenge_key] = None
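# Illustrative usage (a sketch, not part of the original module) of the flash
# queues above, from a view registered with uses_session=True:
#
#     request.session.flash("Saved.", queue="success", allow_duplicate=False)
#     request.session.peek_flash("success")  # -> ["Saved."]
#     request.session.pop_flash("success")   # -> ["Saved."], queue now empty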
@implementer(ISessionFactory)
class SessionFactory:
cookie_name = "session_id"
max_age = 12 * 60 * 60 # 12 hours
def __init__(self, secret, url):
self.redis = redis.StrictRedis.from_url(url)
self.signer = crypto.TimestampSigner(secret, salt="session")
def __call__(self, request):
return self._process_request(request)
def _redis_key(self, session_id):
return "warehouse/session/data/{}".format(session_id)
def _process_request(self, request):
# Register a callback with the request so we can save the session once
# it's finished.
request.add_response_callback(self._process_response)
# Load our session ID from the request.
session_id = request.cookies.get(self.cookie_name)
# If we do not have a session ID then we'll just use a new empty
# session.
if session_id is None:
return Session()
# Check to make sure we have a valid session id
try:
session_id = self.signer.unsign(session_id, max_age=self.max_age)
session_id = session_id.decode("utf8")
except crypto.BadSignature:
return Session()
# Fetch the serialized data from redis
bdata = self.redis.get(self._redis_key(session_id))
# If the session didn't exist in redis, we'll give the user a new
# session.
if bdata is None:
return Session()
# De-serialize our session data
try:
data = msgpack.unpackb(bdata, raw=False, use_list=True)
except (msgpack.exceptions.UnpackException, msgpack.exceptions.ExtraData):
# If the session data was invalid we'll give the user a new session
return Session()
# If we were able to load existing session data, load it into a
# Session class
session = Session(data, session_id, False)
return session
def _process_response(self, request, response):
# If the request has an InvalidSession, then the view can't have
# accessed the session, and we can just skip all of this anyways.
if isinstance(request.session, InvalidSession):
return
# Check to see if the session has been marked to be deleted, if it has
        # been, then we'll delete it and tell our response to delete the
# session cookie as well.
if request.session.invalidated:
for session_id in request.session.invalidated:
self.redis.delete(self._redis_key(session_id))
if not request.session.should_save():
response.delete_cookie(self.cookie_name)
# Check to see if the session has been marked to be saved, generally
# this means that the session data has been modified and thus we need
# to store the new data.
if request.session.should_save():
# Save our session in Redis
self.redis.setex(
self._redis_key(request.session.sid),
self.max_age,
msgpack.packb(
request.session, default=object_encode, use_bin_type=True
),
)
# Send our session cookie to the client
response.set_cookie(
self.cookie_name,
self.signer.sign(request.session.sid.encode("utf8")),
max_age=self.max_age,
httponly=True,
secure=request.scheme == "https",
samesite=b"lax",
)
def session_view(view, info):
if info.options.get("uses_session"):
# If we're using the session, then we'll just return the original view
# with a small wrapper around it to ensure that it has a Vary: Cookie
# header.
return add_vary("Cookie")(view)
elif info.exception_only:
return view
else:
# If we're not using the session on this view, then we'll wrap the view
# with a wrapper that just ensures that the session cannot be used.
@functools.wraps(view)
def wrapped(context, request):
# This whole method is a little bit of an odd duck, we want to make
# sure that we don't actually *access* request.session, because
# doing so triggers the machinery to create a new session. So
# instead we will dig into the request object __dict__ to
# effectively do the same thing, just without triggering an access
# on request.session.
# Save the original session so that we can restore it once the
# inner views have been called.
nothing = object()
original_session = request.__dict__.get("session", nothing)
# This particular view hasn't been set to allow access to the
# session, so we'll just assign an InvalidSession to
# request.session
request.__dict__["session"] = InvalidSession()
try:
# Invoke the real view
return view(context, request)
finally:
# Restore the original session so that things like
# pyramid_debugtoolbar can access it.
if original_session is nothing:
del request.__dict__["session"]
else:
request.__dict__["session"] = original_session
return wrapped
session_view.options = {"uses_session"}
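# Hedged illustration (route and view names are hypothetical, not from this
# module): a view opts in to session access by passing the ``uses_session``
# option declared above, e.g.
#
#   @view_config(route_name="manage.profile", renderer="json",
#                uses_session=True)
#   def profile(request):
#       request.session["last_seen"] = "now"
#       return {}
#
# A view registered without ``uses_session=True`` sees the InvalidSession
# placeholder installed by the wrapper above instead of a real session.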
def includeme(config):
config.set_session_factory(
SessionFactory(
config.registry.settings["sessions.secret"],
config.registry.settings["sessions.url"],
)
)
config.add_view_deriver(session_view, over="csrf_view", under=viewderivers.INGRESS)
| apache-2.0 | -3,302,074,025,038,041,600 | 33.099448 | 87 | 0.623218 | false | 4.060526 | false | false | false |
openqt/algorithms | leetcode/python/ac/lc002-add-two-numbers.py | 1 | 1931 | # coding=utf-8
import unittest
"""2. Add Two Numbers
https://leetcode.com/problems/add-two-numbers/description/
You are given two **non-empty** linked lists representing two non-negative
integers. The digits are stored in **reverse order** and each of their nodes
contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the
number 0 itself.
**Example**
**Input:** (2 -> 4 -> 3) + (5 -> 6 -> 4)
**Output:** 7 -> 0 -> 8
**Explanation:** 342 + 465 = 807.
Similar Questions:
Multiply Strings (multiply-strings)
Add Binary (add-binary)
Sum of Two Integers (sum-of-two-integers)
Add Strings (add-strings)
Add Two Numbers II (add-two-numbers-ii)
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
from link import ListNode, to_node, compare
class Solution(unittest.TestCase):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
def toint(node):
val, p, level = 0, node, 1
while p:
val += p.val * level
level *= 10
p = p.next
return val
def tolist(n):
head = ListNode(0)
p = head
while n > 0:
p.next = ListNode(n % 10)
p = p.next
                n //= 10  # floor division keeps this correct on Python 2 and 3
return head.next or head
return tolist(toint(l1) + toint(l2))
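        # Worked example (from the docstring above): l1 = 2->4->3 encodes 342
        # and l2 = 5->6->4 encodes 465; toint() returns 342 and 465, their
        # sum is 807, and tolist(807) rebuilds the list 7->0->8.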
def test(self):
self.assertTrue(compare(
self.addTwoNumbers(to_node([0]), to_node([0])), to_node([0])))
self.assertTrue(compare(
self.addTwoNumbers(to_node([2, 4, 3]), to_node([5, 6, 4])),
to_node([7, 0, 8])))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | -17,463,024,899,245,644 | 24.746667 | 76 | 0.553081 | false | 3.517304 | false | false | false |
TDaglis/argus | argus/argus/handler.py | 1 | 6968 | """
Argus websocket handler and event handler
"""
import os
from re import sub
from json import dumps
from tornado import ioloop
from tornado import websocket
from watchdog.events import RegexMatchingEventHandler
from watchdog.observers import Observer
from settings import ARGUS_ROOT
active_handlers = {}
active_observers = {}
def define_options(enable=[], disable=[]):
"""
Define the options for the subscribed events.
Valid options:
- CRfile: file created
- CRdir: directory created
- MDfile: file modified
- MDdir: directory modified
- MVfile: file moved
- MVdir: directory moved
- DLfile: file deleted
- DLdir: directory deleted
- all: for disable only. Disables all options.
By default all options are enabled.
If all options are disabled, 'enable' options are applied.
If all options are not disabled, 'disable' options are disabled.
"""
default_options = [
'CRfile', 'CRdir', 'MDfile', 'MDdir',
'MVfile', 'MVdir', 'DLfile', 'DLdir'
]
if disable == enable == []:
return default_options
elif 'all' in disable:
return list(set(enable) & set(default_options))
else:
return list(set(default_options) - set(disable))
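# Illustrative resolutions of the precedence rules above (hypothetical calls;
# ordering within the returned list is unspecified because sets are used):
#
#   define_options()                                   -> all eight options
#   define_options(enable=['CRfile'], disable=['all']) -> ['CRfile']
#   define_options(disable=['MDdir', 'MVdir'])         -> the other six options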
class Argus(RegexMatchingEventHandler):
def __init__(self, web_socket, root, options, *args, **kwargs):
super(Argus, self).__init__(*args, **kwargs)
self.websockets = [web_socket]
self.root = root
self.options = options
def write_msg(self, message):
for wbsocket in self.websockets:
wbsocket.write_message(message)
def on_created(self, event):
is_directory = event.is_directory
if (
(is_directory and 'CRdir' in self.options) or
(not is_directory and 'CRfile' in self.options)
):
self.write_msg(
dumps(
{
'event_type': 'created',
'is_directory': event.is_directory,
'src_path': sub(self.root, '', event.src_path)
}
)
)
def on_modified(self, event):
is_directory = event.is_directory
if (
(is_directory and 'MDdir' in self.options) or
(not is_directory and 'MDfile' in self.options)
):
self.write_msg(
dumps(
{
'event_type': 'modified',
'is_directory': event.is_directory,
'src_path': sub(self.root, '', event.src_path)
}
)
)
def on_deleted(self, event):
is_directory = event.is_directory
if (
(is_directory and 'DLdir' in self.options) or
(not is_directory and 'DLfile' in self.options)
):
self.write_msg(
dumps(
{
'event_type': 'deleted',
'is_directory': event.is_directory,
'src_path': sub(self.root, '', event.src_path)
}
)
)
def on_moved(self, event):
is_directory = event.is_directory
if (
(is_directory and 'MVdir' in self.options) or
(not is_directory and 'MVfile' in self.options)
):
self.write_msg(
dumps(
{
'event_type': 'moved',
'is_directory': event.is_directory,
'src_path': sub(self.root, '', event.src_path),
'dest_path': sub(self.root, '', event.dest_path)
}
)
)
def add_socket(self, wbsocket):
self.websockets.append(wbsocket)
def remove_socket(self, wbsocket):
if wbsocket in self.websockets:
self.websockets.remove(wbsocket)
class ArgusWebSocketHandler(websocket.WebSocketHandler):
def __init__(self, *args, **kwargs):
super(ArgusWebSocketHandler, self).__init__(*args, **kwargs)
self.started_observer = False
self.observer = None
self.path = None
self.args = []
self.kwargs = {}
def check_origin(self, origin):
return True
def initiation_handler(self):
"""
Observers are unique per watched path.
If an observer already exists for the requested path,
the new web socket is added in the observer's sockets via the
handler.
In order to achieve this, both the handler and the observer objects
are stored in a global dict.
"""
self.path = os.path.join(ARGUS_ROOT, self.kwargs.get('path'))
if not os.path.exists(self.path):
self.write_message('Path does not exist.')
self.close()
return
if self.path in active_observers:
event_handler = active_handlers[self.path]
event_handler.add_socket(self)
self.observer = active_observers[self.path]
self.started_observer = True
else:
enable = self.get_arguments('enable', strip=True)
disable = self.get_arguments('disable', strip=True)
options = define_options(enable, disable)
if options == []:
return
event_handler = Argus(
web_socket=self, root=self.path, options=options,
case_sensitive=True
)
self.observer = Observer()
self.observer.schedule(
event_handler, path=self.path, recursive=True
)
print '- Starting fs observer for path {}'.format(self.path)
try:
self.observer.start()
except OSError:
self.write_message('Cannot start observer')
self.close()
return
active_handlers[self.path] = event_handler
active_observers[self.path] = self.observer
self.started_observer = True
def open(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.callback = ioloop.PeriodicCallback(lambda: self.ping(''), 60000)
self.callback.start()
self.initiation_handler()
def on_message(self, message):
pass
def data_received(self, chunk):
pass
def on_close(self):
self.callback.stop()
if self.started_observer:
event_handler = active_handlers[self.path]
event_handler.remove_socket(self)
if event_handler.websockets == []:
print '- Stopping fs observer'
self.observer.stop()
del active_observers[self.path]
del active_handlers[self.path]
| mit | 788,149,961,402,902,500 | 32.180952 | 77 | 0.532577 | false | 4.368652 | false | false | false |
lpechacek/cpuset | cpuset/commands/set.py | 1 | 18675 | """Cpuset manipulation command
"""
from __future__ import unicode_literals
from builtins import str
__copyright__ = """
Copyright (C) 2007-2010 Novell Inc.
Copyright (C) 2013-2018 SUSE
Author: Alex Tsariounov <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, logging, time
from optparse import OptionParser, make_option
from cpuset import config
from cpuset import cset
from cpuset.util import *
from cpuset.commands.common import *
try: from cpuset.commands import proc
except SyntaxError:
raise
except:
pass
global log
log = logging.getLogger('set')
help = 'create, modify and destroy cpusets'
usage = """%prog [options] [cpuset name]
This command is used to create, modify, and destroy cpusets.
Cpusets form a tree-like structure rooted at the root cpuset
which always includes all system CPUs and all system memory
nodes.
A cpuset is an organizational unit that defines a group of CPUs
and a group of memory nodes where a process or thread (i.e. task)
is allowed to run on. For non-NUMA machines, the memory node is
always 0 (zero) and cannot be set to anything else. For NUMA
machines, the memory node can be set to a similar specification
as the CPU definition and will tie those memory nodes to that
cpuset. You will usually want the memory nodes that belong to
the CPUs defined to be in the same cpuset.
A cpuset can have exclusive right to the CPUs defined in it.
This means that only this cpuset can own these CPUs. Similarly,
a cpuset can have exclusive right to the memory nodes defined in
it. This means that only this cpuset can own these memory
nodes.
Cpusets can be specified by name or by path; however, care
should be taken when specifying by name if the name is not
unique. This tool will generally not let you do destructive
things to non-unique cpuset names.
Cpusets are uniquely specified by path. The path starts at where
the cpusets filesystem is mounted so you generally do not have to
know where that is. For example, to specify a cpuset that is
called "two" which is a subset of "one" which in turn is a subset
of the root cpuset, use the path "/one/two" regardless of where
the cpusets filesystem is mounted.
When specifying CPUs, a so-called CPUSPEC is used. The CPUSPEC
will accept a comma-separated list of CPUs and inclusive range
specifications. For example, --cpu=1,3,5-7 will assign CPU1,
CPU3, CPU5, CPU6, and CPU7 to the specified cpuset.
Note that cpusets follow certain rules. For example, children
can only include CPUs that the parents already have. If you do
not follow those rules, the kernel cpuset subsystem will not let
you create that cpuset. For example, if you create a cpuset that
contains CPU3, and then attempt to create a child of that cpuset
with a CPU other than 3, you will get an error, and the cpuset
will not be active. The error is somewhat cryptic in that it is
usually a "Permission denied" error.
Memory nodes are specified with a MEMSPEC in a similar way to
the CPUSPEC. For example, --mem=1,3-6 will assign MEM1, MEM3,
MEM4, MEM5, and MEM6 to the specified cpuset.
Note that if you attempt to create or modify a cpuset with a
memory node specification that is not valid, you may get a
cryptic error message, "No space left on device", and the
modification will not be allowed.
When you destroy a cpuset, then the tasks running in that set are
moved to the parent of that cpuset. If this is not what you
want, then manually move those tasks to the cpuset of your choice
with the 'cset proc' command (see 'cset proc --help' for more
information).
EXAMPLES
Create a cpuset with the default memory specification:
# cset set --cpu=2,4,6-8 --set=new_set
This command creates a cpuset called "new_set" located
off the root cpuset which holds CPUS 2,4,6,7,8 and node 0
(interleaved) memory. Note that --set is optional, and
you can just specify the name for the new cpuset after
all arguments.
Create a cpuset that specifies both CPUs and memory nodes:
# cset set --cpu=3 --mem=3 /rad/set_one
Note that this command uses the full path method to
specify the name of the new cpuset "/rad/set_one". It
also names the new cpuset implicitly (i.e. no --set
option, although you can use that if you want to). If
the "set_one" name is unique, you can subsequently refer
to is just by that. Memory node 3 is assigned to this
cpuset as well as CPU 3.
The above commands will create the new cpusets, or if they
already exist, they will modify them to the new specifications."""
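# For illustration only: a hedged sketch of how a CPUSPEC such as '1,3,5-7'
# expands. The real parsing lives in cpuset.cset (cpuspec_check and friends);
# the helper below is hypothetical and is not used by this module.
#
#   def _expand_cpuspec(spec):
#       cpus = set()
#       for part in spec.split(','):
#           if '-' in part:
#               low, high = (int(x) for x in part.split('-'))
#               cpus.update(range(low, high + 1))
#           else:
#               cpus.add(int(part))
#       return sorted(cpus)   # '1,3,5-7' -> [1, 3, 5, 6, 7]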
verbose = 0
options = [make_option('-l', '--list',
help = 'list the named cpuset(s); recursive list if also -r',
action = 'store_true'),
make_option('-c', '--cpu',
help = 'create or modify cpuset in the specified '
'cpuset with CPUSPEC specification',
metavar = 'CPUSPEC'),
make_option('-m', '--mem',
help = 'specify which memory nodes to assign '
'to the created or modified cpuset (optional)',
metavar = 'MEMSPEC'),
make_option('-n', '--newname',
help = 'rename cpuset specified with --set to NEWNAME'),
make_option('-d', '--destroy',
help = 'destroy specified cpuset',
action = 'store_true'),
make_option('-s', '--set',
metavar = 'CPUSET',
help = 'specify cpuset'),
make_option('-r', '--recurse',
help = 'do things recursively, use with --list and --destroy',
action = 'store_true'),
make_option('--force',
help = 'force recursive deletion even if processes are running '
'in those cpusets (they will be moved to parent cpusets)',
action = 'store_true'),
make_option('-x', '--usehex',
help = 'use hexadecimal value for CPUSPEC and MEMSPEC when '
'listing cpusets',
action = 'store_true'),
make_option('-v', '--verbose',
help = 'prints more detailed output, additive',
action = 'count'),
make_option('--cpu_exclusive',
help = 'mark this cpuset as owning its CPUs exclusively',
action = 'store_true'),
make_option('--mem_exclusive',
help = 'mark this cpuset as owning its MEMs exclusively',
action = 'store_true'),
]
def func(parser, options, args):
log.debug("entering func, options=%s, args=%s", options, args)
global verbose
if options.verbose: verbose = options.verbose
cset.rescan()
if options.list:
if options.set:
list_sets(options.set, options.recurse, options.usehex)
return
if len(args): list_sets(args, options.recurse, options.usehex)
else: list_sets('root', options.recurse, options.usehex)
return
if options.cpu or options.mem:
# create or modify cpuset
create_from_options(options, args)
return
if options.newname:
rename_set(options, args)
return
if options.destroy:
if options.set: destroy_sets(options.set, options.recurse, options.force)
else: destroy_sets(args, options.recurse, options.force)
return
if options.cpu_exclusive or options.mem_exclusive:
# FIXME: modification of existing cpusets for exclusivity
log.info("Modification of cpu_exclusive and mem_exclusive not implemented.")
return
# default behavior if no options specified is list
log.debug('no options set, default is listing cpusets')
if options.set: list_sets(options.set, options.recurse, options.usehex)
elif len(args): list_sets(args, options.recurse, options.usehex)
else: list_sets('root', options.recurse, options.usehex)
def list_sets(tset, recurse=None, usehex=False):
"""list cpusets specified in tset as cpuset or list of cpusets, recurse if true"""
log.debug('entering list_sets, tset=%s recurse=%s', tset, recurse)
sl = []
if isinstance(tset, list):
for s in tset: sl.extend(cset.find_sets(s))
else:
sl.extend(cset.find_sets(tset))
log.debug('total unique sets in passed tset: %d', len(sl))
sl2 = []
for s in sl:
sl2.append(s)
if len(s.subsets) > 0:
sl2.extend(s.subsets)
if recurse:
for node in s.subsets:
for nd in cset.walk_set(node):
sl2.append(nd)
sl = sl2
if config.mread:
pl = ['cpuset_list_start']
else:
pl = ['']
pl.extend(set_header(' '))
for s in sl:
if verbose:
pl.append(set_details(s,' ', None, usehex))
else:
pl.append(set_details(s,' ', 78, usehex))
if config.mread:
pl.append('cpuset_list_end')
log.info("\n".join(pl))
def destroy_sets(sets, recurse=False, force=False):
"""destroy cpusets in list of sets, recurse if true, if force destroy if tasks running"""
log.debug('enter destroy_sets, sets=%s, force=%s', sets, force)
nl = []
if isinstance(sets, list):
nl.extend(sets)
else:
nl.append(sets)
# check that sets passed are ok, will raise if one is bad
sl2 = []
for s in nl:
st = cset.unique_set(s)
sl2.append(st)
if len(st.subsets) > 0:
if not recurse:
raise CpusetException(
'cpuset "%s" has subsets, delete them first, or use --recurse'
% st.path)
elif not force:
raise CpusetException(
'cpuset "%s" has subsets, use --force to destroy'
% st.path)
sl2.extend(st.subsets)
for node in st.subsets:
for nd in cset.walk_set(node):
sl2.append(nd)
# ok, good to go
if recurse: sl2.reverse()
for s in sl2:
s = cset.unique_set(s)
# skip the root set!!! or you'll have problems...
if s.path == '/': continue
log.info('--> processing cpuset "%s", moving %s tasks to parent "%s"...',
s.name, len(s.tasks), s.parent.path)
proc.move(s, s.parent)
log.info('--> deleting cpuset "%s"', s.path)
destroy(s)
log.info('done')
def destroy(name):
"""destroy one cpuset by name as cset or string"""
log.debug('entering destroy, name=%s', name)
if isstr(name):
set = cset.unique_set(name)
elif not isinstance(name, cset.CpuSet):
raise CpusetException(
"passed name=%s, which is not a string or CpuSet" % name)
else:
set = name
tsks = set.tasks
if len(tsks) > 0:
# wait for tasks, sometimes it takes a little while to
# have them leave the set
ii = 0
while len(tsks)>0:
log.debug('%i tasks still running in set %s, waiting interval %s...',
len(tsks), set.name, ii+1)
time.sleep(0.5)
tsks = set.tasks
ii += 1
            if ii > 6:
# try it for 3 seconds, bail if tasks still there
raise CpusetException(
"trying to destroy cpuset %s with tasks running: %s" %
(set.path, set.tasks))
log.debug("tasks expired, deleting set %s" % set.path)
os.rmdir(cset.CpuSet.basepath+set.path)
# fixme: perhaps reparsing the all the sets is not so efficient...
cset.rescan()
def rename_set(options, args):
"""rename cpuset as specified in options and args lists"""
log.debug('entering rename_set, options=%s args=%s', options, args)
# figure out target cpuset name, if --set not used, use first arg
name = options.newname
if options.set:
tset = cset.unique_set(options.set)
elif len(args) > 0:
tset = cset.unique_set(args[0])
else:
raise CpusetException('desired cpuset not specified')
path = tset.path[0:tset.path.rfind('/')+1]
log.debug('target set="%s", path="%s", name="%s"', tset.path, path, name)
try:
if name.find('/') == -1:
chk = cset.unique_set(path+name)
else:
if name[0:name.rfind('/')+1] != path:
raise CpusetException('desired name cannot have different path')
chk = cset.unique_set(name)
raise CpusetException('cpuset "'+chk.path+'" already exists')
except CpusetNotFound:
pass
except:
raise
if name.rfind('/') != -1:
name = name[name.rfind('/')+1:]
log.info('--> renaming "%s" to "%s"', cset.CpuSet.basepath+tset.path, name)
os.rename(cset.CpuSet.basepath+tset.path, cset.CpuSet.basepath+path+name)
cset.rescan()
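# Illustrative trace of rename_set (hypothetical values): renaming cpuset
# '/rad/set_one' with --newname 'set_two' yields path='/rad/' and
# name='set_two', so the call above amounts to
#   os.rename(basepath + '/rad/set_one', basepath + '/rad/set_two')
# while a new name carrying a different path, such as '/other/set_two', is
# rejected before any rename happens.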
def create_from_options(options, args):
"""create cpuset as specified by options and args lists"""
log.debug('entering create_from_options, options=%s args=%s', options, args)
# figure out target cpuset name, if --set not used, use first arg
if options.set:
tset = options.set
elif len(args) > 0:
tset = args[0]
else:
raise CpusetException('cpuset not specified')
cspec = None
mspec = None
cx = None
mx = None
if options.cpu:
cset.cpuspec_check(options.cpu)
cspec = options.cpu
if options.mem:
cset.memspec_check(options.mem)
mspec = options.mem
if options.cpu_exclusive: cx = options.cpu_exclusive
if options.mem_exclusive: mx = options.mem_exclusive
try:
create(tset, cspec, mspec, cx, mx)
if not mspec: modify(tset, memspec='0') # always need at least this
log.info('--> created cpuset "%s"', tset)
except CpusetExists:
modify(tset, cspec, mspec, cx, mx)
log.info('--> modified cpuset "%s"', tset)
active(tset)
def create(name, cpuspec, memspec, cx, mx):
"""create one cpuset by name, cpuspec, memspec, cpu and mem exclusive flags"""
log.debug('entering create, name=%s cpuspec=%s memspec=%s cx=%s mx=%s',
name, cpuspec, memspec, cx, mx)
try:
cset.unique_set(name)
except CpusetNotFound:
pass
except:
raise CpusetException('cpuset "%s" not unique, please specify by path' % name)
else:
raise CpusetExists('attempt to create already existing set: "%s"' % name)
# FIXME: check if name is a path here
os.mkdir(cset.CpuSet.basepath+'/'+name)
# fixme: perhaps reparsing the all the sets is not so efficient...
cset.rescan()
log.debug('created new cpuset "%s"', name)
modify(name, cpuspec, memspec, cx, mx)
def modify(name, cpuspec=None, memspec=None, cx=None, mx=None):
"""modify one cpuset by name, cpuspec, memspec, cpu and mem exclusive flags"""
log.debug('entering modify, name=%s cpuspec=%s memspec=%s cx=%s mx=%s',
name, cpuspec, memspec, cx, mx)
if isstr(name):
nset = cset.unique_set(name)
elif not isinstance(name, cset.CpuSet):
raise CpusetException(
"passed name=%s, which is not a string or CpuSet" % name)
else:
nset = name
log.debug('modifying cpuset "%s"', nset.name)
if cpuspec: nset.cpus = cpuspec
if memspec: nset.mems = memspec
if cx: nset.cpu_exclusive = cx
if mx: nset.mem_exclusive = mx
def active(name):
"""check that cpuset by name or cset is ready to be used"""
log.debug("entering active, name=%s", name)
if isstr(name):
set = cset.unique_set(name)
elif not isinstance(name, cset.CpuSet):
raise CpusetException("passing bogus name=%s" % name)
else:
set = name
if set.cpus == '':
raise CpusetException('"%s" cpuset not active, no cpus defined' % set.path)
if set.mems == '':
raise CpusetException('"%s" cpuset not active, no mems defined' % set.path)
def set_header(indent=None):
"""return list of cpuset output header"""
if indent: istr = indent
else: istr = ''
l = []
# '123456789-123456789-123456789-123456789-123456789-123456789-'
l.append(istr + ' Name CPUs-X MEMs-X Tasks Subs Path')
l.append(istr + '------------ ---------- - ------- - ----- ---- ----------')
return l
def set_details(name, indent=None, width=None, usehex=False):
"""return string of cpuset details"""
if width == None: width = 0
if isstr(name):
set = cset.unique_set(name)
elif not isinstance(name, cset.CpuSet):
raise CpusetException("passing bogus set=%s" % name)
else:
set = name
l = []
l.append(set.name.rjust(12))
cs = set.cpus
if cs == '': cs = '*****'
elif usehex: cs = cset.cpuspec_to_hex(cs)
l.append(cs.rjust(10))
if set.cpu_exclusive:
l.append('y')
else:
l.append('n')
cs = set.mems
if cs == '': cs = '*****'
elif usehex: cs = cset.cpuspec_to_hex(cs)
l.append(cs.rjust(7))
if set.mem_exclusive:
l.append('y')
else:
l.append('n')
l.append(str(len(set.tasks)).rjust(5))
l.append(str(len(set.subsets)).rjust(4))
if config.mread:
l.append(set.path)
l2 = []
for line in l:
l2.append(line.strip())
return ';'.join(l2)
out = ' '.join(l) + ' '
tst = out + set.path
if width != 0 and len(tst) > width:
target = width - len(out)
patha = set.path[:len(set.path)//2-3]
pathb = set.path[len(set.path)//2:]
patha = patha[:target//2-3]
pathb = pathb[-target//2:]
out += patha + '...' + pathb
else:
out = tst
if indent: istr = indent
else: istr = ''
return istr + out
| gpl-2.0 | -4,110,054,007,725,781,500 | 36.65121 | 93 | 0.610977 | false | 3.740236 | false | false | false |
neldom/qessera | careers/models.py | 1 | 2634 | from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.conf import settings
from django.db import models
from django.core.urlresolvers import reverse
from django.db.models.signals import pre_save
from django.utils import timezone
from django.utils.text import slugify
class CareerManager(models.Manager):
def active(self, *args, **kwargs):
return super(CareerManager, self).filter(draft = False).filter(published_at__lte = timezone.now())
@python_2_unicode_compatible
class Career(models.Model):
FULLTIME = 'Full-time'
PARTTIME = 'Part-time'
INTERNSHIP = 'Internship'
RESEARCH = 'Research'
ROLE_CATEGORY_CHOICES = (
(FULLTIME, 'Full-time'),
(PARTTIME, 'Part-time'),
(INTERNSHIP, 'Internship'),
(RESEARCH, 'Research'),
)
role_category = models.CharField(
max_length=12,
choices=ROLE_CATEGORY_CHOICES,
default=FULLTIME,
)
# Role
role = models.CharField(max_length = 120)
# Location
city = models.CharField(max_length=255)
# Plain text and urlify slug
career_slug = models.SlugField(unique = True)
career_offer_title = models.CharField(max_length=255, default="")
career_offer_description = models.TextField(default="")
career_experience = models.TextField(default="")
career_terms = models.TextField(default="")
# Time and meta staff
draft = models.BooleanField(default = False)
published_at = models.DateField(auto_now = False, auto_now_add = False)
updated = models.DateTimeField(auto_now = True, auto_now_add = False)
timestamp = models.DateTimeField(auto_now = False, auto_now_add = True)
objects = CareerManager()
def __unicode__(self):
return self.role
def __str__(self):
return self.role
def get_absolute_url(self):
return reverse('careers:detail', kwargs = {'slug':self.career_slug})
class Meta:
ordering = ["-timestamp", "-updated"]
def create_slug(instance, new_slug=None):
    # The Career model has no ``title`` field; build the slug from the role.
    career_slug = slugify(instance.role)
    if new_slug is not None:
        career_slug = new_slug
    qs = Career.objects.filter(career_slug=career_slug).order_by("-id")
    if qs.exists():
        new_slug = "%s-%s" % (career_slug, qs.first().id)
        # Recurse with the keyword argument the signature actually defines.
        return create_slug(instance, new_slug=new_slug)
    return career_slug
def pre_save_post_receiver(sender, instance, *args, **kwargs):
if not instance.career_slug:
instance.career_slug = create_slug(instance)
pre_save.connect(pre_save_post_receiver, sender = Career)
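# Hedged usage sketch (field values are made up): saving a new Career fires
# the pre_save hook above, so the slug never has to be set by hand.
#
#   career = Career(role="Backend Engineer", city="Amsterdam",
#                   published_at=timezone.now().date())
#   career.save()   # pre_save fills career_slug, e.g. "backend-engineer"
#                   # (suffixed with an id if that slug is already taken)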
| mit | 6,271,758,748,411,158,000 | 29.275862 | 106 | 0.669324 | false | 3.549865 | false | false | false |
Grumbel/dirtool | dirtools/fileview/sorter.py | 1 | 1839 | # dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import TYPE_CHECKING, Callable, Any
from dirtools.util import numeric_sort_key
from dirtools.fileview.file_info import FileInfo
if TYPE_CHECKING:
from dirtools.fileview.file_collection import FileCollection # noqa: F401
class Sorter:
def __init__(self) -> None:
self.directories_first = True
self.reverse = False
self.key_func: Callable[[FileInfo], Any] = lambda x: numeric_sort_key(x.basename().lower())
def set_directories_first(self, v: bool) -> None:
self.directories_first = v
def set_sort_reversed(self, rev: bool) -> None:
self.reverse = rev
def set_key_func(self, key_func: Callable[[FileInfo], Any]) -> None:
self.key_func = key_func
def get_key_func(self) -> Callable[[FileInfo], Any]:
if self.directories_first:
return lambda fileinfo: (not fileinfo.isdir(), self.key_func(fileinfo))
else:
return self.key_func
# def apply(self, file_collection: 'FileCollection') -> None:
# file_collection.sort(self.get_key_func(), reverse=self.reverse)
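# Hedged usage sketch (the FileInfo objects are assumed to come from the
# surrounding dirtools code; the Sorter API is the one defined above):
#
#   sorter = Sorter()
#   sorter.set_sort_reversed(True)
#   fileinfos.sort(key=sorter.get_key_func(), reverse=sorter.reverse)
#
# With directories_first set, the key is a (not isdir, name_key) tuple, so
# directories sort ahead of regular files before the name ordering applies.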
# EOF #
| gpl-3.0 | 2,956,501,709,050,638,300 | 33.698113 | 99 | 0.695487 | false | 3.692771 | false | false | false |
ktan2020/legacy-automation | win/Lib/site-packages/junitxml/tests/test_runner.py | 1 | 3623 | """Test XmlTestRunner functionality for junitxml.
:Author: Duncan Findlay <[email protected]>
"""
import xml.dom.minidom
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import unittest2 as unittest
except ImportError:
import unittest
import junitxml.runner
# Old versions of unittest don't have these "fancy" types of results.
_FANCY_UNITTEST = (hasattr(unittest, 'skip') and
hasattr(unittest, 'expectedFailure'))
class TestXMLTestRunner(unittest.TestCase):
class DummyTestCase(unittest.TestCase):
def test_pass(self):
pass
def test_fail(self):
self.fail()
def test_error(self):
raise Exception()
if _FANCY_UNITTEST:
@unittest.skip('skipped')
def test_skip(self):
pass
@unittest.expectedFailure
def test_xfail(self):
self.fail('all is good')
@unittest.expectedFailure
def test_unexpected_success(self):
pass
def _run_runner(self, test_suite):
xml_out = StringIO()
console = StringIO()
runner = junitxml.runner.JUnitXmlTestRunner(
xml_stream=xml_out, txt_stream=console)
result = runner.run(test_suite)
return (result, xml_out, console)
def test_xml_output(self):
"""Tests that runner properly gives XML output."""
test_suite = unittest.TestLoader().loadTestsFromTestCase(
self.DummyTestCase)
result, xml_out, console = self._run_runner(test_suite)
num_tests = test_suite.countTestCases()
# Make sure the XML output looks correct.
value = xml_out.getvalue()
document = xml.dom.minidom.parseString(value)
self.assertEqual(document.documentElement.tagName, 'testsuite')
self.assertEqual(document.documentElement.getAttribute('tests'),
str(num_tests))
def test_console_output_fail(self):
"""Tests that failure is reported properly on stderr."""
test_suite = unittest.TestLoader().loadTestsFromTestCase(
self.DummyTestCase)
result, xml_out, console = self._run_runner(test_suite)
num_tests = test_suite.countTestCases()
# Make sure the console output looks correct.
value = console.getvalue()
self.assertTrue('Ran %d tests in ' % (num_tests,) in value,
'Output was:\n%s' % (value,))
self.assertTrue('FAILED (failures=1' in value,
'Output was:\n%s' % (value,))
self.assertTrue('errors=1' in value,
'Output was:\n%s' % (value,))
if _FANCY_UNITTEST:
self.assertTrue('expected failures=1' in value,
'Output was:\n%s' % (value,))
self.assertTrue('skipped=1' in value,
'Output was:\n%s' % (value,))
self.assertTrue('unexpected successes=1' in value,
'Output was:\n%s' % (value,))
def test_console_output_ok(self):
"""Tests that success is reported properly on stderr."""
test_suite = unittest.TestSuite()
test_suite.addTest(self.DummyTestCase('test_pass'))
result, xml_out, console = self._run_runner(test_suite)
value = console.getvalue()
self.assertTrue('Ran 1 test in ' in value,
'Output was:\n%s' % (value,))
self.assertTrue('OK\n' in value,
'Output was:\n%s' % (value,))
| mit | 1,243,737,621,717,691,600 | 30.504348 | 72 | 0.581562 | false | 4.282506 | true | false | false |
nvarini/espresso_iohpc | test-suite/testcode/lib/testcode2/__init__.py | 5 | 29590 | '''
testcode2
---------
A framework for regression testing numerical programs.
:copyright: (c) 2012 James Spencer.
:license: modified BSD; see LICENSE for more details.
'''
import glob
import os
import pipes
import shutil
import subprocess
import sys
try:
import yaml
_HAVE_YAML = True
except ImportError:
_HAVE_YAML = False
import testcode2.dir_lock as dir_lock
import testcode2.exceptions as exceptions
import testcode2.queues as queues
import testcode2.compatibility as compat
import testcode2.util as util
import testcode2.validation as validation
DIR_LOCK = dir_lock.DirLock()
# Do not change! Bad things will happen...
_FILESTEM_TUPLE = (
('test', 'test.out'),
('error', 'test.err'),
('benchmark', 'benchmark.out'),
)
_FILESTEM_DICT = dict( _FILESTEM_TUPLE )
# We can change FILESTEM if needed.
# However, this should only be done to compare two sets of test output or two
# sets of benchmarks.
# Bad things will happen if tests are run without the default FILESTEM!
FILESTEM = dict( _FILESTEM_TUPLE )
class TestProgram:
'''Store and access information about the program being tested.'''
def __init__(self, name, exe, test_id, benchmark, **kwargs):
# Set sane defaults (mostly null) for keyword arguments.
self.name = name
# Running
self.exe = exe
self.test_id = test_id
self.run_cmd_template = ('tc.program tc.args tc.input > '
'tc.output 2> tc.error')
self.launch_parallel = 'mpirun -np tc.nprocs'
self.submit_pattern = 'testcode.run_cmd'
# dummy job with default settings (e.g tolerance)
self.default_test_settings = None
# Analysis
self.benchmark = benchmark
self.ignore_fields = []
self.data_tag = None
self.extract_cmd_template = 'tc.extract tc.args tc.file'
self.extract_program = None
self.extract_args = ''
self.extract_fmt = 'table'
self.skip_cmd_template = 'tc.skip tc.args tc.test'
self.skip_program = None
self.skip_args = ''
self.verify = False
# Info
self.vcs = None
# Set values passed in as keyword options.
for (attr, val) in kwargs.items():
setattr(self, attr, val)
# If using an external verification program, then set the default
# extract command template.
if self.verify and 'extract_cmd_template' not in kwargs:
self.extract_cmd_template = 'tc.extract tc.args tc.test tc.bench'
# Can we actually extract the data?
if self.extract_fmt == 'yaml' and not _HAVE_YAML:
err = 'YAML data format cannot be used: PyYAML is not installed.'
raise exceptions.TestCodeError(err)
def run_cmd(self, input_file, args, nprocs=0):
'''Create run command.'''
output_file = util.testcode_filename(FILESTEM['test'], self.test_id,
input_file, args)
error_file = util.testcode_filename(FILESTEM['error'], self.test_id,
input_file, args)
# Need to escape filenames for passing them to the shell.
exe = pipes.quote(self.exe)
output_file = pipes.quote(output_file)
error_file = pipes.quote(error_file)
cmd = self.run_cmd_template.replace('tc.program', exe)
if type(input_file) is str:
input_file = pipes.quote(input_file)
cmd = cmd.replace('tc.input', input_file)
else:
cmd = cmd.replace('tc.input', '')
if type(args) is str:
cmd = cmd.replace('tc.args', args)
else:
cmd = cmd.replace('tc.args', '')
cmd = cmd.replace('tc.output', output_file)
cmd = cmd.replace('tc.error', error_file)
if nprocs > 0 and self.launch_parallel:
cmd = '%s %s' % (self.launch_parallel, cmd)
cmd = cmd.replace('tc.nprocs', str(nprocs))
return cmd
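    # Illustrative substitution (hypothetical values; the exact test/error
    # filenames come from util.testcode_filename): with exe='./prog',
    # input_file='in.dat', args='-v', nprocs=4 and the default templates
    # above, run_cmd returns roughly
    #   mpirun -np 4 ./prog -v in.dat > <test file> 2> <error file>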
def extract_cmd(self, path, input_file, args):
'''Create extraction command(s).'''
test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
input_file, args)
bench_file = self.select_benchmark_file(path, input_file, args)
cmd = self.extract_cmd_template
cmd = cmd.replace('tc.extract', pipes.quote(self.extract_program))
cmd = cmd.replace('tc.args', self.extract_args)
if self.verify:
# Single command to compare benchmark and test outputs.
cmd = cmd.replace('tc.test', pipes.quote(test_file))
cmd = cmd.replace('tc.bench', pipes.quote(bench_file))
return (cmd,)
else:
# Need to return commands to extract data from the test and
# benchmark outputs.
test_cmd = cmd.replace('tc.file', pipes.quote(test_file))
bench_cmd = cmd.replace('tc.file', pipes.quote(bench_file))
return (bench_cmd, test_cmd)
def skip_cmd(self, input_file, args):
'''Create skip command.'''
test_file = util.testcode_filename(FILESTEM['test'], self.test_id,
input_file, args)
cmd = self.skip_cmd_template
cmd = cmd.replace('tc.skip', pipes.quote(self.skip_program))
cmd = cmd.replace('tc.args', self.skip_args)
cmd = cmd.replace('tc.test', pipes.quote(test_file))
return cmd
def select_benchmark_file(self, path, input_file, args):
'''Find the first benchmark file out of all benchmark IDs which exists.'''
benchmark = None
benchmarks = []
for bench_id in self.benchmark:
benchfile = util.testcode_filename(FILESTEM['benchmark'], bench_id,
input_file, args)
benchmarks.append(benchfile)
if os.path.exists(os.path.join(path, benchfile)):
benchmark = benchfile
break
if not benchmark:
err = 'No benchmark found in %s. Checked for: %s.'
raise exceptions.TestCodeError(err % (path, ', '.join(benchmarks)))
return benchmark
class Test:
'''Store and execute a test.'''
def __init__(self, name, test_program, path, **kwargs):
self.name = name
# program
self.test_program = test_program
# running
self.path = path
self.inputs_args = None
self.output = None
self.nprocs = 0
self.min_nprocs = 0
self.max_nprocs = compat.maxint
self.submit_template = None
# Run jobs in this concurrently rather than consecutively?
# Only used when setting tests up in testcode2.config: if true then
# each pair of input file and arguments are assigned to a different
# Test object rather than a single Test object.
self.run_concurrent = False
# Analysis
self.default_tolerance = None
self.tolerances = {}
# Set values passed in as keyword options.
for (attr, val) in kwargs.items():
setattr(self, attr, val)
if not self.inputs_args:
self.inputs_args = [('', '')]
self.status = dict( (inp_arg, None) for inp_arg in self.inputs_args )
# 'Decorate' functions which require a directory lock in order for file
# access to be thread-safe.
# As we use the in_dir decorator, which requires knowledge of the test
# directory (a per-instance property), we cannot use the @decorator
# syntactic sugar. Fortunately we can still modify them at
# initialisation time. Thank you python for closures!
self.start_job = DIR_LOCK.in_dir(self.path)(self._start_job)
self.move_output_to_test_output = DIR_LOCK.in_dir(self.path)(
self._move_output_to_test_output)
self.move_old_output_files = DIR_LOCK.in_dir(self.path)(
self._move_old_output_files)
self.verify_job = DIR_LOCK.in_dir(self.path)(self._verify_job)
self.skip_job = DIR_LOCK.in_dir(self.path)(self._skip_job)
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
else:
# Compare values we care about...
cmp_vals = ['test_program', 'path', 'inputs_args', 'output',
'nprocs', 'min_nprocs', 'max_nprocs', 'submit_template',
'default_tolerance', 'tolerances', 'status']
comparison = tuple(getattr(other, cmp_val) == getattr(self, cmp_val) for cmp_val in cmp_vals)
return compat.compat_all(comparison)
def run_test(self, verbose=1, cluster_queue=None, rundir=None):
'''Run all jobs in test.'''
try:
# Construct tests.
test_cmds = []
test_files = []
for (test_input, test_arg) in self.inputs_args:
if (test_input and
not os.path.exists(os.path.join(self.path,test_input))):
err = 'Input file does not exist: %s' % (test_input,)
raise exceptions.RunError(err)
test_cmds.append(self.test_program.run_cmd(test_input, test_arg,
self.nprocs))
test_files.append(util.testcode_filename(FILESTEM['test'],
self.test_program.test_id, test_input, test_arg))
# Move files matching output pattern out of the way.
self.move_old_output_files(verbose)
# Run tests one-at-a-time locally or submit job in single submit
# file to a queueing system.
if cluster_queue:
if self.output:
for (ind, test) in enumerate(test_cmds):
# Don't quote self.output if it contains any wildcards
# (assume the user set it up correctly!)
out = self.output
if not compat.compat_any(wild in self.output for wild in
['*', '?', '[', '{']):
out = pipes.quote(self.output)
test_cmds[ind] = '%s; mv %s %s' % (test_cmds[ind],
out, pipes.quote(test_files[ind]))
test_cmds = ['\n'.join(test_cmds)]
for (ind, test) in enumerate(test_cmds):
job = self.start_job(test, cluster_queue, verbose)
job.wait()
# Analyse tests as they finish.
if cluster_queue:
# Did all of them at once.
for (test_input, test_arg) in self.inputs_args:
self.verify_job(test_input, test_arg, verbose, rundir)
else:
# Did one job at a time.
(test_input, test_arg) = self.inputs_args[ind]
err = []
if self.output:
try:
self.move_output_to_test_output(test_files[ind])
except exceptions.RunError:
err.append(sys.exc_info()[1])
status = validation.Status()
if job.returncode != 0:
err.insert(0, 'Error running job. Return code: %i'
% job.returncode)
(status, msg) = self.skip_job(test_input, test_arg,
verbose)
if status.skipped():
self._update_status(status, (test_input, test_arg))
if verbose > 0 and verbose < 3:
sys.stdout.write(
util.info_line(self.path,
test_input, test_arg, rundir)
)
status.print_status(msg, verbose)
elif err:
# re-raise first error we hit.
raise exceptions.RunError(err[0])
else:
self.verify_job(test_input, test_arg, verbose, rundir)
except exceptions.RunError:
err = sys.exc_info()[1]
if verbose > 2:
err = 'Test(s) in %s failed.\n%s' % (self.path, err)
status = validation.Status([False])
self._update_status(status, (test_input, test_arg))
if verbose > 0 and verbose < 3:
info_line = util.info_line(self.path, test_input, test_arg, rundir)
sys.stdout.write(info_line)
status.print_status(err, verbose)
# Shouldn't run remaining tests after such a catastrophic failure.
# Mark all remaining tests as skipped so the user knows that they
# weren't run.
err = 'Previous test in %s caused a system failure.' % (self.path)
status = validation.Status(name='skipped')
for ((test_input, test_arg), stat) in self.status.items():
if not self.status[(test_input,test_arg)]:
self._update_status(status, (test_input, test_arg))
if verbose > 2:
cmd = self.test_program.run_cmd(test_input, test_arg,
self.nprocs)
print('Test using %s in %s' % (cmd, self.path))
elif verbose > 0:
info_line = util.info_line(self.path, test_input,
test_arg, rundir)
sys.stdout.write(info_line)
status.print_status(err, verbose)
def _start_job(self, cmd, cluster_queue=None, verbose=1):
'''Start test running. Requires directory lock.
IMPORTANT: use self.start_job rather than self._start_job if using multiple
threads.
Decorated to start_job, which acquires directory lock and enters self.path
first, during initialisation.'''
if cluster_queue:
tp_ptr = self.test_program
submit_file = '%s.%s' % (os.path.basename(self.submit_template),
tp_ptr.test_id)
job = queues.ClusterQueueJob(submit_file, system=cluster_queue)
job.create_submit_file(tp_ptr.submit_pattern, cmd,
self.submit_template)
if verbose > 2:
print('Submitting tests using %s (template submit file) in %s'
% (self.submit_template, self.path))
job.start_job()
else:
# Run locally via subprocess.
if verbose > 2:
print('Running test using %s in %s\n' % (cmd, self.path))
try:
job = subprocess.Popen(cmd, shell=True)
except OSError:
# slightly odd syntax in order to be compatible with python 2.5
# and python 2.6/3
err = 'Execution of test failed: %s' % (sys.exc_info()[1],)
raise exceptions.RunError(err)
# Return either Popen object or ClusterQueueJob object. Both have
# a wait method which returns only once job has finished.
return job
def _move_output_to_test_output(self, test_files_out):
'''Move output to the testcode output file. Requires directory lock.
        This is used when a program writes its output to a file rather than
        to STDOUT.
IMPORTANT: use self.move_output_to_test_output rather than
self._move_output_to_test_output if using multiple threads.
Decorated to move_output_to_test_output, which acquires the directory lock and
enters self.path.
'''
# self.output might be a glob which works with e.g.
# mv self.output test_files[ind]
# if self.output matches only one file. Reproduce that
# here so that running tests through the queueing system
# and running tests locally have the same behaviour.
out_files = glob.glob(self.output)
if len(out_files) == 1:
shutil.move(out_files[0], test_files_out)
else:
err = ('Output pattern (%s) matches %s files (%s).'
% (self.output, len(out_files), out_files))
raise exceptions.RunError(err)
def _move_old_output_files(self, verbose=1):
        '''Move existing output files out of the way. Requires directory lock.

        This is only relevant when a program writes its output to a file
        (i.e. self.output is set) rather than to STDOUT.

        IMPORTANT: use self.move_old_output_files rather than
self._move_old_output_files if using multiple threads.
Decorated to move_old_output_files, which acquires the directory lock and
enters self.path.
'''
if self.output:
old_out_files = glob.glob(self.output)
if old_out_files:
out_dir = 'test.prev.output.%s' % (self.test_program.test_id)
if verbose > 2:
print('WARNING: found existing files matching output '
'pattern: %s.' % self.output)
print('WARNING: moving existing output files (%s) to %s.\n'
% (', '.join(old_out_files), out_dir))
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for out_file in old_out_files:
shutil.move(out_file, out_dir)
def _verify_job(self, input_file, args, verbose=1, rundir=None):
'''Check job against benchmark.
Assume function is executed in self.path.
IMPORTANT: use self.verify_job rather than self._verify_job if using multiple
threads.
Decorated to verify_job, which acquires directory lock and enters self.path
first, during initialisation.'''
# We already have DIR_LOCK, so use _skip_job instead of skip_job.
(status, msg) = self._skip_job(input_file, args, verbose)
try:
if self.test_program.verify and not status.skipped():
(status, msg) = self.verify_job_external(input_file, args,
verbose)
elif not status.skipped():
(bench_out, test_out) = self.extract_data(input_file, args,
verbose)
(comparable, status, msg) = validation.compare_data(bench_out,
test_out, self.default_tolerance, self.tolerances,
self.test_program.ignore_fields)
if verbose > 2:
# Include data tables in output.
if comparable:
# Combine test and benchmark dictionaries.
data_table = util.pretty_print_table(
['benchmark', 'test'],
[bench_out, test_out])
else:
# Print dictionaries separately--couldn't even compare
# them!
data_table = '\n'.join((
util.pretty_print_table(['benchmark'], [bench_out]),
util.pretty_print_table(['test '], [test_out])))
if msg.strip():
# join data table with error message from
# validation.compare_data.
msg = '\n'.join((msg, data_table))
else:
msg = data_table
except (exceptions.AnalysisError, exceptions.TestCodeError):
if msg.strip():
msg = '%s\n%s' % (msg, sys.exc_info()[1])
else:
msg = sys.exc_info()[1]
status = validation.Status([False])
self._update_status(status, (input_file, args))
if verbose > 0 and verbose < 3:
info_line = util.info_line(self.path, input_file, args, rundir)
sys.stdout.write(info_line)
status.print_status(msg, verbose)
return (status, msg)
def _skip_job(self, input_file, args, verbose=1):
'''Run user-supplied command to check if test should be skipped.
IMPORTANT: use self.skip_job rather than self._skip_job if using multiple
threads.
Decorated to skip_job, which acquires directory lock and enters self.path
first, during initialisation.'''
status = validation.Status()
if self.test_program.skip_program:
cmd = self.test_program.skip_cmd(input_file, args)
try:
if verbose > 2:
print('Testing whether to skip test using %s in %s.' %
(cmd, self.path))
skip_popen = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
skip_popen.wait()
if skip_popen.returncode == 0:
# skip this test
status = validation.Status(name='skipped')
except OSError:
# slightly odd syntax in order to be compatible with python
# 2.5 and python 2.6/3
if verbose > 2:
                    print('Testing whether to skip the test failed: %s'
                          % (sys.exc_info()[1],))
return (status, '')
def verify_job_external(self, input_file, args, verbose=1):
'''Run user-supplied verifier script.
Assume function is executed in self.path.'''
verify_cmd, = self.test_program.extract_cmd(self.path, input_file, args)
try:
if verbose > 2:
print('Analysing test using %s in %s.' %
(verify_cmd, self.path))
verify_popen = subprocess.Popen(verify_cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
verify_popen.wait()
except OSError:
# slightly odd syntax in order to be compatible with python 2.5
# and python 2.6/3
err = 'Analysis of test failed: %s' % (sys.exc_info()[1],)
raise exceptions.AnalysisError(err)
output = verify_popen.communicate()[0].decode('utf-8')
if verbose < 2:
# Suppress output. (hackhack)
output = ''
if verify_popen.returncode == 0:
return (validation.Status([True]), output)
else:
return (validation.Status([False]), output)
def extract_data(self, input_file, args, verbose=1):
'''Extract data from output file.
Assume function is executed in self.path.'''
tp_ptr = self.test_program
if tp_ptr.data_tag:
# Using internal data extraction function.
data_files = [
tp_ptr.select_benchmark_file(self.path, input_file, args),
util.testcode_filename(FILESTEM['test'],
tp_ptr.test_id, input_file, args),
]
if verbose > 2:
print('Analysing output using data_tag %s in %s on files %s.' %
(tp_ptr.data_tag, self.path, ' and '.join(data_files)))
outputs = [util.extract_tagged_data(tp_ptr.data_tag, dfile)
for dfile in data_files]
else:
# Using external data extraction script.
# Get extraction commands.
extract_cmds = tp_ptr.extract_cmd(self.path, input_file, args)
# Extract data.
outputs = []
for cmd in extract_cmds:
try:
if verbose > 2:
print('Analysing output using %s in %s.' %
(cmd, self.path))
# Samuel Ponce: Popen.wait() creates deadlock if the data is too large
# See documented issue for example in:
# https://docs.python.org/2/library/subprocess.html#subprocess.Popen.returncode
#
                    # Previous code that created a deadlock:
#extract_popen = subprocess.Popen(cmd, shell=True,
# stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#extract_popen.wait()
#
                    # New code (this might not be the best, but it works for me):
extract_popen = subprocess.Popen(cmd, bufsize=1, shell=True,
stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
lines = []
for line in iter(extract_popen.stdout.readline, ''):
#print line,
lines.append(line)
except OSError:
# slightly odd syntax in order to be compatible with python
# 2.5 and python 2.6/3
err = 'Analysing output failed: %s' % (sys.exc_info()[1],)
raise exceptions.AnalysisError(err)
# Convert data string from extract command to dictionary format.
# SP: Because of the above change, the test below cannot be done:
#if extract_popen.returncode != 0:
# err = extract_popen.communicate()[1].decode('utf-8')
# err = 'Analysing output failed: %s' % (err)
# raise exceptions.AnalysisError(err)
#data_string = extract_popen.communicate()[0].decode('utf-8')
data_string = ''.join(lines)
if self.test_program.extract_fmt == 'table':
outputs.append(util.dict_table_string(data_string))
elif self.test_program.extract_fmt == 'yaml':
outputs.append({})
# convert values to be in a tuple so the format matches
# that from dict_table_string.
# ensure all keys are strings so they can be sorted
# (different data types cause problems!)
for (key, val) in yaml.safe_load(data_string).items():
if isinstance(val, list):
outputs[-1][str(key)] = tuple(val)
else:
outputs[-1][str(key)] = tuple((val,))
return tuple(outputs)
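    # Illustrative YAML payload (hypothetical): an extract script printing
    #   energy: [-1.5, -1.6]
    #   iterations: 12
    # is converted by the loop above into
    #   {'energy': (-1.5, -1.6), 'iterations': (12,)}
    # so single values and lists share the tuple-valued format produced by
    # util.dict_table_string for 'table' output.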
def create_new_benchmarks(self, benchmark, copy_files_since=None,
copy_files_path='testcode_data'):
'''Copy the test files to benchmark files.'''
oldcwd = os.getcwd()
os.chdir(self.path)
test_files = []
for (inp, arg) in self.inputs_args:
test_file = util.testcode_filename(FILESTEM['test'],
self.test_program.test_id, inp, arg)
err_file = util.testcode_filename(FILESTEM['error'],
self.test_program.test_id, inp, arg)
bench_file = util.testcode_filename(_FILESTEM_DICT['benchmark'],
benchmark, inp, arg)
test_files.extend((test_file, err_file, bench_file))
shutil.copy(test_file, bench_file)
if copy_files_since:
if not os.path.isdir(copy_files_path):
os.mkdir(copy_files_path)
if os.path.isdir(copy_files_path):
for data_file in glob.glob('*'):
if (os.path.isfile(data_file) and
os.stat(data_file)[-2] >= copy_files_since and
data_file not in test_files):
bench_data_file = os.path.join(copy_files_path,
data_file)
# shutil.copy can't overwrite files so remove old ones
# with the same name.
if os.path.exists(bench_data_file):
os.unlink(bench_data_file)
shutil.copy(data_file, bench_data_file)
os.chdir(oldcwd)
def _update_status(self, status, inp_arg):
'''Update self.status with success of a test.'''
if status:
self.status[inp_arg] = status
else:
# Something went wrong. Store a Status failed object.
self.status[inp_arg] = validation.Status([False])
def get_status(self):
'''Get number of passed and number of ran tasks.'''
# If there's an object (other than None/False) in the corresponding
# dict entry in self.status, then that test must have ran (albeit not
# necessarily successfuly!).
status = {}
status['passed'] = sum(True for stat in self.status.values()
if stat and stat.passed())
status['warning'] = sum(True for stat in self.status.values()
if stat and stat.warning())
status['skipped'] = sum(True for stat in self.status.values()
if stat and stat.skipped())
status['failed'] = sum(True for stat in self.status.values()
if stat and stat.failed())
status['unknown'] = sum(True for stat in self.status.values()
if stat and stat.unknown())
status['ran'] = sum(True for stat in self.status.values() if stat)
return status
| gpl-2.0 | -3,160,720,281,275,061,000 | 43.362819 | 105 | 0.534471 | false | 4.252048 | true | false | false |
enflow-nl/cast-viewer | ansible/roles/network/files/cast_viewer_net_watchdog.py | 1 | 3960 | #!/usr/bin/env python
import configparser
import netifaces
import os
import re
import requests
import sh
import socket
import sys
import time
import logging
from time import sleep
NETWORK_PATH = '/boot/network.ini'
logging.basicConfig(level=logging.INFO,
format='%(message)s')
def get_default_gw():
gws = netifaces.gateways()
return gws['default'][netifaces.AF_INET][0]
def ping_test(host):
ping = sh.ping('-q', '-c', 10, host, _ok_code=[0, 1])
    # sh's .stdout is bytes under Python 3; decode before the str regex.
    packet_loss = re.findall(r'(\d+)% packet loss', ping.stdout.decode())[0]
if int(packet_loss) > 60:
logging.error('Unable to ping gateway.')
return False
else:
return True
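# Example of the parse above (hedged; the summary wording varies slightly by
# ping build): `ping -q -c 10 host` ends with a line like
#   '10 packets transmitted, 4 received, 60% packet loss, time 9012ms'
# and re.findall(r'(\d+)% packet loss', ...) then yields ['60'], so anything
# strictly above 60% loss is treated as an unreachable gateway.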
def http_test(host):
try:
r = requests.head(host, allow_redirects=True, verify=False)
if 200 <= r.status_code < 400:
return True
else:
logging.error('Unable to reach Cast server.')
return False
except Exception as e:
logging.error('http_test failed: {}'.format(str(e)))
return False
def restart_networking():
networking = sh.Command('/etc/init.d/networking')
networking('restart')
def restart_interface(interface):
logging.info('Restarting network interface.')
ifdown = sh.Command('/sbin/ifdown')
ifdown('--force', interface)
restart_networking()
def is_static(config, interface):
ip = config.get(interface, 'ip', fallback=False)
netmask = config.get(interface, 'netmask', fallback=False)
gateway = config.get(interface, 'gateway', fallback=False)
return ip and netmask and gateway
def bring_up_interface(interface):
retry_limit = 10
retries = 0
while retries < retry_limit:
restart_interface(interface)
if has_ip(interface):
return True
else:
retries += 1
time.sleep(15)
logging.error('Unable to bring up network interface.')
return False
def bring_down_interface(interface):
logging.info('Bringing down interface %s', interface)
ifdown = sh.Command('/sbin/ifdown')
ifdown('--force', interface)
def has_ip(interface):
"""
Return True if interface has an IP.
"""
try:
ips = netifaces.ifaddresses(interface)
except ValueError:
logging.error('Interface does not exist.')
return False
for k in ips.keys():
ip = ips[k][0].get('addr', False)
if ip:
try:
socket.inet_aton(ip)
return True
except socket.error:
pass
return False
def get_active_iface(config, prefix):
for n in range(10):
iface = '{}{}'.format(prefix, n)
if config.has_section(iface):
return iface
return False
def join_zerotier_network():
os.system('/usr/sbin/zerotier-cli join 17d709436cf23366')
if __name__ == '__main__':
config = configparser.RawConfigParser()
config.read(NETWORK_PATH)
wifi_iface = get_active_iface(config, 'wlan')
if wifi_iface:
logging.info('Found wifi interface {}'.format(wifi_iface))
reaches_internet = http_test('http://example.com')
can_ping_gw = ping_test(get_default_gw())
if reaches_internet and can_ping_gw:
logging.info('WiFi interface is healthy.')
else:
            if not reaches_internet and not can_ping_gw:
                logging.error('Unable to connect to internet and gateway')
            elif can_ping_gw:
                # Gateway responds, but the internet check failed.
                logging.error('Unable to connect to the internet')
            elif reaches_internet:
                # Internet is reachable, but the gateway did not answer pings.
                logging.error('Unable to connect to gateway')
logging.info('Restarting {}'.format(wifi_iface))
wifi_is_healthy = bring_up_interface(wifi_iface)
if wifi_is_healthy:
logging.info('WiFi is healthy again!')
else:
logging.error('WiFi still isn\'t healthy')
join_zerotier_network()
| gpl-2.0 | -8,181,382,529,827,038,000 | 25.4 | 74 | 0.607323 | false | 3.882353 | true | false | false |
munhyunsu/Hobby | KawiBawiBo/kwb_server.py | 1 | 3895 | #!/usr/bin/env python3
import sys
import asyncio
import threading
import base64
import os
SRCPATH = './src/'
class KWBServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
print('Connected: {}'.format(transport.get_extra_info('peername')))
self.transport = transport
self.method = None
self.authorization = None
self.content_length = None
self.message = str()
def data_received(self, data):
transport = self.transport
message = data.decode('utf-8')
# 헤더 확인
if self.method == None:
self.method = message.split('\r\n')[0]
if not self.method.startswith('POST / HTTP/1.1'):
transport.write('400 Bad Request\r\n'.encode('ascii'))
transport.close()
# 헤더 파싱
headers = message.split('\r\n\r\n')[0]
for header in headers.split('\r\n'):
if header.startswith('Authorization'):
self.authorization = header.split(' ')[-1]
self.authorization = \
base64.b64decode(self.authorization)
self.authorization = self.authorization.decode('utf-8')
print('Autorizing Info.: {}'.format(
self.authorization))
elif header.startswith('Content-Length'):
self.content_length = int(header.split(' ')[-1])
if self.content_length > 10240:
transport.write(
'400 Bad Request\r\n'.encode('ascii'))
transport.close()
if None in (self.method,
self.authorization,
self.content_length):
transport.write('400 Bad Request\r\n'.encode('ascii'))
transport.close()
message = message.split('\r\n\r\n')[1:]
message = ''.join(message)
# 데이터 합치기
self.message = self.message + message
if self.content_length < len(self.message.encode('utf-8')):
transport.write('400 Bad Request\r\n'.encode('ascii'))
transport.close()
if self.content_length == len(self.message.encode('utf-8')):
if 'import random' in self.message:
transport.write(
'400 Bad Request: import random\r\n'.encode(
'ascii'))
transport.close()
else:
transport.write('202 Accepted\r\n'.encode('ascii'))
transport.write(self.message.encode('utf-8'))
filename = (SRCPATH
+ self.authorization.split(':')[0]
+ '_'
+ self.authorization.split(':')[1]
+ '.py')
with open(filename, 'w') as received_file:
received_file.write(self.message)
transport.close()
def eof_received(self):
transport = self.transport
self.transport.close()
def connection_lost(self, exc):
pass
def main():
# 소스 디렉터리 생성
os.makedirs(SRCPATH, exist_ok = True)
loop = asyncio.get_event_loop()
coro = loop.create_server(KWBServerProtocol,
'', 8080,
reuse_address = True,
reuse_port = True)
print('가위바위보 서버가 시작되었습니다.')
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
print('가위바위보 서버가 종료되었습니다.')
pass
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,680,759,800,586,388,000 | 36.166667 | 75 | 0.505671 | false | 3.977964 | false | false | false |
hxddh/thefuck | thefuck/rules/ssh_known_hosts.py | 17 | 1068 | import re
from thefuck.utils import for_app
commands = ('ssh', 'scp')
@for_app(*commands)
def match(command):
if not command.script:
return False
if not command.script.startswith(commands):
return False
patterns = (
r'WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!',
r'WARNING: POSSIBLE DNS SPOOFING DETECTED!',
r"Warning: the \S+ host key for '([^']+)' differs from the key for the IP address '([^']+)'",
)
return any(re.findall(pattern, command.stderr) for pattern in patterns)
def get_new_command(command):
return command.script
def side_effect(old_cmd, command):
offending_pattern = re.compile(
r'(?:Offending (?:key for IP|\S+ key)|Matching host key) in ([^:]+):(\d+)',
re.MULTILINE)
offending = offending_pattern.findall(old_cmd.stderr)
for filepath, lineno in offending:
with open(filepath, 'r') as fh:
lines = fh.readlines()
del lines[int(lineno) - 1]
with open(filepath, 'w') as fh:
fh.writelines(lines)
| mit | -6,222,417,361,600,427,000 | 27.864865 | 101 | 0.618914 | false | 3.59596 | false | false | false |
gkadillak/rockstor-core | src/rockstor/storageadmin/views/share_helpers.py | 2 | 6394 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from datetime import datetime
from django.utils.timezone import utc
from django.conf import settings
from storageadmin.models import (Share, Disk, Snapshot, SFTP)
from smart_manager.models import ShareUsage
from fs.btrfs import (mount_share, mount_snap, is_share_mounted, is_mounted,
umount_root, shares_info, share_usage, snaps_info,
qgroup_create, update_quota)
from storageadmin.util import handle_exception
import logging
logger = logging.getLogger(__name__)
def helper_mount_share(share, mnt_pt=None):
if (not is_share_mounted(share.name)):
if(mnt_pt is None):
mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
mount_share(share, mnt_pt)
def validate_share(sname, request):
try:
return Share.objects.get(name=sname)
except:
e_msg = ('Share with name: %s does not exist' % sname)
handle_exception(Exception(e_msg), request)
def sftp_snap_toggle(share, mount=True):
for snap in Snapshot.objects.filter(share=share, uvisible=True):
mnt_pt = ('%s/%s/%s/.%s' % (settings.SFTP_MNT_ROOT,
share.owner, share.name,
snap.name))
if (mount and not is_mounted(mnt_pt)):
mount_snap(share, snap.name, mnt_pt)
elif (is_mounted(mnt_pt) and not mount):
umount_root(mnt_pt)
def toggle_sftp_visibility(share, snap_name, on=True):
if (not SFTP.objects.filter(share=share).exists()):
return
mnt_pt = ('%s/%s/%s/.%s' % (settings.SFTP_MNT_ROOT, share.owner,
share.name, snap_name))
if (on):
if (not is_mounted(mnt_pt)):
mount_snap(share, snap_name, mnt_pt)
else:
umount_root(mnt_pt)
def import_shares(pool, request):
disk = Disk.objects.filter(pool=pool)[0].name
shares = [s.name for s in Share.objects.filter(pool=pool)]
shares_d = shares_info(pool)
for s in shares:
if (s not in shares_d):
Share.objects.get(pool=pool, name=s).delete()
for s in shares_d:
if (s in shares):
share = Share.objects.get(name=s)
share.qgroup = shares_d[s]
rusage, eusage = share_usage(pool, share.qgroup)
ts = datetime.utcnow().replace(tzinfo=utc)
if (rusage != share.rusage or eusage != share.eusage):
share.rusage = rusage
share.eusage = eusage
su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
ts=ts)
su.save()
else:
try:
su = ShareUsage.objects.filter(name=s).latest('id')
su.ts = ts
su.count += 1
except ShareUsage.DoesNotExist:
su = ShareUsage(name=s, r_usage=rusage,
e_usage=eusage, ts=ts)
finally:
su.save()
share.save()
continue
try:
cshare = Share.objects.get(name=s)
cshares_d = shares_info('%s%s' % (settings.MNT_PT,
cshare.pool.name))
if (s in cshares_d):
e_msg = ('Another pool(%s) has a Share with this same '
'name(%s) as this pool(%s). This configuration is not supported.'
' You can delete one of them manually with this command: '
'btrfs subvol delete %s[pool name]/%s' %
(cshare.pool.name, s, pool.name, settings.MNT_PT, s))
handle_exception(Exception(e_msg), request)
else:
cshare.pool = pool
cshare.qgroup = shares_d[s]
cshare.size = pool.size
cshare.subvol_name = s
cshare.rusage, cshare.eusage = share_usage(pool, cshare.qgroup)
cshare.save()
except Share.DoesNotExist:
pqid = qgroup_create(pool)
update_quota(pool, pqid, pool.size * 1024)
nso = Share(pool=pool, qgroup=shares_d[s], pqgroup=pqid, name=s,
size=pool.size, subvol_name=s)
nso.save()
mount_share(nso, '%s%s' % (settings.MNT_PT, s))
def import_snapshots(share):
snaps_d = snaps_info('%s%s' % (settings.MNT_PT, share.pool.name),
share.name)
disk = Disk.objects.filter(pool=share.pool)[0].name
snaps = [s.name for s in Snapshot.objects.filter(share=share)]
for s in snaps:
if (s not in snaps_d):
Snapshot.objects.get(share=share,name=s).delete()
for s in snaps_d:
if (s in snaps):
so = Snapshot.objects.get(share=share, name=s)
else:
so = Snapshot(share=share, name=s, real_name=s,
writable=snaps_d[s][1], qgroup=snaps_d[s][0])
rusage, eusage = share_usage(share.pool, snaps_d[s][0])
ts = datetime.utcnow().replace(tzinfo=utc)
if (rusage != so.rusage or eusage != so.eusage):
so.rusage = rusage
so.eusage = eusage
su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage, ts=ts)
su.save()
else:
try:
su = ShareUsage.objects.filter(name=s).latest('id')
su.ts = ts
su.count += 1
except ShareUsage.DoesNotExist:
su = ShareUsage(name=s, r_usage=rusage, e_usage=eusage,
ts=ts)
finally:
su.save()
so.save()
| gpl-3.0 | -6,850,788,140,572,486,000 | 38.714286 | 90 | 0.554582 | false | 3.616516 | false | false | false |
naparuba/opsbro | data/core-configuration/packs/core-functions/module/frandom.py | 2 | 1119 | import random as random_lib
import copy
from opsbro.evaluater import export_evaluater_function
FUNCTION_GROUP = 'random'
@export_evaluater_function(function_group=FUNCTION_GROUP)
def random():
"""**random()** -> Returns a random float between 0 and 1
<code>
Example:
random()
Returns:
0.6988342144113194
</code>
"""
return random_lib.random()
@export_evaluater_function(function_group=FUNCTION_GROUP)
def randomint_between(int_start, int_end):
"""**randomint_between(int_start, int_end)** -> Returns a random int between the start and the end
<code>
Example:
randomint_between(1, 100)
Returns:
69
</code>
"""
return random_lib.randint(int_start, int_end)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def shuffle(list):
"""**shuffle(list)** -> Return a copy of the list suffle randomly
<code>
Example:
suffle([ 1, 2, 3, 4 ])
Returns:
[ 3, 1, 4, 2 ]
</code>
"""
# NOTE random.shuffle is in place
n_list = copy.copy(list)
random_lib.shuffle(n_list)
return n_list
| mit | -6,978,043,257,205,839,000 | 15.954545 | 102 | 0.637176 | false | 3.36036 | false | false | false |
luckydonald/pytgbot | pytgbot/api_types/receivable/media.py | 1 | 123436 | # -*- coding: utf-8 -*-
__all__ = ['Media', 'MessageEntity', 'DownloadableMedia', 'PhotoSize', 'Audio', 'Animation', 'Document', 'Sticker', 'Video', 'Voice', 'VideoNote', 'Contact', 'Location', 'Venue', 'UserProfilePhotos', 'File', 'ChatPhoto', 'Game']
from luckydonaldUtils.encoding import unicode_type, to_unicode as u
from luckydonaldUtils.exceptions import assert_type_or_raise
from . import Receivable
from . import Result
__author__ = 'luckydonald'
class Media(Receivable):
pass
# end Media
class MessageEntity(Result):
"""
This object represents one special entity in a text message. For example, hashtags, usernames, URLs, etc.
https://core.telegram.org/bots/api#messageentity
Parameters:
:param type: Type of the entity. Can be "mention" (@username), "hashtag" (#hashtag), "cashtag" ($USD), "bot_command" (/start@jobs_bot), "url" (https://telegram.org), "email" ([email protected]), "phone_number" (+1-212-555-0123), "bold" (bold text), "italic" (italic text), "underline" (underlined text), "strikethrough" (strikethrough text), "code" (monowidth string), "pre" (monowidth block), "text_link" (for clickable text URLs), "text_mention" (for users without usernames)
:type type: str|unicode
:param offset: Offset in UTF-16 code units to the start of the entity
:type offset: int
:param length: Length of the entity in UTF-16 code units
:type length: int
Optional keyword parameters:
:param url: Optional. For "text_link" only, url that will be opened after user taps on the text
:type url: str|unicode
:param user: Optional. For "text_mention" only, the mentioned user
:type user: pytgbot.api_types.receivable.peer.User
:param language: Optional. For "pre" only, the programming language of the entity text
:type language: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, type, offset, length, url=None, user=None, language=None, _raw=None):
"""
This object represents one special entity in a text message. For example, hashtags, usernames, URLs, etc.
https://core.telegram.org/bots/api#messageentity
Parameters:
:param type: Type of the entity. Can be "mention" (@username), "hashtag" (#hashtag), "cashtag" ($USD), "bot_command" (/start@jobs_bot), "url" (https://telegram.org), "email" ([email protected]), "phone_number" (+1-212-555-0123), "bold" (bold text), "italic" (italic text), "underline" (underlined text), "strikethrough" (strikethrough text), "code" (monowidth string), "pre" (monowidth block), "text_link" (for clickable text URLs), "text_mention" (for users without usernames)
:type type: str|unicode
:param offset: Offset in UTF-16 code units to the start of the entity
:type offset: int
:param length: Length of the entity in UTF-16 code units
:type length: int
Optional keyword parameters:
:param url: Optional. For "text_link" only, url that will be opened after user taps on the text
:type url: str|unicode
:param user: Optional. For "text_mention" only, the mentioned user
:type user: pytgbot.api_types.receivable.peer.User
:param language: Optional. For "pre" only, the programming language of the entity text
:type language: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(MessageEntity, self).__init__()
from .peer import User
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(offset, int, parameter_name="offset")
self.offset = offset
assert_type_or_raise(length, int, parameter_name="length")
self.length = length
assert_type_or_raise(url, None, unicode_type, parameter_name="url")
self.url = url
assert_type_or_raise(user, None, User, parameter_name="user")
self.user = user
assert_type_or_raise(language, None, unicode_type, parameter_name="language")
self.language = language
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this MessageEntity to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(MessageEntity, self).to_array()
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['offset'] = int(self.offset) # type int
array['length'] = int(self.length) # type int
if self.url is not None:
array['url'] = u(self.url) # py2: type unicode, py3: type str
if self.user is not None:
array['user'] = self.user.to_array() # type User
if self.language is not None:
array['language'] = u(self.language) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the MessageEntity constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
from .peer import User
data = Result.validate_array(array)
data['type'] = u(array.get('type'))
data['offset'] = int(array.get('offset'))
data['length'] = int(array.get('length'))
data['url'] = u(array.get('url')) if array.get('url') is not None else None
data['user'] = User.from_array(array.get('user')) if array.get('user') is not None else None
data['language'] = u(array.get('language')) if array.get('language') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new MessageEntity from a given dictionary.
:return: new MessageEntity instance.
:rtype: MessageEntity
"""
if not array: # None or {}
return None
# end if
data = MessageEntity.validate_array(array)
data['_raw'] = array
return MessageEntity(**data)
# end def from_array
def __str__(self):
"""
Implements `str(messageentity_instance)`
"""
return "MessageEntity(type={self.type!r}, offset={self.offset!r}, length={self.length!r}, url={self.url!r}, user={self.user!r}, language={self.language!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(messageentity_instance)`
"""
if self._raw:
return "MessageEntity.from_array({self._raw})".format(self=self)
# end if
return "MessageEntity(type={self.type!r}, offset={self.offset!r}, length={self.length!r}, url={self.url!r}, user={self.user!r}, language={self.language!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in messageentity_instance`
"""
return (
key in ["type", "offset", "length", "url", "user", "language"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class MessageEntity
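# Minimal usage sketch (illustrative only, not part of the generated API
# surface): round-trip a hypothetical raw Bot API entity dict through
# MessageEntity. The payload values are assumptions for demonstration, not
# data from a real update.
def _messageentity_example():  # pragma: no cover
    raw = {'type': 'bold', 'offset': 0, 'length': 4}  # hypothetical payload
    entity = MessageEntity.from_array(raw)
    assert entity.type == 'bold' and entity.offset == 0 and entity.length == 4
    # to_array() serializes back; optional fields left as None are omitted.
    serialized = entity.to_array()
    assert serialized['type'] == 'bold' and serialized['length'] == 4
# end def _messageentity_example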
class DownloadableMedia(Media):
    @staticmethod
    def validate_array(array):
        """
        Subclass for all :class:`Media` which have a :py:attr:`file_id` and optionally a :py:attr:`file_size`
        :param array: an array to parse
        :type array: dict
        :return: a dict with file_id and file_size extracted from the array
        :rtype: dict
        """
        data = Media.validate_array(array)
        data['file_id'] = array.get('file_id')
        data['file_size'] = array.get('file_size')  # can be None
        return data
# end class DownloadableMedia
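# Illustrative sketch (an assumption about intended usage, not generated
# code): validate_array returns a plain dict rather than an instance, so a
# subclass can merge the shared file_id/file_size fields into its own
# constructor kwargs. The id and size below are hypothetical.
def _downloadablemedia_example():  # pragma: no cover
    data = DownloadableMedia.validate_array({'file_id': 'hypothetical-id', 'file_size': 1234})
    assert data['file_id'] == 'hypothetical-id'
    assert data['file_size'] == 1234
# end def _downloadablemedia_example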
class PhotoSize(Result):
"""
This object represents one size of a photo or a file / sticker thumbnail.
https://core.telegram.org/bots/api#photosize
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Photo width
:type width: int
:param height: Photo height
:type height: int
Optional keyword parameters:
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, width, height, file_size=None, _raw=None):
"""
This object represents one size of a photo or a file / sticker thumbnail.
https://core.telegram.org/bots/api#photosize
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Photo width
:type width: int
:param height: Photo height
:type height: int
Optional keyword parameters:
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(PhotoSize, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(width, int, parameter_name="width")
self.width = width
assert_type_or_raise(height, int, parameter_name="height")
self.height = height
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this PhotoSize to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PhotoSize, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['width'] = int(self.width) # type int
array['height'] = int(self.height) # type int
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PhotoSize constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Result.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PhotoSize from a given dictionary.
:return: new PhotoSize instance.
:rtype: PhotoSize
"""
if not array: # None or {}
return None
# end if
data = PhotoSize.validate_array(array)
data['_raw'] = array
return PhotoSize(**data)
# end def from_array
def __str__(self):
"""
Implements `str(photosize_instance)`
"""
return "PhotoSize(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(photosize_instance)`
"""
if self._raw:
return "PhotoSize.from_array({self._raw})".format(self=self)
# end if
return "PhotoSize(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in photosize_instance`
"""
return (
key in ["file_id", "file_unique_id", "width", "height", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PhotoSize
class Animation(Media):
"""
This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound).
https://core.telegram.org/bots/api#animation
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Video width as defined by sender
:type width: int
:param height: Video height as defined by sender
:type height: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Animation thumbnail as defined by sender
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_name: Optional. Original animation filename as defined by sender
:type file_name: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, width, height, duration, thumb=None, file_name=None, mime_type=None, file_size=None, _raw=None):
"""
This object represents an animation file (GIF or H.264/MPEG-4 AVC video without sound).
https://core.telegram.org/bots/api#animation
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Video width as defined by sender
:type width: int
:param height: Video height as defined by sender
:type height: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Animation thumbnail as defined by sender
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_name: Optional. Original animation filename as defined by sender
:type file_name: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Animation, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(width, int, parameter_name="width")
self.width = width
assert_type_or_raise(height, int, parameter_name="height")
self.height = height
assert_type_or_raise(duration, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(file_name, None, unicode_type, parameter_name="file_name")
self.file_name = file_name
assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
self.mime_type = mime_type
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Animation to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Animation, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['width'] = int(self.width) # type int
array['height'] = int(self.height) # type int
array['duration'] = int(self.duration) # type int
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
if self.file_name is not None:
array['file_name'] = u(self.file_name) # py2: type unicode, py3: type str
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Animation constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['duration'] = int(array.get('duration'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['file_name'] = u(array.get('file_name')) if array.get('file_name') is not None else None
data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Animation from a given dictionary.
:return: new Animation instance.
:rtype: Animation
"""
if not array: # None or {}
return None
# end if
data = Animation.validate_array(array)
data['_raw'] = array
return Animation(**data)
# end def from_array
def __str__(self):
"""
Implements `str(animation_instance)`
"""
return "Animation(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(animation_instance)`
"""
if self._raw:
return "Animation.from_array({self._raw})".format(self=self)
# end if
return "Animation(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in animation_instance`
"""
return (
key in ["file_id", "file_unique_id", "width", "height", "duration", "thumb", "file_name", "mime_type", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Animation
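# Nested deserialization sketch (illustrative only): from_array resolves the
# optional 'thumb' sub-dict into a PhotoSize instance. The same pattern holds
# for the media classes below (Audio, Document, Video, VideoNote). All ids
# and dimensions here are hypothetical.
def _animation_example():  # pragma: no cover
    anim = Animation.from_array({
        'file_id': 'hypothetical-id', 'file_unique_id': 'hypothetical-uid',
        'width': 320, 'height': 240, 'duration': 5,
        'thumb': {'file_id': 'thumb-id', 'file_unique_id': 'thumb-uid', 'width': 32, 'height': 24},
    })
    assert isinstance(anim.thumb, PhotoSize)
    assert anim.thumb.width == 32 and anim.thumb.height == 24
# end def _animation_example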
class Audio(Media):
"""
This object represents an audio file to be treated as music by the Telegram clients.
https://core.telegram.org/bots/api#audio
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param duration: Duration of the audio in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param performer: Optional. Performer of the audio as defined by sender or by audio tags
:type performer: str|unicode
:param title: Optional. Title of the audio as defined by sender or by audio tags
:type title: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param thumb: Optional. Thumbnail of the album cover to which the music file belongs
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, duration, performer=None, title=None, mime_type=None, file_size=None, thumb=None, _raw=None):
"""
This object represents an audio file to be treated as music by the Telegram clients.
https://core.telegram.org/bots/api#audio
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param duration: Duration of the audio in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param performer: Optional. Performer of the audio as defined by sender or by audio tags
:type performer: str|unicode
:param title: Optional. Title of the audio as defined by sender or by audio tags
:type title: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param thumb: Optional. Thumbnail of the album cover to which the music file belongs
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Audio, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(duration, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(performer, None, unicode_type, parameter_name="performer")
self.performer = performer
assert_type_or_raise(title, None, unicode_type, parameter_name="title")
self.title = title
assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
self.mime_type = mime_type
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Audio to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Audio, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['duration'] = int(self.duration) # type int
if self.performer is not None:
array['performer'] = u(self.performer) # py2: type unicode, py3: type str
if self.title is not None:
array['title'] = u(self.title) # py2: type unicode, py3: type str
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Audio constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['duration'] = int(array.get('duration'))
data['performer'] = u(array.get('performer')) if array.get('performer') is not None else None
data['title'] = u(array.get('title')) if array.get('title') is not None else None
data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Audio from a given dictionary.
:return: new Audio instance.
:rtype: Audio
"""
if not array: # None or {}
return None
# end if
data = Audio.validate_array(array)
data['_raw'] = array
return Audio(**data)
# end def from_array
def __str__(self):
"""
Implements `str(audio_instance)`
"""
return "Audio(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, performer={self.performer!r}, title={self.title!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r}, thumb={self.thumb!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(audio_instance)`
"""
if self._raw:
return "Audio.from_array({self._raw})".format(self=self)
# end if
return "Audio(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, performer={self.performer!r}, title={self.title!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r}, thumb={self.thumb!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in audio_instance`
"""
return (
key in ["file_id", "file_unique_id", "duration", "performer", "title", "mime_type", "file_size", "thumb"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Audio
class Document(Media):
"""
This object represents a general file (as opposed to photos, voice messages and audio files).
https://core.telegram.org/bots/api#document
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
Optional keyword parameters:
:param thumb: Optional. Document thumbnail as defined by sender
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_name: Optional. Original filename as defined by sender
:type file_name: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, thumb=None, file_name=None, mime_type=None, file_size=None, _raw=None):
"""
This object represents a general file (as opposed to photos, voice messages and audio files).
https://core.telegram.org/bots/api#document
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
Optional keyword parameters:
:param thumb: Optional. Document thumbnail as defined by sender
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_name: Optional. Original filename as defined by sender
:type file_name: str|unicode
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Document, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(file_name, None, unicode_type, parameter_name="file_name")
self.file_name = file_name
assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
self.mime_type = mime_type
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Document to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Document, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
if self.file_name is not None:
array['file_name'] = u(self.file_name) # py2: type unicode, py3: type str
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Document constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['file_name'] = u(array.get('file_name')) if array.get('file_name') is not None else None
data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Document from a given dictionary.
:return: new Document instance.
:rtype: Document
"""
if not array: # None or {}
return None
# end if
data = Document.validate_array(array)
data['_raw'] = array
return Document(**data)
# end def from_array
def __str__(self):
"""
Implements `str(document_instance)`
"""
return "Document(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(document_instance)`
"""
if self._raw:
return "Document.from_array({self._raw})".format(self=self)
# end if
return "Document(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, thumb={self.thumb!r}, file_name={self.file_name!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in document_instance`
"""
return (
key in ["file_id", "file_unique_id", "thumb", "file_name", "mime_type", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Document
class Video(Media):
"""
This object represents a video file.
https://core.telegram.org/bots/api#video
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Video width as defined by sender
:type width: int
:param height: Video height as defined by sender
:type height: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Video thumbnail
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param mime_type: Optional. Mime type of a file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, width, height, duration, thumb=None, mime_type=None, file_size=None, _raw=None):
"""
This object represents a video file.
https://core.telegram.org/bots/api#video
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Video width as defined by sender
:type width: int
:param height: Video height as defined by sender
:type height: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Video thumbnail
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param mime_type: Optional. Mime type of a file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Video, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(width, int, parameter_name="width")
self.width = width
assert_type_or_raise(height, int, parameter_name="height")
self.height = height
assert_type_or_raise(duration, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
self.mime_type = mime_type
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Video to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Video, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['width'] = int(self.width) # type int
array['height'] = int(self.height) # type int
array['duration'] = int(self.duration) # type int
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Video constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['duration'] = int(array.get('duration'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Video from a given dictionary.
:return: new Video instance.
:rtype: Video
"""
if not array: # None or {}
return None
# end if
data = Video.validate_array(array)
data['_raw'] = array
return Video(**data)
# end def from_array
def __str__(self):
"""
Implements `str(video_instance)`
"""
return "Video(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(video_instance)`
"""
if self._raw:
return "Video.from_array({self._raw})".format(self=self)
# end if
return "Video(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, duration={self.duration!r}, thumb={self.thumb!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in video_instance`
"""
return (
key in ["file_id", "file_unique_id", "width", "height", "duration", "thumb", "mime_type", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Video
class VideoNote(Media):
"""
This object represents a video message (available in Telegram apps as of v.4.0).
https://core.telegram.org/bots/api#videonote
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param length: Video width and height (diameter of the video message) as defined by sender
:type length: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Video thumbnail
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, length, duration, thumb=None, file_size=None, _raw=None):
"""
This object represents a video message (available in Telegram apps as of v.4.0).
https://core.telegram.org/bots/api#videonote
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param length: Video width and height (diameter of the video message) as defined by sender
:type length: int
:param duration: Duration of the video in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param thumb: Optional. Video thumbnail
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(VideoNote, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(length, int, parameter_name="length")
self.length = length
assert_type_or_raise(duration, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this VideoNote to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(VideoNote, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['length'] = int(self.length) # type int
array['duration'] = int(self.duration) # type int
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the VideoNote constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['length'] = int(array.get('length'))
data['duration'] = int(array.get('duration'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new VideoNote from a given dictionary.
:return: new VideoNote instance.
:rtype: VideoNote
"""
if not array: # None or {}
return None
# end if
data = VideoNote.validate_array(array)
data['_raw'] = array
return VideoNote(**data)
# end def from_array
def __str__(self):
"""
Implements `str(videonote_instance)`
"""
return "VideoNote(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, length={self.length!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(videonote_instance)`
"""
if self._raw:
return "VideoNote.from_array({self._raw})".format(self=self)
# end if
return "VideoNote(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, length={self.length!r}, duration={self.duration!r}, thumb={self.thumb!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in videonote_instance`
"""
return (
key in ["file_id", "file_unique_id", "length", "duration", "thumb", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class VideoNote
class Voice(Media):
"""
This object represents a voice note.
https://core.telegram.org/bots/api#voice
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param duration: Duration of the audio in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, duration, mime_type=None, file_size=None, _raw=None):
"""
This object represents a voice note.
https://core.telegram.org/bots/api#voice
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param duration: Duration of the audio in seconds as defined by sender
:type duration: int
Optional keyword parameters:
:param mime_type: Optional. MIME type of the file as defined by sender
:type mime_type: str|unicode
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Voice, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(duration, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(mime_type, None, unicode_type, parameter_name="mime_type")
self.mime_type = mime_type
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Voice to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Voice, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['duration'] = int(self.duration) # type int
if self.mime_type is not None:
array['mime_type'] = u(self.mime_type) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Voice constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['duration'] = int(array.get('duration'))
data['mime_type'] = u(array.get('mime_type')) if array.get('mime_type') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Voice from a given dictionary.
:return: new Voice instance.
:rtype: Voice
"""
if not array: # None or {}
return None
# end if
data = Voice.validate_array(array)
data['_raw'] = array
return Voice(**data)
# end def from_array
def __str__(self):
"""
Implements `str(voice_instance)`
"""
return "Voice(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(voice_instance)`
"""
if self._raw:
return "Voice.from_array({self._raw})".format(self=self)
# end if
return "Voice(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, duration={self.duration!r}, mime_type={self.mime_type!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in voice_instance`
"""
return (
key in ["file_id", "file_unique_id", "duration", "mime_type", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Voice
class Contact(Media):
"""
This object represents a phone contact.
https://core.telegram.org/bots/api#contact
Parameters:
:param phone_number: Contact's phone number
:type phone_number: str|unicode
:param first_name: Contact's first name
:type first_name: str|unicode
Optional keyword parameters:
:param last_name: Optional. Contact's last name
:type last_name: str|unicode
:param user_id: Optional. Contact's user identifier in Telegram
:type user_id: int
:param vcard: Optional. Additional data about the contact in the form of a vCard
:type vcard: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, phone_number, first_name, last_name=None, user_id=None, vcard=None, _raw=None):
"""
This object represents a phone contact.
https://core.telegram.org/bots/api#contact
Parameters:
:param phone_number: Contact's phone number
:type phone_number: str|unicode
:param first_name: Contact's first name
:type first_name: str|unicode
Optional keyword parameters:
:param last_name: Optional. Contact's last name
:type last_name: str|unicode
:param user_id: Optional. Contact's user identifier in Telegram
:type user_id: int
:param vcard: Optional. Additional data about the contact in the form of a vCard
:type vcard: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Contact, self).__init__()
assert_type_or_raise(phone_number, unicode_type, parameter_name="phone_number")
self.phone_number = phone_number
assert_type_or_raise(first_name, unicode_type, parameter_name="first_name")
self.first_name = first_name
assert_type_or_raise(last_name, None, unicode_type, parameter_name="last_name")
self.last_name = last_name
assert_type_or_raise(user_id, None, int, parameter_name="user_id")
self.user_id = user_id
assert_type_or_raise(vcard, None, unicode_type, parameter_name="vcard")
self.vcard = vcard
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Contact to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Contact, self).to_array()
array['phone_number'] = u(self.phone_number) # py2: type unicode, py3: type str
array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str
if self.last_name is not None:
array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str
if self.user_id is not None:
array['user_id'] = int(self.user_id) # type int
if self.vcard is not None:
array['vcard'] = u(self.vcard) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Contact constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['phone_number'] = u(array.get('phone_number'))
data['first_name'] = u(array.get('first_name'))
data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
data['user_id'] = int(array.get('user_id')) if array.get('user_id') is not None else None
data['vcard'] = u(array.get('vcard')) if array.get('vcard') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Contact from a given dictionary.
:return: new Contact instance.
:rtype: Contact
"""
if not array: # None or {}
return None
# end if
data = Contact.validate_array(array)
data['_raw'] = array
return Contact(**data)
# end def from_array
def __str__(self):
"""
Implements `str(contact_instance)`
"""
return "Contact(phone_number={self.phone_number!r}, first_name={self.first_name!r}, last_name={self.last_name!r}, user_id={self.user_id!r}, vcard={self.vcard!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(contact_instance)`
"""
if self._raw:
return "Contact.from_array({self._raw})".format(self=self)
# end if
return "Contact(phone_number={self.phone_number!r}, first_name={self.first_name!r}, last_name={self.last_name!r}, user_id={self.user_id!r}, vcard={self.vcard!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in contact_instance`
"""
return (
key in ["phone_number", "first_name", "last_name", "user_id", "vcard"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Contact
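# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source; name and phone number are placeholders. Demonstrates the
# __contains__ semantics defined above: a key is "in" the instance only if
# the attribute is both present and truthy.
def _example_contact_contains():
    contact = Contact(phone_number=u'+491700000000', first_name=u'Ada')
    assert 'phone_number' in contact   # set and truthy
    assert 'last_name' not in contact  # known field, but None here
    assert 'nonsense' not in contact   # not a Contact field at all
    return contact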
class Dice(Media):
"""
This object represents an animated emoji that displays a random value.
https://core.telegram.org/bots/api#dice
Parameters:
:param emoji: Emoji on which the dice throw animation is based
:type emoji: str|unicode
:param value: Value of the dice, 1-6 for "🎲" and "🎯" base emoji, 1-5 for "🏀" base emoji
:type value: int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, emoji, value, _raw=None):
"""
This object represents an animated emoji that displays a random value.
https://core.telegram.org/bots/api#dice
Parameters:
:param emoji: Emoji on which the dice throw animation is based
:type emoji: str|unicode
:param value: Value of the dice, 1-6 for "🎲" and "🎯" base emoji, 1-5 for "🏀" base emoji
:type value: int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Dice, self).__init__()
assert_type_or_raise(emoji, unicode_type, parameter_name="emoji")
self.emoji = emoji
assert_type_or_raise(value, int, parameter_name="value")
self.value = value
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Dice to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Dice, self).to_array()
array['emoji'] = u(self.emoji) # py2: type unicode, py3: type str
array['value'] = int(self.value) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Dice constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['emoji'] = u(array.get('emoji'))
data['value'] = int(array.get('value'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Dice from a given dictionary.
:return: new Dice instance.
:rtype: Dice
"""
if not array: # None or {}
return None
# end if
data = Dice.validate_array(array)
data['_raw'] = array
return Dice(**data)
# end def from_array
def __str__(self):
"""
Implements `str(dice_instance)`
"""
return "Dice(emoji={self.emoji!r}, value={self.value!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(dice_instance)`
"""
if self._raw:
return "Dice.from_array({self._raw})".format(self=self)
# end if
return "Dice(emoji={self.emoji!r}, value={self.value!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in dice_instance`
"""
return (
key in ["emoji", "value"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Dice
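# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source. The emoji below is one of the base emoji from the docstring.
def _example_dice_from_array():
    assert Dice.from_array(None) is None  # None and {} both short-circuit
    assert Dice.from_array({}) is None
    die = Dice.from_array({'emoji': u'\U0001F3B2', 'value': 4})
    assert die.value == 4 and die._raw is not None
    return die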
class PollOption(Receivable):
"""
This object contains information about one answer option in a poll.
https://core.telegram.org/bots/api#polloption
Parameters:
:param text: Option text, 1-100 characters
:type text: str|unicode
:param voter_count: Number of users that voted for this option
:type voter_count: int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, text, voter_count, _raw=None):
"""
This object contains information about one answer option in a poll.
https://core.telegram.org/bots/api#polloption
Parameters:
:param text: Option text, 1-100 characters
:type text: str|unicode
:param voter_count: Number of users that voted for this option
:type voter_count: int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(PollOption, self).__init__()
assert_type_or_raise(text, unicode_type, parameter_name="text")
self.text = text
assert_type_or_raise(voter_count, int, parameter_name="voter_count")
self.voter_count = voter_count
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this PollOption to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PollOption, self).to_array()
array['text'] = u(self.text) # py2: type unicode, py3: type str
array['voter_count'] = int(self.voter_count) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PollOption constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Receivable.validate_array(array)
data['text'] = u(array.get('text'))
data['voter_count'] = int(array.get('voter_count'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PollOption from a given dictionary.
:return: new PollOption instance.
:rtype: PollOption
"""
if not array: # None or {}
return None
# end if
data = PollOption.validate_array(array)
data['_raw'] = array
return PollOption(**data)
# end def from_array
def __str__(self):
"""
Implements `str(polloption_instance)`
"""
return "PollOption(text={self.text!r}, voter_count={self.voter_count!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(polloption_instance)`
"""
if self._raw:
return "PollOption.from_array({self._raw})".format(self=self)
# end if
return "PollOption(text={self.text!r}, voter_count={self.voter_count!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in polloption_instance`
"""
return (
key in ["text", "voter_count"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PollOption
class PollAnswer(Receivable):
"""
This object represents an answer of a user in a non-anonymous poll.
https://core.telegram.org/bots/api#pollanswer
Parameters:
:param poll_id: Unique poll identifier
:type poll_id: str|unicode
:param user: The user, who changed the answer to the poll
:type user: pytgbot.api_types.receivable.peer.User
:param option_ids: 0-based identifiers of answer options, chosen by the user. May be empty if the user retracted their vote.
:type option_ids: list of int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, poll_id, user, option_ids, _raw=None):
"""
This object represents an answer of a user in a non-anonymous poll.
https://core.telegram.org/bots/api#pollanswer
Parameters:
:param poll_id: Unique poll identifier
:type poll_id: str|unicode
:param user: The user, who changed the answer to the poll
:type user: pytgbot.api_types.receivable.peer.User
:param option_ids: 0-based identifiers of answer options, chosen by the user. May be empty if the user retracted their vote.
:type option_ids: list of int
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(PollAnswer, self).__init__()
from .peer import User
assert_type_or_raise(poll_id, unicode_type, parameter_name="poll_id")
self.poll_id = poll_id
assert_type_or_raise(user, User, parameter_name="user")
self.user = user
assert_type_or_raise(option_ids, list, parameter_name="option_ids")
self.option_ids = option_ids
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this PollAnswer to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PollAnswer, self).to_array()
array['poll_id'] = u(self.poll_id) # py2: type unicode, py3: type str
array['user'] = self.user.to_array() # type User
array['option_ids'] = self._as_array(self.option_ids) # type list of int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the PollAnswer constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
from .peer import User
data = Receivable.validate_array(array)
data['poll_id'] = u(array.get('poll_id'))
data['user'] = User.from_array(array.get('user'))
data['option_ids'] = PollAnswer._builtin_from_array_list(required_type=int, value=array.get('option_ids'), list_level=1)
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new PollAnswer from a given dictionary.
:return: new PollAnswer instance.
:rtype: PollAnswer
"""
if not array: # None or {}
return None
# end if
data = PollAnswer.validate_array(array)
data['_raw'] = array
return PollAnswer(**data)
# end def from_array
def __str__(self):
"""
Implements `str(pollanswer_instance)`
"""
return "PollAnswer(poll_id={self.poll_id!r}, user={self.user!r}, option_ids={self.option_ids!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(pollanswer_instance)`
"""
if self._raw:
return "PollAnswer.from_array({self._raw})".format(self=self)
# end if
return "PollAnswer(poll_id={self.poll_id!r}, user={self.user!r}, option_ids={self.option_ids!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in pollanswer_instance`
"""
return (
key in ["poll_id", "user", "option_ids"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class PollAnswer
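# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source. The User fields used here (id, is_bot, first_name) are an
# assumption based on the Bot API; check pytgbot's peer.User for the
# authoritative signature.
def _example_pollanswer_nested_user():
    from .peer import User
    answer = PollAnswer.from_array({
        'poll_id': u'57',
        'user': {'id': 1234, 'is_bot': False, 'first_name': u'Ada'},
        'option_ids': [0, 2],
    })
    assert isinstance(answer.user, User)  # nested dict became a User
    return answer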
class Poll(Media):
"""
This object contains information about a poll.
https://core.telegram.org/bots/api#poll
Parameters:
:param id: Unique poll identifier
:type id: str|unicode
:param question: Poll question, 1-255 characters
:type question: str|unicode
:param options: List of poll options
:type options: list of pytgbot.api_types.receivable.media.PollOption
:param total_voter_count: Total number of users that voted in the poll
:type total_voter_count: int
:param is_closed: True, if the poll is closed
:type is_closed: bool
:param is_anonymous: True, if the poll is anonymous
:type is_anonymous: bool
:param type: Poll type, currently can be "regular" or "quiz"
:type type: str|unicode
:param allows_multiple_answers: True, if the poll allows multiple answers
:type allows_multiple_answers: bool
Optional keyword parameters:
:param correct_option_id: Optional. 0-based identifier of the correct answer option. Available only for quiz polls that are closed, or that were sent (not forwarded) by the bot or to a private chat with the bot.
:type correct_option_id: int
:param explanation: Optional. Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters
:type explanation: str|unicode
:param explanation_entities: Optional. Special entities like usernames, URLs, bot commands, etc. that appear in the explanation
:type explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param open_period: Optional. Amount of time in seconds the poll will be active after creation
:type open_period: int
:param close_date: Optional. Point in time (Unix timestamp) when the poll will be automatically closed
:type close_date: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, id, question, options, total_voter_count, is_closed, is_anonymous, type, allows_multiple_answers, correct_option_id=None, explanation=None, explanation_entities=None, open_period=None, close_date=None, _raw=None):
"""
This object contains information about a poll.
https://core.telegram.org/bots/api#poll
Parameters:
:param id: Unique poll identifier
:type id: str|unicode
:param question: Poll question, 1-255 characters
:type question: str|unicode
:param options: List of poll options
:type options: list of pytgbot.api_types.receivable.media.PollOption
:param total_voter_count: Total number of users that voted in the poll
:type total_voter_count: int
:param is_closed: True, if the poll is closed
:type is_closed: bool
:param is_anonymous: True, if the poll is anonymous
:type is_anonymous: bool
:param type: Poll type, currently can be "regular" or "quiz"
:type type: str|unicode
:param allows_multiple_answers: True, if the poll allows multiple answers
:type allows_multiple_answers: bool
Optional keyword parameters:
:param correct_option_id: Optional. 0-based identifier of the correct answer option. Available only for quiz polls that are closed, or that were sent (not forwarded) by the bot or to a private chat with the bot.
:type correct_option_id: int
:param explanation: Optional. Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters
:type explanation: str|unicode
:param explanation_entities: Optional. Special entities like usernames, URLs, bot commands, etc. that appear in the explanation
:type explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param open_period: Optional. Amount of time in seconds the poll will be active after creation
:type open_period: int
:param close_date: Optional. Point in time (Unix timestamp) when the poll will be automatically closed
:type close_date: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Poll, self).__init__()
assert_type_or_raise(id, unicode_type, parameter_name="id")
self.id = id
assert_type_or_raise(question, unicode_type, parameter_name="question")
self.question = question
assert_type_or_raise(options, list, parameter_name="options")
self.options = options
assert_type_or_raise(total_voter_count, int, parameter_name="total_voter_count")
self.total_voter_count = total_voter_count
assert_type_or_raise(is_closed, bool, parameter_name="is_closed")
self.is_closed = is_closed
assert_type_or_raise(is_anonymous, bool, parameter_name="is_anonymous")
self.is_anonymous = is_anonymous
assert_type_or_raise(type, unicode_type, parameter_name="type")
self.type = type
assert_type_or_raise(allows_multiple_answers, bool, parameter_name="allows_multiple_answers")
self.allows_multiple_answers = allows_multiple_answers
assert_type_or_raise(correct_option_id, None, int, parameter_name="correct_option_id")
self.correct_option_id = correct_option_id
assert_type_or_raise(explanation, None, unicode_type, parameter_name="explanation")
self.explanation = explanation
assert_type_or_raise(explanation_entities, None, list, parameter_name="explanation_entities")
self.explanation_entities = explanation_entities
assert_type_or_raise(open_period, None, int, parameter_name="open_period")
self.open_period = open_period
assert_type_or_raise(close_date, None, int, parameter_name="close_date")
self.close_date = close_date
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Poll to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Poll, self).to_array()
array['id'] = u(self.id) # py2: type unicode, py3: type str
array['question'] = u(self.question) # py2: type unicode, py3: type str
array['options'] = self._as_array(self.options) # type list of PollOption
array['total_voter_count'] = int(self.total_voter_count) # type int
array['is_closed'] = bool(self.is_closed) # type bool
array['is_anonymous'] = bool(self.is_anonymous) # type bool
array['type'] = u(self.type) # py2: type unicode, py3: type str
array['allows_multiple_answers'] = bool(self.allows_multiple_answers) # type bool
if self.correct_option_id is not None:
array['correct_option_id'] = int(self.correct_option_id) # type int
if self.explanation is not None:
array['explanation'] = u(self.explanation) # py2: type unicode, py3: type str
if self.explanation_entities is not None:
array['explanation_entities'] = self._as_array(self.explanation_entities) # type list of MessageEntity
if self.open_period is not None:
array['open_period'] = int(self.open_period) # type int
if self.close_date is not None:
array['close_date'] = int(self.close_date) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Poll constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['id'] = u(array.get('id'))
data['question'] = u(array.get('question'))
data['options'] = PollOption.from_array_list(array.get('options'), list_level=1)
data['total_voter_count'] = int(array.get('total_voter_count'))
data['is_closed'] = bool(array.get('is_closed'))
data['is_anonymous'] = bool(array.get('is_anonymous'))
data['type'] = u(array.get('type'))
data['allows_multiple_answers'] = bool(array.get('allows_multiple_answers'))
data['correct_option_id'] = int(array.get('correct_option_id')) if array.get('correct_option_id') is not None else None
data['explanation'] = u(array.get('explanation')) if array.get('explanation') is not None else None
data['explanation_entities'] = MessageEntity.from_array_list(array.get('explanation_entities'), list_level=1) if array.get('explanation_entities') is not None else None
data['open_period'] = int(array.get('open_period')) if array.get('open_period') is not None else None
data['close_date'] = int(array.get('close_date')) if array.get('close_date') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Poll from a given dictionary.
:return: new Poll instance.
:rtype: Poll
"""
if not array: # None or {}
return None
# end if
data = Poll.validate_array(array)
data['_raw'] = array
return Poll(**data)
# end def from_array
def __str__(self):
"""
Implements `str(poll_instance)`
"""
return "Poll(id={self.id!r}, question={self.question!r}, options={self.options!r}, total_voter_count={self.total_voter_count!r}, is_closed={self.is_closed!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(poll_instance)`
"""
if self._raw:
return "Poll.from_array({self._raw})".format(self=self)
# end if
return "Poll(id={self.id!r}, question={self.question!r}, options={self.options!r}, total_voter_count={self.total_voter_count!r}, is_closed={self.is_closed!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in poll_instance`
"""
return (
key in ["id", "question", "options", "total_voter_count", "is_closed", "is_anonymous", "type", "allows_multiple_answers", "correct_option_id", "explanation", "explanation_entities", "open_period", "close_date"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Poll
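# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source. Shows that the nested 'options' dicts are turned into PollOption
# instances by validate_array() above.
def _example_poll_nested_options():
    poll = Poll.from_array({
        'id': u'42', 'question': u'Tabs or spaces?',
        'options': [{'text': u'tabs', 'voter_count': 1},
                    {'text': u'spaces', 'voter_count': 2}],
        'total_voter_count': 3, 'is_closed': False, 'is_anonymous': True,
        'type': u'regular', 'allows_multiple_answers': False,
    })
    assert isinstance(poll.options[0], PollOption)
    return poll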
class Location(Media):
"""
This object represents a point on the map.
https://core.telegram.org/bots/api#location
Parameters:
:param longitude: Longitude as defined by sender
:type longitude: float
:param latitude: Latitude as defined by sender
:type latitude: float
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, longitude, latitude, _raw=None):
"""
This object represents a point on the map.
https://core.telegram.org/bots/api#location
Parameters:
:param longitude: Longitude as defined by sender
:type longitude: float
:param latitude: Latitude as defined by sender
:type latitude: float
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Location, self).__init__()
assert_type_or_raise(longitude, float, parameter_name="longitude")
self.longitude = longitude
assert_type_or_raise(latitude, float, parameter_name="latitude")
self.latitude = latitude
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Location to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Location, self).to_array()
array['longitude'] = float(self.longitude) # type float
array['latitude'] = float(self.latitude) # type float
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Location constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['longitude'] = float(array.get('longitude'))
data['latitude'] = float(array.get('latitude'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Location from a given dictionary.
:return: new Location instance.
:rtype: Location
"""
if not array: # None or {}
return None
# end if
data = Location.validate_array(array)
data['_raw'] = array
return Location(**data)
# end def from_array
def __str__(self):
"""
Implements `str(location_instance)`
"""
return "Location(longitude={self.longitude!r}, latitude={self.latitude!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(location_instance)`
"""
if self._raw:
return "Location.from_array({self._raw})".format(self=self)
# end if
return "Location(longitude={self.longitude!r}, latitude={self.latitude!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in location_instance`
"""
return (
key in ["longitude", "latitude"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Location
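# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source. assert_type_or_raise() above checks for float exactly, so plain
# ints are rejected instead of being coerced.
def _example_location_strict_floats():
    loc = Location(longitude=13.41, latitude=52.52)  # floats required
    try:
        Location(longitude=13, latitude=52)  # ints are not floats here
    except TypeError:
        pass  # expected: the type check rejects the wrong type
    return loc.to_array()  # {'longitude': 13.41, 'latitude': 52.52}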
class Venue(Media):
"""
This object represents a venue.
https://core.telegram.org/bots/api#venue
Parameters:
:param location: Venue location
:type location: pytgbot.api_types.receivable.media.Location
:param title: Name of the venue
:type title: str|unicode
:param address: Address of the venue
:type address: str|unicode
Optional keyword parameters:
:param foursquare_id: Optional. Foursquare identifier of the venue
:type foursquare_id: str|unicode
:param foursquare_type: Optional. Foursquare type of the venue. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
:type foursquare_type: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, location, title, address, foursquare_id=None, foursquare_type=None, _raw=None):
"""
This object represents a venue.
https://core.telegram.org/bots/api#venue
Parameters:
:param location: Venue location
:type location: pytgbot.api_types.receivable.media.Location
:param title: Name of the venue
:type title: str|unicode
:param address: Address of the venue
:type address: str|unicode
Optional keyword parameters:
:param foursquare_id: Optional. Foursquare identifier of the venue
:type foursquare_id: str|unicode
:param foursquare_type: Optional. Foursquare type of the venue. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
:type foursquare_type: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Venue, self).__init__()
assert_type_or_raise(location, Location, parameter_name="location")
self.location = location
assert_type_or_raise(title, unicode_type, parameter_name="title")
self.title = title
assert_type_or_raise(address, unicode_type, parameter_name="address")
self.address = address
assert_type_or_raise(foursquare_id, None, unicode_type, parameter_name="foursquare_id")
self.foursquare_id = foursquare_id
assert_type_or_raise(foursquare_type, None, unicode_type, parameter_name="foursquare_type")
self.foursquare_type = foursquare_type
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Venue to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Venue, self).to_array()
array['location'] = self.location.to_array() # type Location
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['address'] = u(self.address) # py2: type unicode, py3: type str
if self.foursquare_id is not None:
array['foursquare_id'] = u(self.foursquare_id) # py2: type unicode, py3: type str
if self.foursquare_type is not None:
array['foursquare_type'] = u(self.foursquare_type) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Venue constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['location'] = Location.from_array(array.get('location'))
data['title'] = u(array.get('title'))
data['address'] = u(array.get('address'))
data['foursquare_id'] = u(array.get('foursquare_id')) if array.get('foursquare_id') is not None else None
data['foursquare_type'] = u(array.get('foursquare_type')) if array.get('foursquare_type') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Venue from a given dictionary.
:return: new Venue instance.
:rtype: Venue
"""
if not array: # None or {}
return None
# end if
data = Venue.validate_array(array)
data['_raw'] = array
return Venue(**data)
# end def from_array
def __str__(self):
"""
Implements `str(venue_instance)`
"""
return "Venue(location={self.location!r}, title={self.title!r}, address={self.address!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(venue_instance)`
"""
if self._raw:
return "Venue.from_array({self._raw})".format(self=self)
# end if
return "Venue(location={self.location!r}, title={self.title!r}, address={self.address!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in venue_instance`
"""
return (
key in ["location", "title", "address", "foursquare_id", "foursquare_type"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Venue
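# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source; title and address are placeholders. The nested Location is
# serialized through its own to_array().
def _example_venue_to_array():
    venue = Venue(location=Location(longitude=13.41, latitude=52.52),
                  title=u'Brandenburg Gate', address=u'Pariser Platz')
    data = venue.to_array()
    assert data['location']['longitude'] == 13.41  # plain dict, not Location
    return data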
class UserProfilePhotos(Result):
"""
This object represent a user's profile pictures.
https://core.telegram.org/bots/api#userprofilephotos
Parameters:
:param total_count: Total number of profile pictures the target user has
:type total_count: int
:param photos: Requested profile pictures (in up to 4 sizes each)
:type photos: list of list of pytgbot.api_types.receivable.media.PhotoSize
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, total_count, photos, _raw=None):
"""
This object represent a user's profile pictures.
https://core.telegram.org/bots/api#userprofilephotos
Parameters:
:param total_count: Total number of profile pictures the target user has
:type total_count: int
:param photos: Requested profile pictures (in up to 4 sizes each)
:type photos: list of list of pytgbot.api_types.receivable.media.PhotoSize
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(UserProfilePhotos, self).__init__()
assert_type_or_raise(total_count, int, parameter_name="total_count")
self.total_count = total_count
assert_type_or_raise(photos, list, parameter_name="photos")
self.photos = photos
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this UserProfilePhotos to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(UserProfilePhotos, self).to_array()
array['total_count'] = int(self.total_count) # type int
array['photos'] = self._as_array(self.photos) # type list of list of PhotoSize
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the UserProfilePhotos constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Result.validate_array(array)
data['total_count'] = int(array.get('total_count'))
data['photos'] = PhotoSize.from_array_list(array.get('photos'), list_level=2)
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new UserProfilePhotos from a given dictionary.
:return: new UserProfilePhotos instance.
:rtype: UserProfilePhotos
"""
if not array: # None or {}
return None
# end if
data = UserProfilePhotos.validate_array(array)
data['_raw'] = array
return UserProfilePhotos(**data)
# end def from_array
def __str__(self):
"""
Implements `str(userprofilephotos_instance)`
"""
return "UserProfilePhotos(total_count={self.total_count!r}, photos={self.photos!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(userprofilephotos_instance)`
"""
if self._raw:
return "UserProfilePhotos.from_array({self._raw})".format(self=self)
# end if
return "UserProfilePhotos(total_count={self.total_count!r}, photos={self.photos!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in userprofilephotos_instance`
"""
return (
key in ["total_count", "photos"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class UserProfilePhotos
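# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source. The PhotoSize fields are an assumption based on the Bot API.
# 'photos' is a list of lists (one inner list of sizes per photo), which is
# why validate_array() above uses list_level=2.
def _example_userprofilephotos_nesting():
    size = {'file_id': u'AgACfake', 'file_unique_id': u'AQADfake',
            'width': 160, 'height': 160}
    upp = UserProfilePhotos.from_array({'total_count': 1, 'photos': [[size]]})
    assert isinstance(upp.photos[0][0], PhotoSize)
    return upp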
class File(Receivable):
"""
This object represents a file ready to be downloaded.
The file can be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>.
It is guaranteed that the link will be valid for at least 1 hour.
When the link expires, a new one can be requested by calling getFile.
Maximum file size to download is 20 MB.
https://core.telegram.org/bots/api#file
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
Optional keyword parameters:
:param file_size: Optional. File size, if known
:type file_size: int
:param file_path: Optional. File path. Use https://api.telegram.org/file/bot<token>/<file_path> to get the file.
:type file_path: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, file_size=None, file_path=None, _raw=None):
"""
This object represents a file ready to be downloaded.
The file can be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>.
It is guaranteed that the link will be valid for at least 1 hour.
When the link expires, a new one can be requested by calling getFile.
Maximum file size to download is 20 MB.
https://core.telegram.org/bots/api#file
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
Optional keyword parameters:
:param file_size: Optional. File size, if known
:type file_size: int
:param file_path: Optional. File path. Use https://api.telegram.org/file/bot<token>/<file_path> to get the file.
:type file_path: str|unicode
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(File, self).__init__()
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
assert_type_or_raise(file_path, None, unicode_type, parameter_name="file_path")
self.file_path = file_path
self._raw = _raw
# end def __init__
def get_download_url(self, token):
"""
Creates a url to download the file.
Note: Contains the secret API key, so you should not share this url!
:param token: API key
:type token: str
:return: url
:rtype: str
"""
return "https://api.telegram.org/file/bot{token}/{file_path}".format(token=token, file_path=self.file_path)
# end def get_download_url
def to_array(self):
"""
Serializes this File to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(File, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
if self.file_path is not None:
array['file_path'] = u(self.file_path) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the File constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Receivable.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
data['file_path'] = u(array.get('file_path')) if array.get('file_path') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new File from a given dictionary.
:return: new File instance.
:rtype: File
"""
if not array: # None or {}
return None
# end if
data = File.validate_array(array)
data['_raw'] = array
return File(**data)
# end def from_array
def __str__(self):
"""
Implements `str(file_instance)`
"""
return "File(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, file_size={self.file_size!r}, file_path={self.file_path!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(file_instance)`
"""
if self._raw:
return "File.from_array({self._raw})".format(self=self)
# end if
return "File(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, file_size={self.file_size!r}, file_path={self.file_path!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in file_instance`
"""
return (
key in ["file_id", "file_unique_id", "file_size", "file_path"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class File
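# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source; the token and ids are fake placeholders. Real download URLs embed
# your bot token, so never log or share them.
def _example_file_download_url():
    f = File(file_id=u'BQADfake', file_unique_id=u'AgADfake',
             file_path=u'voice/file_42.oga')
    return f.get_download_url(token='123456:FAKE-TOKEN')
    # -> 'https://api.telegram.org/file/bot123456:FAKE-TOKEN/voice/file_42.oga'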
class ChatPhoto(Result):
"""
This object represents a chat photo.
https://core.telegram.org/bots/api#chatphoto
Parameters:
:param small_file_id: File identifier of small (160x160) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
:type small_file_id: str|unicode
:param small_file_unique_id: Unique file identifier of small (160x160) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type small_file_unique_id: str|unicode
:param big_file_id: File identifier of big (640x640) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
:type big_file_id: str|unicode
:param big_file_unique_id: Unique file identifier of big (640x640) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type big_file_unique_id: str|unicode
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, small_file_id, small_file_unique_id, big_file_id, big_file_unique_id, _raw=None):
"""
This object represents a chat photo.
https://core.telegram.org/bots/api#chatphoto
Parameters:
:param small_file_id: File identifier of small (160x160) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
:type small_file_id: str|unicode
:param small_file_unique_id: Unique file identifier of small (160x160) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type small_file_unique_id: str|unicode
:param big_file_id: File identifier of big (640x640) chat photo. This file_id can be used only for photo download and only for as long as the photo is not changed.
:type big_file_id: str|unicode
:param big_file_unique_id: Unique file identifier of big (640x640) chat photo, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type big_file_unique_id: str|unicode
Optional keyword parameters:
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(ChatPhoto, self).__init__()
assert_type_or_raise(small_file_id, unicode_type, parameter_name="small_file_id")
self.small_file_id = small_file_id
assert_type_or_raise(small_file_unique_id, unicode_type, parameter_name="small_file_unique_id")
self.small_file_unique_id = small_file_unique_id
assert_type_or_raise(big_file_id, unicode_type, parameter_name="big_file_id")
self.big_file_id = big_file_id
assert_type_or_raise(big_file_unique_id, unicode_type, parameter_name="big_file_unique_id")
self.big_file_unique_id = big_file_unique_id
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this ChatPhoto to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(ChatPhoto, self).to_array()
array['small_file_id'] = u(self.small_file_id) # py2: type unicode, py3: type str
array['small_file_unique_id'] = u(self.small_file_unique_id) # py2: type unicode, py3: type str
array['big_file_id'] = u(self.big_file_id) # py2: type unicode, py3: type str
array['big_file_unique_id'] = u(self.big_file_unique_id) # py2: type unicode, py3: type str
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the ChatPhoto constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Result.validate_array(array)
data['small_file_id'] = u(array.get('small_file_id'))
data['small_file_unique_id'] = u(array.get('small_file_unique_id'))
data['big_file_id'] = u(array.get('big_file_id'))
data['big_file_unique_id'] = u(array.get('big_file_unique_id'))
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new ChatPhoto from a given dictionary.
:return: new ChatPhoto instance.
:rtype: ChatPhoto
"""
if not array: # None or {}
return None
# end if
data = ChatPhoto.validate_array(array)
data['_raw'] = array
return ChatPhoto(**data)
# end def from_array
def __str__(self):
"""
Implements `str(chatphoto_instance)`
"""
return "ChatPhoto(small_file_id={self.small_file_id!r}, small_file_unique_id={self.small_file_unique_id!r}, big_file_id={self.big_file_id!r}, big_file_unique_id={self.big_file_unique_id!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(chatphoto_instance)`
"""
if self._raw:
return "ChatPhoto.from_array({self._raw})".format(self=self)
# end if
return "ChatPhoto(small_file_id={self.small_file_id!r}, small_file_unique_id={self.small_file_unique_id!r}, big_file_id={self.big_file_id!r}, big_file_unique_id={self.big_file_unique_id!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in chatphoto_instance`
"""
return (
key in ["small_file_id", "small_file_unique_id", "big_file_id", "big_file_unique_id"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class ChatPhoto
class Sticker(Media):
"""
This object represents a sticker.
https://core.telegram.org/bots/api#sticker
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Sticker width
:type width: int
:param height: Sticker height
:type height: int
:param is_animated: True, if the sticker is animated
:type is_animated: bool
Optional keyword parameters:
:param thumb: Optional. Sticker thumbnail in the .WEBP or .JPG format
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param emoji: Optional. Emoji associated with the sticker
:type emoji: str|unicode
:param set_name: Optional. Name of the sticker set to which the sticker belongs
:type set_name: str|unicode
:param mask_position: Optional. For mask stickers, the position where the mask should be placed
:type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, file_id, file_unique_id, width, height, is_animated, thumb=None, emoji=None, set_name=None, mask_position=None, file_size=None, _raw=None):
"""
This object represents a sticker.
https://core.telegram.org/bots/api#sticker
Parameters:
:param file_id: Identifier for this file, which can be used to download or reuse the file
:type file_id: str|unicode
:param file_unique_id: Unique identifier for this file, which is supposed to be the same over time and for different bots. Can't be used to download or reuse the file.
:type file_unique_id: str|unicode
:param width: Sticker width
:type width: int
:param height: Sticker height
:type height: int
:param is_animated: True, if the sticker is animated
:type is_animated: bool
Optional keyword parameters:
:param thumb: Optional. Sticker thumbnail in the .WEBP or .JPG format
:type thumb: pytgbot.api_types.receivable.media.PhotoSize
:param emoji: Optional. Emoji associated with the sticker
:type emoji: str|unicode
:param set_name: Optional. Name of the sticker set to which the sticker belongs
:type set_name: str|unicode
:param mask_position: Optional. For mask stickers, the position where the mask should be placed
:type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition
:param file_size: Optional. File size
:type file_size: int
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Sticker, self).__init__()
from .stickers import MaskPosition
assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
self.file_id = file_id
assert_type_or_raise(file_unique_id, unicode_type, parameter_name="file_unique_id")
self.file_unique_id = file_unique_id
assert_type_or_raise(width, int, parameter_name="width")
self.width = width
assert_type_or_raise(height, int, parameter_name="height")
self.height = height
assert_type_or_raise(is_animated, bool, parameter_name="is_animated")
self.is_animated = is_animated
assert_type_or_raise(thumb, None, PhotoSize, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(emoji, None, unicode_type, parameter_name="emoji")
self.emoji = emoji
assert_type_or_raise(set_name, None, unicode_type, parameter_name="set_name")
self.set_name = set_name
assert_type_or_raise(mask_position, None, MaskPosition, parameter_name="mask_position")
self.mask_position = mask_position
assert_type_or_raise(file_size, None, int, parameter_name="file_size")
self.file_size = file_size
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Sticker to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Sticker, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_unique_id'] = u(self.file_unique_id) # py2: type unicode, py3: type str
array['width'] = int(self.width) # type int
array['height'] = int(self.height) # type int
array['is_animated'] = bool(self.is_animated) # type bool
if self.thumb is not None:
array['thumb'] = self.thumb.to_array() # type PhotoSize
if self.emoji is not None:
array['emoji'] = u(self.emoji) # py2: type unicode, py3: type str
if self.set_name is not None:
array['set_name'] = u(self.set_name) # py2: type unicode, py3: type str
if self.mask_position is not None:
array['mask_position'] = self.mask_position.to_array() # type MaskPosition
if self.file_size is not None:
array['file_size'] = int(self.file_size) # type int
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Sticker constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
from .stickers import MaskPosition
data = Media.validate_array(array)
data['file_id'] = u(array.get('file_id'))
data['file_unique_id'] = u(array.get('file_unique_id'))
data['width'] = int(array.get('width'))
data['height'] = int(array.get('height'))
data['is_animated'] = bool(array.get('is_animated'))
data['thumb'] = PhotoSize.from_array(array.get('thumb')) if array.get('thumb') is not None else None
data['emoji'] = u(array.get('emoji')) if array.get('emoji') is not None else None
data['set_name'] = u(array.get('set_name')) if array.get('set_name') is not None else None
data['mask_position'] = MaskPosition.from_array(array.get('mask_position')) if array.get('mask_position') is not None else None
data['file_size'] = int(array.get('file_size')) if array.get('file_size') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Sticker from a given dictionary.
:return: new Sticker instance.
:rtype: Sticker
"""
if not array: # None or {}
return None
# end if
data = Sticker.validate_array(array)
data['_raw'] = array
return Sticker(**data)
# end def from_array
def __str__(self):
"""
Implements `str(sticker_instance)`
"""
return "Sticker(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, is_animated={self.is_animated!r}, thumb={self.thumb!r}, emoji={self.emoji!r}, set_name={self.set_name!r}, mask_position={self.mask_position!r}, file_size={self.file_size!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(sticker_instance)`
"""
if self._raw:
return "Sticker.from_array({self._raw})".format(self=self)
# end if
return "Sticker(file_id={self.file_id!r}, file_unique_id={self.file_unique_id!r}, width={self.width!r}, height={self.height!r}, is_animated={self.is_animated!r}, thumb={self.thumb!r}, emoji={self.emoji!r}, set_name={self.set_name!r}, mask_position={self.mask_position!r}, file_size={self.file_size!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in sticker_instance`
"""
return (
key in ["file_id", "file_unique_id", "width", "height", "is_animated", "thumb", "emoji", "set_name", "mask_position", "file_size"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Sticker
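# --- Editor's note: illustrative sketch, not part of the original pytgbot
# source; ids are placeholders and the PhotoSize fields are assumed from
# the Bot API. The optional thumb is only serialized once it is set.
def _example_sticker_optional_thumb():
    sticker = Sticker(file_id=u'CAADfake', file_unique_id=u'AgADfake',
                      width=512, height=512, is_animated=False)
    assert 'thumb' not in sticker.to_array()
    sticker.thumb = PhotoSize(file_id=u'AAQCfake', file_unique_id=u'AQADfake',
                              width=128, height=128)
    assert sticker.to_array()['thumb']['width'] == 128
    return sticker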
class Game(Media):
"""
This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers.
https://core.telegram.org/bots/api#game
Parameters:
:param title: Title of the game
:type title: str|unicode
:param description: Description of the game
:type description: str|unicode
:param photo: Photo that will be displayed in the game message in chats.
:type photo: list of pytgbot.api_types.receivable.media.PhotoSize
Optional keyword parameters:
:param text: Optional. Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters.
:type text: str|unicode
:param text_entities: Optional. Special entities that appear in text, such as usernames, URLs, bot commands, etc.
:type text_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param animation: Optional. Animation that will be displayed in the game message in chats. Upload via BotFather
:type animation: pytgbot.api_types.receivable.media.Animation
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
def __init__(self, title, description, photo, text=None, text_entities=None, animation=None, _raw=None):
"""
This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers.
https://core.telegram.org/bots/api#game
Parameters:
:param title: Title of the game
:type title: str|unicode
:param description: Description of the game
:type description: str|unicode
:param photo: Photo that will be displayed in the game message in chats.
:type photo: list of pytgbot.api_types.receivable.media.PhotoSize
Optional keyword parameters:
:param text: Optional. Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters.
:type text: str|unicode
:param text_entities: Optional. Special entities that appear in text, such as usernames, URLs, bot commands, etc.
:type text_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param animation: Optional. Animation that will be displayed in the game message in chats. Upload via BotFather
:type animation: pytgbot.api_types.receivable.media.Animation
:param _raw: Optional. Original data this object was generated from. Could be `None`.
:type _raw: None | dict
"""
super(Game, self).__init__()
assert_type_or_raise(title, unicode_type, parameter_name="title")
self.title = title
assert_type_or_raise(description, unicode_type, parameter_name="description")
self.description = description
assert_type_or_raise(photo, list, parameter_name="photo")
self.photo = photo
assert_type_or_raise(text, None, unicode_type, parameter_name="text")
self.text = text
assert_type_or_raise(text_entities, None, list, parameter_name="text_entities")
self.text_entities = text_entities
assert_type_or_raise(animation, None, Animation, parameter_name="animation")
self.animation = animation
self._raw = _raw
# end def __init__
def to_array(self):
"""
Serializes this Game to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(Game, self).to_array()
array['title'] = u(self.title) # py2: type unicode, py3: type str
array['description'] = u(self.description) # py2: type unicode, py3: type str
array['photo'] = self._as_array(self.photo) # type list of PhotoSize
if self.text is not None:
array['text'] = u(self.text) # py2: type unicode, py3: type str
if self.text_entities is not None:
array['text_entities'] = self._as_array(self.text_entities) # type list of MessageEntity
if self.animation is not None:
array['animation'] = self.animation.to_array() # type Animation
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the Game constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
data = Media.validate_array(array)
data['title'] = u(array.get('title'))
data['description'] = u(array.get('description'))
data['photo'] = PhotoSize.from_array_list(array.get('photo'), list_level=1)
data['text'] = u(array.get('text')) if array.get('text') is not None else None
data['text_entities'] = MessageEntity.from_array_list(array.get('text_entities'), list_level=1) if array.get('text_entities') is not None else None
data['animation'] = Animation.from_array(array.get('animation')) if array.get('animation') is not None else None
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new Game from a given dictionary.
:return: new Game instance.
:rtype: Game
"""
if not array: # None or {}
return None
# end if
data = Game.validate_array(array)
data['_raw'] = array
return Game(**data)
# end def from_array
def __str__(self):
"""
Implements `str(game_instance)`
"""
return "Game(title={self.title!r}, description={self.description!r}, photo={self.photo!r}, text={self.text!r}, text_entities={self.text_entities!r}, animation={self.animation!r})".format(self=self)
# end def __str__
def __repr__(self):
"""
Implements `repr(game_instance)`
"""
if self._raw:
return "Game.from_array({self._raw})".format(self=self)
# end if
return "Game(title={self.title!r}, description={self.description!r}, photo={self.photo!r}, text={self.text!r}, text_entities={self.text_entities!r}, animation={self.animation!r})".format(self=self)
# end def __repr__
def __contains__(self, key):
"""
Implements `"key" in game_instance`
"""
return (
key in ["title", "description", "photo", "text", "text_entities", "animation"]
and hasattr(self, key)
and bool(getattr(self, key, None))
)
# end def __contains__
# end class Game
| gpl-3.0 | -5,835,422,778,018,785,000 | 34.429392 | 497 | 0.617664 | false | 3.765129 | false | false | false |
ndawe/rootpy | rootpy/ROOT.py | 2 | 4217 | """
:py:mod:`rootpy.ROOT`
=====================
This module is intended to be a drop-in replacement for ordinary
PyROOT imports by mimicking PyROOT's interface. If you find a case where it is
not, please report an issue to the rootpy developers.
Both ROOT and rootpy classes can be accessed in a harmonized way through this
module. This means you can take advantage of rootpy classes automatically by
replacing ``import ROOT`` with ``import rootpy.ROOT as ROOT`` or
``from rootpy import ROOT`` in your code, while maintaining backward
compatibility with existing use of ROOT's classes.
ROOT classes are automatically "asrootpy'd" *after* the constructor in ROOT has
been called:
.. sourcecode:: python
>>> import rootpy.ROOT as ROOT
>>> h = ROOT.TH1F('name', 'title', 10, 0, 1)
>>> h
Hist('name')
>>> h.TYPE
'F'
Also access rootpy classes under this same module without needing to remember
where to import them from in rootpy:
.. sourcecode:: python
>>> import rootpy.ROOT as ROOT
>>> h = ROOT.Hist(10, 0, 1, name='name', type='F')
>>> h
Hist('name')
>>> h.TYPE
'F'
Plain old ROOT can still be accessed through the ``R`` property:
.. sourcecode:: python
>>> from rootpy import ROOT
>>> ROOT.R.TFile
<class 'ROOT.TFile'>
"""
from __future__ import absolute_import
from copy import copy
import ROOT
from . import asrootpy, lookup_rootpy, ROOT_VERSION
from . import QROOT, stl
from .utils.module_facade import Facade
__all__ = []
def proxy_global(name, no_expand_macro=False, fname='func', args=()):
"""
Used to automatically asrootpy ROOT's thread local variables
"""
if no_expand_macro: # pragma: no cover
# handle older ROOT versions without _ExpandMacroFunction wrapping
@property
def gSomething_no_func(self):
glob = self(getattr(ROOT, name))
# create a fake func() that just returns self
def func():
return glob
glob.func = func
return glob
return gSomething_no_func
@property
def gSomething(self):
obj_func = getattr(getattr(ROOT, name), fname)
try:
obj = obj_func(*args)
except ReferenceError: # null pointer
return None
# asrootpy
return self(obj)
return gSomething
@Facade(__name__, expose_internal=False)
class Module(object):
__version__ = ROOT_VERSION
def __call__(self, arg, after_init=False):
return asrootpy(arg, warn=False, after_init=after_init)
def __getattr__(self, what):
try:
# check ROOT
result = self(getattr(ROOT, what), after_init=True)
except AttributeError:
# check rootpy
result = lookup_rootpy(what)
if result is None:
raise AttributeError(
'ROOT does not have the attribute `{0}` '
'and rootpy does not contain the class `{0}`'.format(what))
return result
try:
# Memoize
setattr(self, what, result)
except AttributeError:
# Oops... Oh well. I tried.
pass
return result
@property
def R(self):
return ROOT
gPad = proxy_global("gPad",
fname='GetPad' if ROOT_VERSION >= (6, 9, 2) else 'func',
args=(0,) if ROOT_VERSION >= (6, 9, 2) else ())
gVirtualX = proxy_global("gVirtualX")
if ROOT_VERSION < (5, 32, 0): # pragma: no cover
gDirectory = proxy_global("gDirectory", no_expand_macro=True)
gFile = proxy_global("gFile", no_expand_macro=True)
gInterpreter = proxy_global("gInterpreter", no_expand_macro=True)
else:
gDirectory = proxy_global("gDirectory",
fname='CurrentDirectory' if ROOT_VERSION >= (6, 9, 2) else 'func')
gFile = proxy_global("gFile",
fname='CurrentFile' if ROOT_VERSION >= (6, 9, 2) else 'func')
gInterpreter = proxy_global("gInterpreter",
no_expand_macro=ROOT_VERSION >= (6, 9, 2))
# use the smart template STL types from rootpy.stl instead
for t in QROOT.std.stlclasses:
locals()[t] = getattr(stl, t)
del t
| bsd-3-clause | -4,648,580,643,503,857,000 | 28.284722 | 79 | 0.609912 | false | 3.81629 | false | false | false |
jrutila/django-reportengine | reportengine/jsonfield.py | 1 | 5953 | """
Django JSON Field. This extends Django Model Fields to store JSON as a field-type.
"""
#TODO - Move this to utils or another application. This is tangential to reporting and useful for other things.
from django.db import models
try:
import json as simplejson
except ImportError:
from django.utils import simplejson
from django.core.serializers.json import DjangoJSONEncoder
import logging
class JSONFieldDescriptor(object):
def __init__(self, field, datatype=dict):
"""
Create a JSONFieldDescriptor
:param field: The field to create the descriptor for.
:param datatype: The datatype of the descriptor.
"""
self.field = field
self.datatype = datatype
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
if not hasattr(instance, self.field.get_cache_name()):
data = instance.__dict__.get(self.field.attname, self.datatype())
if not isinstance(data, self.datatype):
data = self.field.loads(data)
if data is None:
data = self.datatype()
setattr(instance, self.field.get_cache_name(), data)
return getattr(instance, self.field.get_cache_name())
def __set__(self, instance, value):
if not isinstance(value, (self.datatype, basestring)):
value = self.datatype(value)
instance.__dict__[self.field.attname] = value
try:
delattr(instance, self.field.get_cache_name())
except AttributeError:
pass
class JSONField(models.TextField):
"""
A field for storing JSON-encoded data. The data is accessible as standard
Python data types and is transparently encoded/decoded to/from a JSON
string in the database.
"""
serialize_to_string = True
descriptor_class = JSONFieldDescriptor
def __init__(self, verbose_name=None, name=None,
encoder=DjangoJSONEncoder(), decoder=simplejson.JSONDecoder(),
datatype=dict,
**kwargs):
"""
Create a new JSONField
:param verbose_name: The verbose name of the field
:param name: The short name of the field.
:param encoder: The encoder used to turn native datatypes into JSON.
:param decoder: The decoder used to turn JSON into native datatypes.
:param datatype: The native datatype to store.
:param kwargs: Other arguments to pass to parent constructor.
"""
blank = kwargs.pop('blank', True)
models.TextField.__init__(self, verbose_name, name, blank=blank,
**kwargs)
self.encoder = encoder
self.decoder = decoder
self.datatype = datatype
#TODO - Is this used anywhere? If not, let's remove it.
def db_type(self, connection=None):
"""
Returns the database type. Overrides django.db.models.Field's db_type.
:param connection: The database connection - defaults to none.
:return: The database type. Always returns the string 'text'.
"""
return "text"
def contribute_to_class(self, cls, name):
"""
Overrides django.db.models.Field's contribute to class to handle descriptors.
:param cls: The class to contribute to.
:param name: The name.
"""
super(JSONField, self).contribute_to_class(cls, name)
setattr(cls, self.name, self.descriptor_class(self, self.datatype))
def pre_save(self, model_instance, add):
"Returns field's value just before saving. If a descriptor, get's that instead of value from object."
descriptor = getattr(model_instance, self.attname)
if isinstance(descriptor, self.datatype):
return descriptor
return self.field.value_from_object(model_instance)
def get_db_prep_save(self, value, *args, **kwargs):
if not isinstance(value, basestring):
value = self.dumps(value)
return super(JSONField, self).get_db_prep_save(value, *args, **kwargs)
def value_to_string(self, obj):
"""
Turns the value to a JSON string.
:param obj: An object.
:return: A string.
"""
return self.dumps(self.value_from_object(obj))
def dumps(self, data):
"""
Encodes data and dumps.
:param data: A value.
:return: An encoded string.
"""
return self.encoder.encode(data)
def loads(self, val):
"""
:param val: A JSON encoddd string.
:return: A dict with data from val
"""
try:
val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET)
# XXX We need to investigate why this is happening once we have
# a solid repro case.
if isinstance(val, basestring):
logging.warning("JSONField decode error. Expected dictionary, "
"got string for input '%s'" % val)
# For whatever reason, we may have gotten back
val = self.decoder.decode(val)#, encoding=settings.DEFAULT_CHARSET)
except ValueError:
val = None
return val
def south_field_triple(self):
"""
Returns a suitable description of this field for South."
:return: A tuple of field_class, args and kwargs from South's introspector.
"""
# We'll just introspect the _actual_ field.
from south.modelsinspector import introspector
field_class = "django.db.models.fields.TextField"
args, kwargs = introspector(self)
# That's our definition!
return (field_class, args, kwargs)
| bsd-3-clause | -8,755,492,323,825,175,000 | 35.29878 | 112 | 0.605409 | false | 4.40963 | false | false | false |
ftrautsch/mecoSHARK | mecoshark/processor/javaprocessor.py | 1 | 4793 | import logging
import os
import shutil
import subprocess
import sys
from mecoshark.processor.baseprocessor import BaseProcessor
from mecoshark.resultparser.sourcemeterparser import SourcemeterParser
class JavaProcessor(BaseProcessor):
"""
Implements :class:`~mecoshark.processor.baseprocessor.BaseProcessor` for Java
"""
@property
def supported_languages(self):
"""
See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.supported_languages`
"""
return ['java']
@property
def enabled(self):
"""
See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.enabled`
"""
return True
@property
def threshold(self):
"""
See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.threshold`
"""
return 0.4
def __init__(self, output_path, input_path):
super().__init__(output_path, input_path)
self.logger = logging.getLogger("processor")
return
def execute_sourcemeter(self):
"""
Executes sourcemeter for the java language
"""
# Clean output directory
shutil.rmtree(os.path.join(self.output_path, self.projectname), True)
template_path = os.path.dirname(os.path.realpath(__file__))+'/../../templates'
failure_happened = False
# try maven
if os.path.exists(os.path.join(self.input_path, 'pom.xml')):
self.logger.info("Trying out maven...")
self.prepare_template(os.path.join(template_path, 'build-maven.sh'))
self.prepare_template(os.path.join(template_path, 'analyze-maven.sh'))
try:
subprocess.run(os.path.join(self.output_path, 'analyze-maven.sh'), shell=True)
except Exception:
sys.exit(1)
pass
if not self.is_output_produced():
shutil.rmtree(os.path.join(self.output_path, self.projectname), True)
failure_happened = True
# try ant
if os.path.exists(os.path.join(self.input_path, 'build.xml')) and failure_happened:
self.logger.info("Trying out ant...")
self.prepare_template(os.path.join(template_path, 'build-ant.sh'))
self.prepare_template(os.path.join(template_path, 'analyze-ant.sh'))
try:
subprocess.run(os.path.join(self.output_path, 'analyze-ant.sh'), shell=True)
except Exception:
pass
if not self.is_output_produced():
shutil.rmtree(os.path.join(self.output_path, self.projectname), True)
failure_happened = True
# use directory based analysis otherwise
if failure_happened:
self.logger.info("Trying out directory analysis for java...")
self.prepare_template(os.path.join(template_path, 'analyze-dir.sh'))
if self.input_path.endswith("/"):
self.input_path = self.input_path[:-1]
if self.output_path.endswith("/"):
self.output_path = self.output_path[:-1]
try:
subprocess.run(os.path.join(self.output_path, 'analyze-dir.sh'), shell=True)
except Exception:
pass
if not self.is_output_produced():
self.logger.error('Problem in using mecoshark! No output was produced!')
def is_output_produced(self):
"""
Checks if output was produced for the process
:return: boolean
"""
output_path = os.path.join(self.output_path, self.projectname, 'java')
if not os.path.exists(output_path):
return False
output_path = os.path.join(output_path, os.listdir(output_path)[0])
number_of_files = len([name for name in os.listdir(output_path) if name.endswith('.csv')])
if number_of_files == 12:
return True
return False
def process(self, revision, url, options):
"""
See: :func:`~mecoshark.processor.baseprocessor.BaseProcessor.process`
Processes the given revision.
First executes sourcemeter with given options, then it creates the parser to store the data.
:param revision: revision
:param url: url of the project that is analyzed
:param options: options for execution
"""
self.execute_sourcemeter()
meco_path = os.path.join(self.output_path, self.projectname, 'java')
output_path = os.path.join(meco_path, os.listdir(meco_path)[0])
parser = SourcemeterParser(output_path, self.input_path, url, revision)
parser.store_data()
# delete directory
shutil.rmtree(os.path.join(self.output_path, self.projectname), True)
| apache-2.0 | -6,460,808,054,946,578,000 | 31.828767 | 100 | 0.606092 | false | 4.017603 | false | false | false |
hsharrison/pyglet2d | src/pyglet2d.py | 1 | 14357 | __version__ = '0.2.1'
from itertools import chain
import numpy as np
import pyglet
from Polygon import Polygon, setDataStyle, STYLE_NUMPY
from Polygon.Utils import pointList as point_list
setDataStyle(STYLE_NUMPY)
class Shape:
"""Graphical polygon primitive for use with `pyglet`_.
Alternative constructor methods:
- |Shape.circle|
- |Shape.rectangle|
- |Shape.regular_polygon|
- |Shape.from_dict|
Parameters
----------
vertices : array-like or |Polygon|.
If a |Polygon| is passed, its points will be used.
Otherwise, `vertices` should be a sequence of `[x, y]` locations or an array with x and y columns.
color : str or 3-tuple of int, optional
Color, in R, G, B format.
Alternatively, a key that refers to an element of `colors`.
velocity : array-like
Speed and direction of motion, in [dx_dt, dy_dt] format.
angular_velocity : float
Speed of angular motion, in counter-clockwise radians per second.
colors : dict of tuple, optional
Named colors, defined as R, G, B tuples.
Useful for easily switching between a set of colors.
Attributes
----------
poly : |Polygon|
Associated |Polygon| object.
vertices : |array|
An array of points, with x and y columns. Read-only.
center : |array|
The centroid of the shape.
Setting center calls |Shape.translate|.
position : |array|
Alias for `center`.
radius : |array|
Mean distance from each point to the center.
Setting radius calls |Shape.scale|.
color : str or tuple of int
The current color, in R, G, B format if `colors` was not passed.
Otherwise, the current color is represented as a key in `colors`.
colors : dict of tuple
Named colors.
velocity : |array|
Speed and direction of linear motion.
Angular_velocity : float
Speed of angular motion, in counter-clockwise radians per second.
enabled : bool
If False, the shape will not be drawn.
"""
def __init__(self, vertices, color=(255, 255, 255), velocity=(0, 0), angular_velocity=0, colors=None):
if isinstance(vertices, Polygon):
self.poly = vertices
else:
self.poly = Polygon(vertices)
self.colors = colors
self._color = 'primary'
if colors:
self.color = color
else:
self.colors = {'primary': color}
self.velocity = np.asarray(velocity)
self.angular_velocity = angular_velocity
# Construct vertex_list.
self._vertex_list = self._get_vertex_list()
self.enabled = True
@classmethod
def regular_polygon(cls, center, radius, n_vertices, start_angle=0, **kwargs):
"""Construct a regular polygon.
Parameters
----------
center : array-like
radius : float
n_vertices : int
start_angle : float, optional
Where to put the first point, relative to `center`,
in radians counter-clockwise starting from the horizontal axis.
kwargs
Other keyword arguments are passed to the |Shape| constructor.
"""
angles = (np.arange(n_vertices) * 2 * np.pi / n_vertices) + start_angle
return cls(center + radius * np.array([np.cos(angles), np.sin(angles)]).T, **kwargs)
@classmethod
def circle(cls, center, radius, n_vertices=50, **kwargs):
"""Construct a circle.
Parameters
----------
center : array-like
radius : float
n_vertices : int, optional
Number of points to draw.
Decrease for performance, increase for appearance.
kwargs
Other keyword arguments are passed to the |Shape| constructor.
"""
return cls.regular_polygon(center, radius, n_vertices, **kwargs)
@classmethod
def rectangle(cls, vertices, **kwargs):
"""Shortcut for creating a rectangle aligned with the screen axes from only two corners.
Parameters
----------
vertices : array-like
An array containing the ``[x, y]`` positions of two corners.
kwargs
Other keyword arguments are passed to the |Shape| constructor.
"""
bottom_left, top_right = vertices
top_left = [bottom_left[0], top_right[1]]
bottom_right = [top_right[0], bottom_left[1]]
return cls([bottom_left, bottom_right, top_right, top_left], **kwargs)
@classmethod
def from_dict(cls, spec):
"""Create a |Shape| from a dictionary specification.
Parameters
----------
spec : dict
A dictionary with either the fields ``'center'`` and ``'radius'`` (for a circle),
``'center'``, ``'radius'``, and ``'n_vertices'`` (for a regular polygon),
or ``'vertices'``.
If only two vertices are given, they are assumed to be lower left and top right corners of a rectangle.
Other fields are interpreted as keyword arguments.
"""
spec = spec.copy()
center = spec.pop('center', None)
radius = spec.pop('radius', None)
if center and radius:
return cls.circle(center, radius, **spec)
vertices = spec.pop('vertices')
if len(vertices) == 2:
return cls.rectangle(vertices, **spec)
return cls(vertices, **spec)
@property
def vertices(self):
return np.asarray(point_list(self.poly))
@property
def color(self):
if len(self.colors) == 1:
return self.colors[self._color]
else:
return self._color
@color.setter
def color(self, value):
if value in self.colors:
self._color = value
else:
self.colors[self._color] = value
@property
def _kwargs(self):
"""Keyword arguments for recreating the Shape from the vertices.
"""
return dict(color=self.color, velocity=self.velocity, colors=self.colors)
@property
def center(self):
return np.asarray(self.poly.center())
@center.setter
def center(self, value):
self.translate(np.asarray(value) - self.center)
@property
def radius(self):
return np.linalg.norm(self.vertices - self.center, axis=1).mean()
@radius.setter
def radius(self, value):
self.scale(value / self.radius)
@property
def _gl_vertices(self):
return list(chain(self.center, *point_list(self.poly)))
@property
def _gl_colors(self):
return (len(self) + 1) * self.colors[self._color]
def distance_to(self, point):
"""Distance from center to arbitrary point.
Parameters
----------
point : array-like
Returns
-------
float
"""
return np.linalg.norm(self.center - point)
def scale(self, factor, center=None):
"""Resize the shape by a proportion (e.g., 1 is unchanged), in-place.
Parameters
----------
factor : float or array-like
If a scalar, the same factor will be applied in the x and y dimensions.
center : array-like, optional
Point around which to perform the scaling.
If not passed, the center of the shape is used.
"""
factor = np.asarray(factor)
if len(factor.shape):
args = list(factor)
else:
args = [factor, factor]
if center is not None:
args.extend(center)
self.poly.scale(*args)
return self
def translate(self, vector):
"""Translate the shape along a vector, in-place.
Parameters
----------
vector : array-like
"""
self.poly.shift(*vector)
def rotate(self, angle, center=None):
"""Rotate the shape, in-place.
Parameters
----------
angle : float
Angle to rotate, in radians counter-clockwise.
center : array-like, optional
Point about which to rotate.
If not passed, the center of the shape will be used.
"""
args = [angle]
if center is not None:
args.extend(center)
self.poly.rotate(*args)
return self
def flip_x(self, center=None):
"""Flip the shape in the x direction, in-place.
Parameters
----------
center : array-like, optional
Point about which to flip.
If not passed, the center of the shape will be used.
"""
if center is None:
self.poly.flip()
else:
self.poly.flip(center[0])
def flip_y(self, center=None):
"""Flip the shape in the y direction, in-place.
Parameters
----------
center : array-like, optional
Point about which to flip.
If not passed, the center of the shape will be used.
"""
if center is None:
self.poly.flop()
else:
self.poly.flop(center[1])
return self
def flip(self, angle, center=None):
""" Flip the shape in an arbitrary direction.
Parameters
----------
angle : array-like
The angle, in radians counter-clockwise from the horizontal axis,
defining the angle about which to flip the shape (of a line through `center`).
center : array-like, optional
The point about which to flip.
If not passed, the center of the shape will be used.
"""
return self.rotate(-angle, center=center).flip_y(center=center).rotate(angle, center=center)
def _get_vertex_list(self):
indices = []
for i in range(1, len(self) + 1):
indices.extend([0, i, i + 1])
indices[-1] = 1
return pyglet.graphics.vertex_list_indexed(
len(self) + 1, indices, ('v2f', self._gl_vertices), ('c3B', self._gl_colors))
def draw(self):
"""Draw the shape in the current OpenGL context.
"""
if self.enabled:
self._vertex_list.colors = self._gl_colors
self._vertex_list.vertices = self._gl_vertices
self._vertex_list.draw(pyglet.gl.GL_TRIANGLES)
def update(self, dt):
"""Update the shape's position by moving it forward according to its velocity.
Parameters
----------
dt : float
"""
self.translate(dt * self.velocity)
self.rotate(dt * self.angular_velocity)
def enable(self, enabled):
"""Set whether the shape should be drawn.
Parameters
----------
enabled : bool
"""
self.enabled = enabled
return self
def overlaps(self, other):
"""Check if two shapes overlap.
Parameters
----------
other : |Shape|
Returns
-------
bool
"""
return bool(self.poly.overlaps(other.poly))
def covers(self, other):
"""Check if the shape completely covers another shape.
Parameters
----------
other : |Shape|
Returns
-------
bool
"""
return bool(self.poly.covers(other.poly))
def __repr__(self):
kwarg_strs = []
for arg, value in self._kwargs.items():
if isinstance(value, str):
value_str = "'{}'".format(value)
elif isinstance(value, np.ndarray):
value_str = '[{}, {}]'.format(*value)
else:
value_str = str(value)
kwarg_strs.append(arg + '=' + value_str)
kwargs = ',\n' + ', '.join(kwarg_strs)
return '{cls}({points}{kwargs})'.format(
cls=type(self).__name__,
points='[{}]'.format(',\n'.join('[{}, {}]'.format(x, y) for x, y in self.vertices)),
kwargs=kwargs,
)
def __eq__(self, other):
if isinstance(other, Shape):
if len(self) != len(other):
return False
return (np.all(np.isclose(np.sort(self.vertices, axis=0), np.sort(other.vertices, axis=0))) and
self.colors == other.colors and
self.color == other.color and
np.all(np.isclose(self.velocity, other.velocity)))
else:
return False
def __bool__(self):
return True
def __getitem__(self, item):
return self.vertices[item]
def __len__(self):
return self.poly.nPoints()
def __add__(self, other):
if isinstance(other, Shape):
return type(self)(self.poly + other.poly)
return type(self)(self.vertices + other, **self._kwargs)
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, Shape):
return type(self)(self.poly - other.poly)
return type(self)(self.vertices - other, **self._kwargs)
def __mul__(self, other):
return type(self)(self.vertices * other, **self._kwargs)
def __rmul__(self, other):
return type(self)(other * self.vertices, **self._kwargs)
def __truediv__(self, other):
return type(self)(self.vertices / other, **self._kwargs)
__div__ = __truediv__
def __xor__(self, other):
return type(self)(self.poly ^ other.poly, **self._kwargs)
def __and__(self, other):
return type(self)(self.poly & other.poly, **self._kwargs)
def __or__(self, other):
return type(self)(self.poly | other.poly, **self._kwargs)
def __iadd__(self, other):
self.translate(other)
return self
def __isub__(self, other):
self.translate(-np.asarray(other))
return self
def __imul__(self, other):
if isinstance(other, int) or isinstance(other, float):
self.poly.scale(other, other)
elif len(other) == 2:
self.poly.scale(*other)
return self
def __itruediv__(self, other):
if isinstance(other, int) or isinstance(other, float):
self.poly.scale(1/other, 1/other)
elif len(other) == 2:
self.poly.scale(1/other[0], 1/other[1])
return self
__idiv__ = __itruediv__
position = center
| bsd-2-clause | -220,555,798,109,811,740 | 28.3 | 115 | 0.559866 | false | 4.246377 | false | false | false |
dede67/tresor | tresor2.py | 1 | 40870 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import wx
import wx.lib.mixins.listctrl as listmix
import sqlite3
cfgFile_g=".tresor2.settings"
# ###########################################################
# The encryption class
# requires: python-pycrypto - Cryptographic modules for Python
import struct
import random
import hashlib
from Crypto.Cipher import AES
import base64
class DedeCrypt():
  # Sets the password and returns an encrypted
  # hash value of this password.
def PasswortEinstellen(self, password):
self.key=hashlib.sha256(password).digest()
return(self.verschluesseln(base64.b64encode(self.key)))
  # Returns True if "passwordhash" matches the password set
  # via PasswortEinstellen. Otherwise False.
def PasswortPruefen(self, passwordhash):
try:
tk=base64.b64decode(self.entschluesseln(passwordhash))
except TypeError:
return(False)
if tk==self.key:
return(True)
return(False)
  # Returns the encrypted version of the list "lst"
def ListeVerschluesseln(self, lst):
return(self.verschluesseln(self.ListePacken(lst)))
  # Returns the decrypted version of "txt" as a list
def ListeEntschluesseln(self, txt):
return(self.ListeEntpacken(self.entschluesseln(txt)))
  # Returns the encrypted version of the string "textu"
def verschluesseln(self, textu):
iv=self.__RandomString(16)
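    # note: PyCrypto accepts but ignores the IV argument in ECB mode,
    # so these random bytes only act as a salt-like prefix in the ciphertext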
encryptor=AES.new(self.key, AES.MODE_ECB, iv)
return(base64.b64encode(iv + encryptor.encrypt(self.__String16(textu))))
  # Returns the decrypted version of "textv"
def entschluesseln(self, textv):
c1=base64.b64decode(textv)
iv=c1[:16]
decryptor=AES.new(self.key, AES.MODE_ECB, iv)
c2=c1[16:]
try:
c3=decryptor.decrypt(c2)
except ValueError:
return("<error>")
return(self.__StringAuspacken(c3))
  # Returns a string of random characters of length "laenge"
def __RandomString(self, laenge):
return(''.join(chr(random.randint(0, 0xFF)) for i in range(laenge)))
  # Returns as many random characters as are needed to pad "text"
  # to a length that is an integral multiple of 16
def __Laenge16(self, text):
if len(text)%16==0: return("")
return(self.__RandomString(16-len(text)%16))
  # Returns "text" with a leading length prefix, padded with as many
  # random characters as needed to make the length a multiple of 16
def __String16(self, text):
r=struct.pack('<h', len(text))+text
return(r+self.__Laenge16(r))
  # Returns a text packed with "__String16" in its original form
def __StringAuspacken(self, text):
l=struct.unpack('<h', text[:2])[0]
if l<0:
return("<error>")
return(text[2:l+2])
  # Returns the content of the list "liste" as a packed string
def ListePacken(self, liste):
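    # format: for each element a '<h' (little-endian 16-bit) length
    # prefix followed by the element's raw bytes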
s=""
for i in liste:
s+=struct.pack("<h", len(i))
s+=i
return(s)
  # Returns the list for the packed string "strg"
def ListeEntpacken(self, strg):
p=0
lst=[]
while p<len(strg):
l=struct.unpack("<h", strg[p:p+2])[0]
lst.append(strg[p+2:p+2+l])
p+=2+l
return(lst)
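# minimal usage sketch (assumed workflow, mirrors how Tresor uses the class):
#   dc=DedeCrypt()
#   marker=dc.PasswortEinstellen("geheim")   # marker goes into table pwdtest
#   assert dc.PasswortPruefen(marker)
#   ct=dc.ListeVerschluesseln(["dienst", "user", "pwd", "kommentar", "2012-05-01"])
#   assert dc.ListeEntschluesseln(ct)==["dienst", "user", "pwd", "kommentar", "2012-05-01"]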
# ###########################################################
# The window for the program
class TresorGUI(wx.Frame):
def __init__(self, parent, pos=wx.DefaultPosition, size=wx.DefaultSize):
wx.Frame.__init__(self, None, wx.ID_ANY, "Passwort-Verwaltung", pos=pos, size=size)
self.parent=parent
Tresor(self)
# ###########################################################
# listmix.ColumnSorterMixin wants it this way....
class MeinListCtrl(wx.ListCtrl):
def __init__(self, parent, ID=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ListCtrl.__init__(self, parent, ID, pos, size, style)
# ###########################################################
# The actual GUI
class Tresor(wx.Panel, listmix.ColumnSorterMixin):
# ###########################################################
  # Required by listmix.ColumnSorterMixin.
def GetListCtrl(self):
return self.liste
def OnColClick(self, event):
event.Skip()
# ###########################################################
  # Initializes variables and loads the settings file.
  # If the settings file contains a DB name, that
  # DB is opened.
def __init__(self, parent):
wx.Panel.__init__(self, parent, -1, style=wx.WANTS_CHARS)
self.parent=parent
# [0]=dienst, [1]=userid, [2]=password, [3]=kommentar, [4]=datum, [5]=ID
self.dDataMap={} # display
self.sDataMap={} # sort
    self.nachDBID={} # keyed by DB ID
    self.suchstring="" # used by OnCharEvent
    self.cltimer=None # used by OnCharEvent
    self.dbname="" # initialized from the settings file
    self.show_pwd=False # initialized from the settings file
    self.font=None # initialized from the settings file
    self.offeneDB=False # set to True in DBoeffnen if a DB is opened successfully
self.tresor=DedeCrypt()
self.SettingsFileLaden()
self.FensterAufbauen()
self.MenueAufbauen()
self.MenueUpdate()
self.mview.Check(302, self.show_pwd)
if self.dbname!="":
      wx.CallLater(100, self.DBoeffnen) # give it a moment to build the window first
# ###########################################################
  # Loads data from the settings file. If the file does not
  # exist, default values are used.
  # Called from: __init__
def SettingsFileLaden(self):
fc=wx.FileConfig(localFilename=cfgFile_g)
self.dbname=fc.Read("dbname")
self.show_pwd=bool(fc.ReadInt("show_pwd"))
fs=fc.ReadInt("font_size")
ff=fc.ReadInt("font_family")
fy=fc.ReadInt("font_style")
fw=fc.ReadInt("font_weight")
fu=fc.ReadInt("font_underline")
fa=fc.Read( "font_face")
if fa=="":
self.font=wx.Font(12, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
else:
self.font=wx.Font(fs, ff, fy, fw, fu, fa)
# ###########################################################
  # Creates the main control
  # In addition to the three visible columns there is a
  # fourth column that holds the respective DB ID.
  #
  # Called from: __init__
def FensterAufbauen(self):
self.liste=MeinListCtrl(self, style=wx.LC_REPORT|wx.BORDER_SUNKEN|wx.LC_SORT_ASCENDING|wx.LC_SINGLE_SEL)
self.liste.SetFont(self.font)
self.liste.Bind(wx.EVT_CHAR, self.OnCharEvent)
zb=7
self.liste.InsertColumn(0, 'Dienst', width=20*zb)
self.liste.InsertColumn(1, 'Username', width=20*zb)
self.liste.InsertColumn(2, 'Passwort', width=20*zb)
self.liste.InsertColumn(3, 'ID', width=0)
self.itemDataMap=self.sDataMap
listmix.ColumnSorterMixin.__init__(self, 3)
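    # the mixin sorts rows via self.itemDataMap and GetListCtrl();
    # 3 = number of sortable columns, the hidden ID column is excluded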
self.liste.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick)
self.liste.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnRowDClick)
self.liste.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
    topsizer=wx.BoxSizer(wx.VERTICAL)
    topsizer.Add(self.liste, 1, wx.ALL|wx.EXPAND, 5)
self.liste.SetToolTip(wx.ToolTip('Doppelklick oeffnet den aktuellen Eintrag zum aendern'))
self.SetSizer(topsizer)
# ###########################################################
  # Creates the menu and the status bar
  # Called from: __init__
def MenueAufbauen(self):
self.menubar=wx.MenuBar()
self.mfile=wx.Menu()
self.mfile.Append(101, '&Neue Datenbank', 'Legt eine neue Datenbank an')
self.mfile.Append(102, '&Oeffnen', 'Oeffnet eine Datenbank')
self.mfile.AppendSeparator()
self.mfile.Append(105, '&Abgleichen (unfertig)', 'Importiert Änderungen aus einer weiteren Datenbank')
self.mfile.AppendSeparator()
self.mfile.Append(104, '&Speichern', 'Speichert die Programm-Einstellungen')
self.mfile.AppendSeparator()
self.mfile.Append(103, '&Beenden', 'Beendet das Programm')
self.medit=wx.Menu()
self.medit.Append(201, '&neuer Eintrag\tIns', 'Erstellt einen neuen Eintrag')
self.medit.Append(202, 'Eintrag &aendern\tEnter', 'Oeffnet den aktuellen Eintrag zum Aendern')
self.medit.Append(203, 'Eintrag &loeschen\tDel', 'Loescht den aktuellen Eintrag')
self.medit.AppendSeparator()
self.medit.Append(204, '&Username kopieren\tCtrl-N', 'Kopiert den aktuellen Username ins Clipboard')
self.medit.Append(205, '&Passwort kopieren\tCtrl-P', 'Kopiert das aktuelle Passwort ins Clipboard')
self.mview=wx.Menu()
self.mview.Append(301, '&Font', 'Erlaubt die Auswahl einer anderen Schriftart')
self.mview.AppendSeparator()
self.mview.Append(302, '&Passworte anzeigen', 'Schaltet die Anzeige der Passwoerter um', True)
self.mhelp=wx.Menu()
self.mhelp.Append(401, '&Ueber', 'Zeigt eine Versions-Info an')
self.menubar.Append(self.mfile, '&Datei')
self.menubar.Append(self.medit, 'B&earbeiten')
self.menubar.Append(self.mview, '&Ansicht')
self.menubar.Append(self.mhelp, '&Hilfe')
self.parent.SetMenuBar(self.menubar)
self.parent.CreateStatusBar(2)
self.parent.SetStatusWidths([-1, 50])
self.parent.Bind(wx.EVT_MENU, self.NeueDBGewaehlt, id=101)
self.parent.Bind(wx.EVT_MENU, self.OeffnenGewaehlt, id=102)
self.parent.Bind(wx.EVT_MENU, self.SpeichernGewaehlt, id=104)
self.parent.Bind(wx.EVT_MENU, self.BeendenGewaehlt, id=103)
self.parent.Bind(wx.EVT_MENU, self.ImportDBGewaehlt, id=105)
self.parent.Bind(wx.EVT_MENU, self.neuerEintragGewaehlt, id=201)
self.parent.Bind(wx.EVT_MENU, self.EintragAendernGewaehlt, id=202)
self.parent.Bind(wx.EVT_MENU, self.EintragLoeschenGewaehlt, id=203)
self.parent.Bind(wx.EVT_MENU, self.UsernameKopierenGewaehlt, id=204)
self.parent.Bind(wx.EVT_MENU, self.PasswortKopierenGewaehlt, id=205)
self.parent.Bind(wx.EVT_MENU, self.FontGewaehlt, id=301)
self.parent.Bind(wx.EVT_MENU, self.PasswortAnzeigenGewaehlt, id=302)
self.parent.Bind(wx.EVT_MENU, self.UeberGewaehlt, id=401)
# ###########################################################
  # Sets the enabled state of the edit menu according to "self.offeneDB"
  # Called from: MenueAufbauen, NeueDBGewaehlt, OeffnenGewaehlt, DBoeffnen
def MenueUpdate(self):
for i in range(201, 206):
self.medit.Enable(i, self.offeneDB)
self.mfile.Enable(105, self.offeneDB)
if self.offeneDB==False:
self.parent.SetStatusText("", 0)
else:
self.parent.SetStatusText(self.dbname, 0)
# ###########################################################
  # The edit menu is also shown as a context menu
def OnContextMenu(self, event):
self.liste.PopupMenu(self.medit)
# ###########################################################
  # Menu: New DB
  # Asks for a DB name, creates and initializes the DB
  # and opens it afterwards (the password is asked for when
  # opening and bound to the DB).
  #
  # Called from: <menu>
def NeueDBGewaehlt(self, event):
dlg=wx.FileDialog(self, message="neue DB", defaultDir=".", defaultFile="tresor2.sqlite", \
wildcard="DBs|*.sqlite|alle|*", style=wx.FD_SAVE)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
return
self.dbname=dlg.GetPath()
dlg.Destroy()
self.offeneDB=False
self.MenueUpdate()
self.liste.DeleteAllItems()
self.connection=sqlite3.connect(self.dbname)
self.cursor=self.connection.cursor()
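    # schema: UIDPWD holds one encrypted record per row, UIDPWDbackup
    # keeps dated copies of changed/deleted rows, pwdtest stores the
    # encrypted marker used to verify the entered password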
self.cursor.execute('CREATE TABLE UIDPWD' \
' (ID INTEGER NOT NULL PRIMARY KEY,' \
' daten VARCHAR)')
self.cursor.execute('CREATE TABLE UIDPWDbackup' \
' (ID INTEGER NOT NULL PRIMARY KEY,' \
' daten VARCHAR,' \
' backup DATE)')
self.cursor.execute('CREATE TABLE pwdtest' \
' (ID INTEGER PRIMARY KEY NOT NULL,' \
' pwdhash VARCHAR)')
self.connection.commit()
fc=wx.FileConfig(localFilename=cfgFile_g)
fc.Write("dbname", self.dbname)
fc.Flush()
self.DBoeffnen(db_frisch_angelegt=True)
# ###########################################################
  # Menu: Open
  # Asks for a DB name and opens the DB with that name.
  #
  # Called from: <menu>
def OeffnenGewaehlt(self, event):
dlg=wx.FileDialog(self, message="DB oeffnen", defaultDir=".", defaultFile="tresor2.sqlite", \
wildcard="DBs|*.sqlite|alle|*", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
return
self.dbname=dlg.GetPath()
dlg.Destroy()
self.offeneDB=False
self.MenueUpdate()
self.liste.DeleteAllItems()
self.DBoeffnen()
# ###########################################################
  # Menu: Synchronize (merge changes from another database)
#
def ImportDBGewaehlt(self, event):
if self.offeneDB==False:
wx.MessageBox("Es ist noch keine Datenbank geladen!", "Fehler", wx.OK|wx.ICON_ERROR)
return
# self.cursor.execute('SELECT ID, daten, backup FROM UIDPWDbackup')
# c=self.cursor.fetchall()
# for i in c:
# d=self.tresor.ListeEntschluesseln(i[1])
# print i[2], d
dlg=wx.FileDialog(self, message="DB oeffnen", defaultDir=".", defaultFile="tresor2.sqlite", \
wildcard="DBs|*.sqlite|alle|*", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
return
dbname=dlg.GetPath()
dlg.Destroy()
tresor=DedeCrypt()
dlg=wx.PasswordEntryDialog(self, "Bitte Passwort angeben", dbname)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
return(False)
pw=tresor.PasswortEinstellen(dlg.GetValue())
dlg.Destroy()
connection=sqlite3.connect(dbname)
cursor=connection.cursor()
cursor.execute('SELECT pwdhash FROM pwdtest')
c=cursor.fetchone()
if tresor.PasswortPruefen(c[0])==False:
wx.MessageBox("Passwort scheint falsch zu sein!", "Fehler", wx.OK|wx.ICON_ERROR)
return(False)
dDataMap={}
cursor.execute('SELECT daten, ID FROM UIDPWD')
c=cursor.fetchone()
index=0
while c!=None:
d=tresor.ListeEntschluesseln(c[0])
td=(d[0], d[1], d[2], d[3], d[4], str(c[1]))
dDataMap.update({index : td})
index+=1
c=cursor.fetchone()
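    # naive matching by DB ID: rows with the same ID but differing fields
    # are printed as "Änderung", rows whose ID is missing locally as "Neu"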
for i in dDataMap.values():
found=False
for j in self.dDataMap.values():
        if i[5]==j[5]:
          if i[0]!=j[0] or i[1]!=j[1] or i[2]!=j[2] or i[3]!=j[3] or i[4]!=j[4]:
            print "\nÄnderung\n", i, "\n", j
            found=True
          else:
            found=True # records are identical
# if i[0].lower()==j[0].lower() and i[1].lower()==j[1].lower():
      #   # service and user are identical
# found=True
# if i[2]!=j[2] or i[3]!=j[3] or i[4]!=j[4]:
# print "\nÄnderung\n", i, "\n", j
if found==False:
print "\nNeu\n", i
# ###########################################################
  # Menu: Save settings
  # Saved are:
  #   the position of the window on the screen,
  #   the window dimensions,
  #   the font,
  #   the password display mode and
  #   the database name
  # Called from: <menu>
def SpeichernGewaehlt(self, event):
fc=wx.FileConfig(localFilename=cfgFile_g)
sp=self.parent.GetScreenPosition()
ss=self.parent.GetSizeTuple()
fc.WriteInt("pos_x", sp[0])
fc.WriteInt("pos_y", sp[1])
fc.WriteInt("size_x" , ss[0])
fc.WriteInt("size_y" , ss[1])
fc.WriteInt("font_size", self.font.GetPointSize())
fc.WriteInt("font_family", self.font.GetFamily())
fc.WriteInt("font_style", self.font.GetStyle())
fc.WriteInt("font_weight", self.font.GetWeight())
fc.WriteInt("font_underline", self.font.GetUnderlined())
fc.Write( "font_face", self.font.GetFaceName())
fc.WriteInt("show_pwd", int(self.mview.IsChecked(302)))
fc.Write( "dbname", self.dbname)
fc.Flush()
# ###########################################################
  # Menu: Quit the program
  # Called from: <menu>, OnCharEvent
def BeendenGewaehlt(self, event):
self.parent.Close()
# ###########################################################
  # Menu: new entry
  # Opens the record-edit dialog in "new" mode.
  #
  # Called from: <menu>
def neuerEintragGewaehlt(self, event):
self.EinzelSatzAnzeigeOeffnen(-1)
# ###########################################################
  # Menu: edit entry
  # Opens the record-edit dialog for the selected
  # entry.
  #
  # Called from: <menu>
def EintragAendernGewaehlt(self, event):
s=self.liste.GetFirstSelected()
if s<0:
wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR)
return
self.EinzelSatzAnzeigeOeffnen(s)
# ###########################################################
  # Double-click on a row in the list -> edit the record.
def OnRowDClick(self, event):
self.EinzelSatzAnzeigeOeffnen(event.GetIndex())
# ###########################################################
  # Menu: delete entry
  # Deletes the selected entry after confirmation, updates the DB
  # and then reloads everything from the DB into "self.liste".
  #
  # Called from: <menu>
def EintragLoeschenGewaehlt(self, event):
idx=self.liste.GetFirstSelected()
if idx<0:
wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR)
return
    d= self.liste.GetItem(idx, 0).GetText() # the service name is only needed for the confirmation prompt
    i=int(self.liste.GetItem(idx, 3).GetText()) # DB ID from self.liste
dlg=wx.MessageDialog(self, "Soll der Dienst <"+d+"> wirklich geloescht werden?", \
"Frage", wx.OK|wx.CANCEL)
if dlg.ShowModal()==wx.ID_OK:
self.cursor.execute('INSERT INTO UIDPWDbackup (daten, backup)' \
' SELECT daten, date("now")' \
' FROM UIDPWD WHERE ID=?', (i, ))
self.cursor.execute('DELETE FROM UIDPWD WHERE ID=?', (i, ))
self.connection.commit()
if self.DatenLaden()==True:
      # one -1 to convert the count to an index and
      # one -1 because a record has just been deleted.
      # So when the last record is deleted, -1 is passed
self.DatenAnzeigen(min((idx, self.liste.GetItemCount()-2)))
# ###########################################################
  # Menu: copy username
  # Called from: <menu>
def UsernameKopierenGewaehlt(self, event):
idx=self.liste.GetFirstSelected()
if idx<0:
wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR)
return
self.copy2clipboard(self.liste.GetItem(idx, 1).GetText())
# ###########################################################
  # Menu: copy password
  # Called from: <menu>
def PasswortKopierenGewaehlt(self, event):
idx=self.liste.GetFirstSelected()
if idx<0:
wx.MessageBox("Kein Satz ausgewaehlt", "Fehler", wx.OK|wx.ICON_ERROR)
return
i=int(self.liste.GetItem(idx, 3).GetText())
self.copy2clipboard(self.nachDBID[i][2])
# ###########################################################
  # Menu: choose font
  # Called from: <menu>
def FontGewaehlt(self, event):
data=wx.FontData()
data.SetInitialFont(self.font)
dlg=wx.FontDialog(self, data)
if dlg.ShowModal()==wx.ID_OK:
data=dlg.GetFontData()
self.font=data.GetChosenFont()
self.liste.SetFont(self.font)
dlg.Destroy()
# ###########################################################
  # Menu: "show passwords" toggled
  # Called from: <menu>
def PasswortAnzeigenGewaehlt(self, event):
self.DatenAnzeigen()
# ###########################################################
  # Menu: About
  # Called from: <menu>
def UeberGewaehlt(self, event):
info=wx.AboutDialogInfo()
info.SetName("Passwort-Verwaltung")
info.SetVersion("1.0")
info.SetCopyright("D.A. (04/05.2012)")
info.SetDescription("Ein kleines Programm zum Verwalten von UserID/Passwort-Relationen")
info.SetLicence("Dieses Programm ist freie Software gemaess GNU General Public License")
info.AddDeveloper("Detlev Ahlgrimm")
wx.AboutBox(info)
# ###########################################################
  # Copies "txt" to the clipboard
def copy2clipboard(self, txt):
if wx.TheClipboard.Open():
do=wx.TextDataObject()
do.SetText(txt)
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
else:
wx.MessageBox("Kann Clipboard nicht oeffnen", "Fehler", wx.OK|wx.ICON_ERROR)
# ###########################################################
  # Opens the DB.
  # If the parameter "db_frisch_angelegt" is True, the password
  # is asked for and then assigned to the DB.
  # If the parameter was omitted or passed as False, the password
  # is asked for as well, but checked against the DB instead.
  # If it does not match, the method aborts.
  # If it matches, the database content is read, decrypted and
  # loaded into the program / the display.
  #
  # Called from: __init__, NeueDBGewaehlt, OeffnenGewaehlt
def DBoeffnen(self, db_frisch_angelegt=False):
self.parent.SetStatusText("", 0)
dlg=wx.PasswordEntryDialog(self, "Bitte Passwort angeben", self.dbname)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
self.liste.SetFocus()
return(False)
pw=self.tresor.PasswortEinstellen(dlg.GetValue())
dlg.Destroy()
self.connection=sqlite3.connect(self.dbname)
self.cursor=self.connection.cursor()
if db_frisch_angelegt==True:
self.cursor.execute('INSERT INTO pwdtest (pwdhash) VALUES (?)', (pw, ))
self.connection.commit()
else:
self.cursor.execute('SELECT pwdhash FROM pwdtest')
c=self.cursor.fetchone()
if self.tresor.PasswortPruefen(c[0])==False:
wx.MessageBox("Passwort scheint falsch zu sein!", "Fehler", wx.OK|wx.ICON_ERROR)
return(False)
self.offeneDB=True
self.MenueUpdate()
if self.DatenLaden()==True:
self.DatenAnzeigen()
self.parent.SetStatusText(self.dbname, 0)
return(True)
return(False)
# ###########################################################
  # Loads the content of the current/open DB into:
  #   self.dDataMap, self.sDataMap and self.nachDBID
  # If the password does not match the DB content (which should
  # not actually happen), the method aborts and returns "False".
  # Otherwise "True".
  #
  # Called from: DBoeffnen, EintragLoeschenGewaehlt, EinzelSatzAnzeigeOeffnen
def DatenLaden(self):
self.dDataMap={} # display
self.sDataMap={} # sort
    self.nachDBID={} # keyed by DB ID
# c[0] c[1]
self.cursor.execute('SELECT daten, ID FROM UIDPWD')
c=self.cursor.fetchone()
index=0
while c!=None:
d=self.tresor.ListeEntschluesseln(c[0])
td=(d[0], d[1], d[2], d[3], d[4], str(c[1]))
ts=(d[0].lower(), d[1].lower(), d[2], d[3], d[4], c[1])
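      # "td" keeps the original case for display, "ts" is lowercased
      # so that the ColumnSorterMixin sorts case-insensitively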
self.dDataMap.update({index : td})
self.sDataMap.update({index : ts})
self.nachDBID.update({c[1] : td})
index+=1
c=self.cursor.fetchone()
return(True)
# ###########################################################
  # Displays the content of self.dDataMap. Depending on the menu
  # state, the "Passwort" column is either masked out (with "*"
  # characters) or shown readable.
  # Passing "select" causes the corresponding entry to be
  # selected. An integer is interpreted as an index into the
  # list, a string as a service name.
  # If nothing was passed, the selection is taken over from the
  # old list state.
  # Sort order and visible range are restored after refilling,
  # where possible.
  #
  # Called from: DBoeffnen, EintragLoeschenGewaehlt,
  #              PasswortAnzeigenGewaehlt, EinzelSatzAnzeigeOeffnen
def DatenAnzeigen(self, select=None):
aktuelleSortierung=self.GetSortState()
    if aktuelleSortierung[0]==-1: # if no sort order has been set yet...
      aktuelleSortierung=(0, 1) # ...then default to: column=0, ascending
obersterSichtbarerIndex=self.liste.GetTopItem()
if select==None:
selektierterIndex=self.liste.GetFirstSelected()
if selektierterIndex==-1:
selektierterIndex=0
else:
if type(select)==int:
selektierterIndex=select
else:
        selektierterIndex=None # marker for "determine after refilling"
self.liste.DeleteAllItems()
self.itemDataMap=self.sDataMap
items=self.dDataMap.items()
index=0
for key, data in items:
self.liste.InsertStringItem(index, data[0])
self.liste.SetStringItem(index, 1, data[1])
if self.mview.IsChecked(302)==True:
self.liste.SetStringItem(index, 2, data[2])
else:
self.liste.SetStringItem(index, 2, "*"*len(data[2]))
self.liste.SetStringItem(index, 3, data[5])
self.liste.SetItemData(index, key)
index+=1
    # restore the sort order
    self.SortListItems(aktuelleSortierung[0], aktuelleSortierung[1])
    # make the bottom-most entry visible
    self.liste.Focus(self.liste.GetItemCount()-1)
    # then make the previous top entry visible again
    self.liste.Focus(obersterSichtbarerIndex)
    # with that, the originally visible range should be shown again
if selektierterIndex==None:
selektierterIndex=self.liste.FindItem(0, select)
self.liste.Select(selektierterIndex)
self.liste.EnsureVisible(selektierterIndex)
self.liste.SetFocus()
# ###########################################################
  # Handles key presses in the ListCtrl.
def OnCharEvent(self, event):
t={196 : "Ä", 214 : "Ö", 220 : "Ü", 223 : "ß", 228 : "ä", 246 : "ö", 252 : "ü"}
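    # maps the wx key codes of the German umlauts and sharp s to their characters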
key=event.GetKeyCode()
ctrl=wx.GetKeyState(wx.WXK_CONTROL)
if key==wx.WXK_ESCAPE: # ESC
self.BeendenGewaehlt(event)
elif ctrl==False and ((key>32 and key<128) or # standard ASCII
(key in [196, 214, 220, 223, 228, 246, 252])): # Umlaut
if key>128:
self.suchstring+=t[key]
else:
self.suchstring+=chr(key)
self.parent.SetStatusText(self.suchstring, 1)
p=self.liste.FindItem(0, self.suchstring, True)
if p>=0:
self.liste.Select(p)
self.liste.EnsureVisible(p)
if self.cltimer!=None and self.cltimer.IsRunning():
        # if the timer is already running -> extend it
self.cltimer.Restart(1000)
else:
        # if the timer is not running yet -> start it
self.cltimer=wx.CallLater(1000, self.MehrzeichenSucheTimerAbgelaufen)
else:
event.Skip()
# ###########################################################
  # Resets the search string to an empty string after one second.
def MehrzeichenSucheTimerAbgelaufen(self):
self.suchstring=""
self.parent.SetStatusText(self.suchstring, 1)
# ###########################################################
  # Opens the dialog for the single-record view and processes
  # the data. If "idx" is passed as -1, a new record is created;
  # if "idx" >=0 it is interpreted as an index into "self.liste"
  # and that record is edited.
  # If changes were made, the DB is updated and afterwards
  # everything is reloaded from the DB into "self.liste".
  #
  # Called from: neuerEintragGewaehlt, EintragAendernGewaehlt, OnRowDClick
def EinzelSatzAnzeigeOeffnen(self, idx):
if idx<0:
t="Konto erstellen"
d=u=p=k=""
dt=wx.DateTime.Now()
else:
t="Konto aendern"
i=int(self.liste.GetItem(idx, 3).GetText())
d=self.nachDBID[i][0]
u=self.nachDBID[i][1]
p=self.nachDBID[i][2]
k=self.nachDBID[i][3]
jahr, monat, tag=self.nachDBID[i][4].split("-")
dt=wx.DateTimeFromDMY(int(tag), int(monat)-1, int(jahr))
dlg=EinzelSatz(self, t, self.dDataMap, d, u, p, k, dt)
if dlg.ShowModal()!=wx.ID_OK:
dlg.Destroy()
return
daten=dlg.GibDaten()
dlg.Destroy()
daten[0]=str(daten[0].encode("utf8"))
daten[1]=str(daten[1].encode("utf8"))
daten[2]=str(daten[2].encode("utf8"))
daten[3]=str(daten[3].encode("utf8"))
daten[4]=str(daten[4].FormatISODate())
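    # the date is stored as an ISO string (YYYY-MM-DD) inside the encrypted record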
d=self.tresor.ListeVerschluesseln(daten)
if idx<0:
self.cursor.execute('INSERT INTO UIDPWD (daten) VALUES (?)', (d, ))
self.connection.commit()
else:
      db_id=i # avoid shadowing the builtin id()
      self.cursor.execute('INSERT INTO UIDPWDbackup (daten, backup)' \
                          ' SELECT daten, date("now")' \
                          ' FROM UIDPWD WHERE ID=?', (db_id, ))
      self.cursor.execute('UPDATE UIDPWD SET daten=? WHERE ID=?', (d, db_id))
self.connection.commit()
if self.DatenLaden()==True:
self.DatenAnzeigen(daten[0])
# ###########################################################
  # A dialog for editing a record.
  #
  # Input : initial values for the text fields and
  #         dDataMap, so that it can be checked before the dialog
  #         closes whether the content of "dienst" is unique
  # Output: possibly clipboard content (username or password)
  #         a list with the new values:
  #         [dienst, username, password, kommentar, datum]
#
class EinzelSatz(wx.Dialog):
def __init__(self, parent, title, dDataMap, dienst="", user="", passwd="", komment="", datum=""):
super(EinzelSatz, self).__init__(parent=parent, title=title)
self.dDataMap=dDataMap
self.diensttxt= wx.StaticText( self, label="&Dienst:")
self.dienst= wx.TextCtrl( self, wx.ID_ANY, size=(200, -1))
self.usernametxt= wx.StaticText( self, label="&Benutzername:")
self.username= wx.TextCtrl( self, wx.ID_ANY, size=(200, -1))
self.passwordtxt= wx.StaticText( self, label="&Passwort:")
self.password= wx.TextCtrl( self, wx.ID_ANY, size=(200, -1))
self.generieren= wx.Button( self, wx.ID_ANY, "&Generieren")
self.datumtxt= wx.StaticText( self, label="&Datum:")
self.datum= wx.DatePickerCtrl(self, wx.ID_ANY)
self.kommentartxt=wx.StaticText( self, label="&Kommentar:")
self.kommentar= wx.TextCtrl( self, wx.ID_ANY, size=(450, 100), style=wx.TE_MULTILINE)
self.ok= wx.Button( self, wx.ID_OK, "&OK")
self.abbruch= wx.Button( self, wx.ID_CANCEL, "&Abbruch")
self.dienst.SetValue(dienst)
self.username.SetValue(user)
self.password.SetValue(passwd)
self.kommentar.SetValue(komment)
self.datum.SetValue(datum)
topsizer= wx.BoxSizer(wx.VERTICAL)
gbsizer= wx.GridBagSizer(2, 3)
l4sizer= wx.BoxSizer(wx.HORIZONTAL)
# size(x, y) pos(y, x) span(y, x)
gbsizer.Add(self.diensttxt, (0, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.dienst, (0, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.usernametxt, (1, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.username, (1, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.passwordtxt, (2, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.password, (2, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.generieren, (2, 2), flag=wx.LEFT, border=10)
gbsizer.Add(self.datumtxt, (3, 0), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
gbsizer.Add(self.datum, (3, 1), flag=wx.ALIGN_CENTER_VERTICAL|wx.ALL, border=1)
l4sizer.Add(self.ok, 0, wx.ALL, 1)
l4sizer.Add(self.abbruch, 0, wx.ALL, 1)
topsizer.Add(gbsizer, 0, wx.ALL, 5)
topsizer.Add(self.kommentartxt, 0, wx.ALL, 5)
topsizer.Add(self.kommentar, 0, wx.ALL, 5)
topsizer.Add(l4sizer, 0, wx.ALL, 5)
self.SetSizerAndFit(topsizer)
self.generieren.Bind( wx.EVT_BUTTON, self.GenerierenGewaehlt)
self.ok.Bind( wx.EVT_BUTTON, self.OkGewaehlt)
self.abbruch.Bind( wx.EVT_BUTTON, self.AbbruchGewaehlt)
self.username.Bind( wx.EVT_LEFT_DCLICK, self.username_dclick)
self.password.Bind( wx.EVT_LEFT_DCLICK, self.password_dclick)
self.username.SetToolTip(wx.ToolTip('Doppelklick kopiert den Namen ins Clipboard'))
    self.password.SetToolTip(wx.ToolTip('Doppelklick kopiert das Passwort ins Clipboard'))
self.ok.SetDefault()
self.dienst.SetFocus()
# ###########################################################
  # Copies self.username to the clipboard
def username_dclick(self, event):
self.username.SetSelection(-1, -1)
self.copy2clipboard(self.username.GetValue())
# ###########################################################
  # Copies self.password to the clipboard
def password_dclick(self, event):
self.password.SetSelection(-1, -1)
self.copy2clipboard(self.password.GetValue())
# ###########################################################
  # Copies "txt" to the clipboard
def copy2clipboard(self, txt):
if wx.TheClipboard.Open():
do=wx.TextDataObject()
do.SetText(txt)
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
else:
wx.MessageBox("Kann Clipboard nicht oeffnen", "Fehler", wx.OK|wx.ICON_ERROR)
# ###########################################################
  # "Generieren" button (opens the password generator)
def GenerierenGewaehlt(self, event):
dlg=PasswortGenerator(self)
if dlg.ShowModal()==wx.ID_OK:
self.password.SetValue(dlg.GibPasswort())
dlg.Destroy()
# ###########################################################
  # "OK" button
def OkGewaehlt(self, event):
self.EndModal(wx.ID_OK)
# ###########################################################
  # "Abbruch" (cancel) button
def AbbruchGewaehlt(self, event):
self.EndModal(wx.ID_CANCEL)
# ###########################################################
  # Returns the entered data as a list
def GibDaten(self):
return([self.dienst.GetValue(), self.username.GetValue(), \
self.password.GetValue(), self.kommentar.GetValue(), \
self.datum.GetValue()])
# ###########################################################
# A dialog for generating passwords
# Input : none
# Output: a string containing a password (or "")
class PasswortGenerator(wx.Dialog):
def __init__(self, parent, id=wx.ID_ANY, title="Passwort-Erzeugung"):
wx.Dialog.__init__(self, parent, id, title)
sb=wx.StaticBox(self, -1, " dieses Passwort... ")
c=["gross/klein", "nur klein", "nur gross"]
self.buchstaben_jn= wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Buchstaben")
self.buchstaben_typ= wx.RadioBox(self, wx.ID_ANY, "", choices=c)
self.ziffern_jn= wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Ziffern")
self.sonderzeichen_jn=wx.CheckBox(self, wx.ID_ANY, "...enthaelt &Sonderzeichen")
self.beginn_jn= wx.CheckBox(self, wx.ID_ANY, "...beg&innt mit einem Buchstaben")
self.buchstaben_jn.SetValue(True)
self.ziffern_jn.SetValue(True)
self.beginn_jn.SetValue(True)
st1= wx.StaticText(self, wx.ID_ANY, "...hat eine &Laenge von:")
st2= wx.StaticText(self, wx.ID_ANY, " bis:")
st3= wx.StaticText(self, wx.ID_ANY, " Zeichen")
self.laenge_u=wx.SpinCtrl(self, wx.ID_ANY, "", size=(50, -1), min=4, max=32, initial=8)
self.laenge_o=wx.SpinCtrl(self, wx.ID_ANY, "", size=(50, -1), min=8, max=40, initial=10)
st4= wx.StaticText(self, wx.ID_ANY, "&Passwort:")
self.passwort=wx.TextCtrl(self, wx.ID_ANY, size=(200, -1))
dummy= wx.StaticText(self, wx.ID_ANY, "", size=(100, -1))
erzeugen_but= wx.Button(self, wx.ID_ANY, "&Erzeuge")
self.ok_but= wx.Button(self, wx.ID_OK, "&Ok")
abbruch_but= wx.Button(self, wx.ID_CANCEL, "&Abbruch")
self.ok_but.Disable()
topsizer=wx.BoxSizer(wx.VERTICAL)
sbsizer= wx.StaticBoxSizer(sb, wx.VERTICAL)
l1sizer= wx.BoxSizer(wx.HORIZONTAL)
l2sizer= wx.BoxSizer(wx.HORIZONTAL)
l3sizer= wx.BoxSizer(wx.HORIZONTAL)
l4sizer= wx.BoxSizer(wx.HORIZONTAL)
l1sizer.Add(self.buchstaben_jn, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l1sizer.Add(self.buchstaben_typ, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sbsizer.Add(l1sizer, 0, wx.ALL, 0)
sbsizer.Add(self.ziffern_jn, 0, wx.ALL, 5)
sbsizer.Add(self.sonderzeichen_jn, 0, wx.ALL, 5)
sbsizer.Add(self.beginn_jn, 0, wx.ALL, 5)
l2sizer.Add(st1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l2sizer.Add(self.laenge_u, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l2sizer.Add(st2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l2sizer.Add(self.laenge_o, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l2sizer.Add(st3, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
sbsizer.Add(l2sizer, 0, wx.ALL, 0)
topsizer.Add(sbsizer, 0, wx.ALL, 0)
l3sizer.Add(st4, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
l3sizer.Add(self.passwort, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
topsizer.Add(l3sizer, 0, wx.ALL, 0)
l4sizer.Add(erzeugen_but, 0, wx.ALL, 5)
l4sizer.Add(dummy, 0, wx.ALL, 5)
l4sizer.Add(self.ok_but, 0, wx.ALL, 5)
l4sizer.Add(abbruch_but, 0, wx.ALL, 5)
topsizer.Add(l4sizer, 0, wx.ALL, 0)
self.buchstaben_jn.Bind(wx.EVT_CHECKBOX, self.buchstaben_jn_wahl)
self.laenge_u.Bind( wx.EVT_SPINCTRL, self.laenge_u_wahl)
erzeugen_but.Bind( wx.EVT_BUTTON, self.erzeugen_but_wahl)
self.passwort.Bind( wx.EVT_TEXT, self.passwort_wahl)
self.SetSizerAndFit(topsizer)
erzeugen_but.SetFocus()
# ###########################################################
    # Returns the password
def GibPasswort(self):
return(self.passwort.GetValue())
# ###########################################################
    # Enables/disables the letter-type radio box according to
    # the "contains letters" checkbox
def buchstaben_jn_wahl(self, event):
if self.buchstaben_jn.GetValue()==False:
self.buchstaben_typ.Disable()
self.beginn_jn.Disable()
else:
self.buchstaben_typ.Enable()
self.beginn_jn.Enable()
# ###########################################################
    # Ensures that laenge_u <= laenge_o always holds
def laenge_u_wahl(self, event):
self.laenge_o.SetRange(self.laenge_u.GetValue(), 40)
# ###########################################################
# Button "Erzeugen" gewaehlt
def erzeugen_but_wahl(self, event):
        # first define the individual character pools
bg="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
bk="abcdefghijklmnopqrstuvwxyz"
z="0123456789"
s="!$%&/(){}?#*+-,;.:<>"
        # then combine the individual pools into one overall pool
        # according to the current settings
bm=""
if self.buchstaben_jn.GetValue()==True:
bt=self.buchstaben_typ.GetSelection()
if bt==0: bm+=bg+bk
elif bt==1: bm+=bk
else: bm+=bg
wm=bm
if self.ziffern_jn.GetValue()==True:
wm+=z
if self.sonderzeichen_jn.GetValue()==True:
wm+=s
# "wm" enthaelt jetzt den Gesamt-Wertevorrat
pl=random.randrange(self.laenge_u.GetValue(), self.laenge_o.GetValue()+1)
if self.beginn_jn.IsEnabled()==True and self.beginn_jn.GetValue()==True:
            # must begin with a letter
pwl=random.sample(bm, 1)
pwl+=random.sample(wm, pl-1)
else:
pwl=random.sample(wm, pl)
pw=""
for pwc in pwl:
pw+=pwc
self.passwort.SetValue(pw)
# ###########################################################
    # Password text changed
    # If the password has at least the configured minimum length,
    # the OK button is enabled; otherwise it is greyed out.
def passwort_wahl(self, event):
if len(self.passwort.GetValue())>=self.laenge_u.GetValue():
self.ok_but.Enable()
else:
self.ok_but.Disable()
# ###########################################################
# The launcher
if __name__=='__main__':
fc=wx.FileConfig(localFilename=cfgFile_g)
spx=fc.ReadInt("pos_x", -1)
spy=fc.ReadInt("pos_y", -1)
ssx=fc.ReadInt("size_x", -1)
ssy=fc.ReadInt("size_y", -1)
    sp=(spx, spy) # (-1, -1) corresponds to wx.DefaultPosition
    ss=(ssx, ssy) # (-1, -1) corresponds to wx.DefaultSize
app=wx.App()
frame=TresorGUI(None, pos=sp, size=ss).Show()
app.MainLoop()
| gpl-3.0 | 4,122,978,496,484,508,000 | 36.832407 | 108 | 0.606256 | false | 2.875369 | false | false | false |
june-yang/megautils | megautils/raid_ircu/virtual_driver.py | 1 | 5356 | # Copyright 2016 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Raid virtual driver"""
import copy
import re
import os
import json
import jsonschema
from jsonschema import exceptions as json_schema_exc
from megautils.raid_ircu import mega
from megautils import exception
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
RAID_CONFIG_SCHEMA = os.path.join(CURRENT_DIR, "raid_config_schema.json")
class VirtualDriver(object):
def __init__(self, adapter_id=None, id=None):
self.adapter = adapter_id
self.id = id
self.volume_id = ''
self.pi_supported = ''
self.status_of_volume = ''
self.volume_wwid = ''
self.raid_level = ''
self.size = ''
self.physical_hard_disks = None
def __flush__(self):
if self.adapter == None or self.id == None:
raise exception.InvalidParameterValue()
        # grep for the requested volume plus the block of lines that follows it
        cmd = '%s LIST | grep -A 1000 "IR volume %s"' % (self.adapter, self.id)
ret = self._get_client().command(cmd)
self._handle(ret, multi_vd=False)
def _get_client(self):
return mega.Mega()
def _handle(self, retstr, multi_vd=True):
vds = []
for line in retstr:
if line.startswith('IR volume'):
if not multi_vd and len(vds) > 0:
return vds[0]
                # flush the previously parsed volume before starting a new one
                if self.id is not None and multi_vd:
                    vds.append(self.copy())
                offset = line.split(' ')
                self.id = int(offset[-1])
if line.startswith(' Volume ID'):
offset = line.find(':')
self.volume_id = int(line[offset + 1:].strip())
elif line.startswith(' PI Supported'):
offset = line.find(':')
self.pi_supported = line[offset + 1:].strip()
elif line.startswith(' Status of volume'):
offset = line.find(':')
self.status_of_volume = line[offset + 1:].strip()
elif line.startswith(' Volume wwid'):
offset = line.find(':')
self.volume_wwid = line[offset + 1:].strip()
elif line.startswith(' RAID level'):
offset = line.find(':')
                self.raid_level = line[offset + 1:].strip()
elif line.startswith(' Size'):
offset = line.find(':')
self.size = int(line[offset + 1:].strip())
elif line.startswith(' Physical hard disks'):
offset = line.find(':')
if not self.physical_hard_disks:
self.physical_hard_disks = []
elif line.startswith(' PHY'):
offset = line.find(':')
self.physical_hard_disks.append(line[offset + 1:].strip())
if self.id is not None:
vds.append(self.copy())
return vds
def copy(self):
return copy.deepcopy(self)
def create(self, raid_level, disks):
"""
        Create a virtual drive from the given disks
        :param raid_level: RAID level
        :param disks: list of disks in LSI "enclosure:slot" format
"""
disk_formater = re.compile(r'^[0-9]+:[0-9]+$')
for disk in disks:
if not re.match(disk_formater, disk):
raise exception.InvalidDiskFormater(disk=disk)
        if raid_level not in [mega.RAID_0, mega.RAID_1, mega.RAID_10]:
            raise exception.InvalidParameterValue()
        cmd = '%s CREATE %s MAX %s' % \
             (self.adapter, mega.RAID_LEVEL_INPUT_MAPPING.get(raid_level),
              ' '.join(disks))
        ret = self._get_client().command(cmd)
self.id = None
for line in ret.readlines():
offset = line.find('Created VD')
if offset < 0:
continue
            self.id = line[offset + 11:].strip()
break
if not self.id:
raise exception.MegaCLIError()
self.__flush__()
def destroy(self):
"""
Delete this raid
:return:
"""
self.__flush__()
cmd = '%s DELETEVOLUME %s' % (self.adapter, self.volume_id)
self._get_client().command(cmd)
self.id = None
def getall_virtual_drivers(self):
"""
Get all virtual drivers
:return:
"""
if self.adapter == None:
raise exception.InvalidParameterValue()
cmd = '%s LIST' % self.adapter
ret = self._get_client().command(cmd)
return self._handle(ret, multi_vd=True)
def set_boot_able(self):
"""
Set current virtual driver bootable
:return:
"""
self.__flush__()
cmd = '%s BOOTIR %s' % (self.adapter, self.volume_id)
self._get_client().command(cmd)
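
# Usage sketch (the adapter index and "enclosure:slot" disk IDs below are
# illustrative, not taken from a real system):
#
#   vd = VirtualDriver(adapter_id=0)
#   vd.create(mega.RAID_1, ['1:0', '1:1'])
#   vd.set_boot_able()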
| apache-2.0 | -1,302,649,952,105,430,300 | 32.267081 | 79 | 0.546303 | false | 3.988086 | false | false | false |
Verizon/libcloud | libcloud/pricing.py | 18 | 6632 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
"""
A class which handles loading the pricing files.
"""
import os.path
from os.path import join as pjoin
try:
import simplejson as json
JSONDecodeError = json.JSONDecodeError
except ImportError:
import json
JSONDecodeError = ValueError
from libcloud.utils.connection import get_response_object
__all__ = [
'get_pricing',
'get_size_price',
'set_pricing',
'clear_pricing_data',
'download_pricing_file'
]
# Default URL to the pricing file
DEFAULT_FILE_URL = 'https://git-wip-us.apache.org/repos/asf?p=libcloud.git;a=blob_plain;f=libcloud/data/pricing.json' # NOQA
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
DEFAULT_PRICING_FILE_PATH = pjoin(CURRENT_DIRECTORY, 'data/pricing.json')
CUSTOM_PRICING_FILE_PATH = os.path.expanduser('~/.libcloud/pricing.json')
# Pricing data cache
PRICING_DATA = {
'compute': {},
'storage': {}
}
VALID_PRICING_DRIVER_TYPES = ['compute', 'storage']
def get_pricing_file_path(file_path=None):
if os.path.exists(CUSTOM_PRICING_FILE_PATH) and \
os.path.isfile(CUSTOM_PRICING_FILE_PATH):
# Custom pricing file is available, use it
return CUSTOM_PRICING_FILE_PATH
return DEFAULT_PRICING_FILE_PATH
def get_pricing(driver_type, driver_name, pricing_file_path=None):
"""
Return pricing for the provided driver.
:type driver_type: ``str``
:param driver_type: Driver type ('compute' or 'storage')
:type driver_name: ``str``
:param driver_name: Driver name
:type pricing_file_path: ``str``
:param pricing_file_path: Custom path to a price file. If not provided
it uses a default path.
:rtype: ``dict``
:return: Dictionary with pricing where a key name is size ID and
the value is a price.
"""
if driver_type not in VALID_PRICING_DRIVER_TYPES:
        raise AttributeError('Invalid driver type: %s' % driver_type)
if driver_name in PRICING_DATA[driver_type]:
return PRICING_DATA[driver_type][driver_name]
if not pricing_file_path:
pricing_file_path = get_pricing_file_path(file_path=pricing_file_path)
with open(pricing_file_path) as fp:
content = fp.read()
pricing_data = json.loads(content)
size_pricing = pricing_data[driver_type][driver_name]
for driver_type in VALID_PRICING_DRIVER_TYPES:
# pylint: disable=maybe-no-member
pricing = pricing_data.get(driver_type, None)
if pricing:
PRICING_DATA[driver_type] = pricing
return size_pricing
def set_pricing(driver_type, driver_name, pricing):
"""
Populate the driver pricing dictionary.
:type driver_type: ``str``
:param driver_type: Driver type ('compute' or 'storage')
:type driver_name: ``str``
:param driver_name: Driver name
:type pricing: ``dict``
:param pricing: Dictionary where a key is a size ID and a value is a price.
"""
PRICING_DATA[driver_type][driver_name] = pricing
def get_size_price(driver_type, driver_name, size_id):
"""
Return price for the provided size.
:type driver_type: ``str``
:param driver_type: Driver type ('compute' or 'storage')
:type driver_name: ``str``
:param driver_name: Driver name
:type size_id: ``str`` or ``int``
:param size_id: Unique size ID (can be an integer or a string - depends on
the driver)
:rtype: ``float``
:return: Size price.
"""
pricing = get_pricing(driver_type=driver_type, driver_name=driver_name)
price = float(pricing[size_id])
return price
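
# Usage sketch (the driver and size names below are made up for
# illustration; real names come from the bundled pricing.json):
#
#   set_pricing('compute', 'dummy', {'small': 0.05, 'large': 0.20})
#   get_size_price(driver_type='compute', driver_name='dummy',
#                  size_id='small')  # -> 0.05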
def invalidate_pricing_cache():
"""
Invalidate pricing cache for all the drivers.
"""
PRICING_DATA['compute'] = {}
PRICING_DATA['storage'] = {}
def clear_pricing_data():
"""
Invalidate pricing cache for all the drivers.
Note: This method does the same thing as invalidate_pricing_cache and is
here for backward compatibility reasons.
"""
invalidate_pricing_cache()
def invalidate_module_pricing_cache(driver_type, driver_name):
"""
Invalidate the cache for the specified driver.
:type driver_type: ``str``
:param driver_type: Driver type ('compute' or 'storage')
:type driver_name: ``str``
:param driver_name: Driver name
"""
if driver_name in PRICING_DATA[driver_type]:
del PRICING_DATA[driver_type][driver_name]
def download_pricing_file(file_url=DEFAULT_FILE_URL,
file_path=CUSTOM_PRICING_FILE_PATH):
"""
Download pricing file from the file_url and save it to file_path.
:type file_url: ``str``
:param file_url: URL pointing to the pricing file.
:type file_path: ``str``
:param file_path: Path where a download pricing file will be saved.
"""
dir_name = os.path.dirname(file_path)
if not os.path.exists(dir_name):
# Verify a valid path is provided
msg = ('Can\'t write to %s, directory %s, doesn\'t exist' %
(file_path, dir_name))
raise ValueError(msg)
if os.path.exists(file_path) and os.path.isdir(file_path):
msg = ('Can\'t write to %s file path because it\'s a'
' directory' % (file_path))
raise ValueError(msg)
response = get_response_object(file_url)
body = response.body
# Verify pricing file is valid
try:
data = json.loads(body)
except JSONDecodeError:
msg = 'Provided URL doesn\'t contain valid pricing data'
raise Exception(msg)
# pylint: disable=maybe-no-member
if not data.get('updated', None):
msg = 'Provided URL doesn\'t contain valid pricing data'
raise Exception(msg)
# No need to stream it since file is small
with open(file_path, 'w') as file_handle:
file_handle.write(body)
| apache-2.0 | -5,401,085,464,402,850,000 | 29.145455 | 125 | 0.663299 | false | 3.525784 | false | false | false |
DamienIrving/ocean-analysis | visualisation/energy_budget/plot_zonal_toa_breakdown.py | 1 | 10737 | """
Filename: plot_zonal_toa_breakdown.py
Author: Damien Irving, [email protected]
Description:
"""
# Import general Python modules
import sys, os, pdb, glob
import argparse
import numpy
import iris
from iris.experimental.equalise_cubes import equalise_attributes
import iris.plot as iplt
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn
seaborn.set_context('talk')
# Import my modules
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
repo_dir = os.path.join(repo_dir, directory)
if directory == 'ocean-analysis':
break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
import general_io as gio
import timeseries
import convenient_universal as uconv
import grids
except ImportError:
raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
# Define functions
aa_physics = {'CanESM2': 'p4', 'CCSM4': 'p10', 'CSIRO-Mk3-6-0': 'p4',
'GFDL-CM3': 'p1', 'GISS-E2-H': 'p107', 'GISS-E2-R': 'p107', 'NorESM1-M': 'p1'}
def ensemble_grid():
"""Make a dummy cube with desired grid."""
lat_values = numpy.arange(-89.5, 90, 1.0)
latitude = iris.coords.DimCoord(lat_values,
var_name='lat',
standard_name='latitude',
long_name='latitude',
units='degrees_north',
coord_system=iris.coord_systems.GeogCS(iris.fileformats.pp.EARTH_RADIUS))
dummy_data = numpy.zeros(len(lat_values))
new_cube = iris.cube.Cube(dummy_data, dim_coords_and_dims=[(latitude, 0)])
new_cube.coord('latitude').guess_bounds()
return new_cube
def ensemble_mean(cube_list):
"""Calculate the ensemble mean."""
if len(cube_list) > 1:
equalise_attributes(cube_list)
ensemble_cube = cube_list.merge_cube()
ensemble_mean = ensemble_cube.collapsed('ensemble_member', iris.analysis.MEAN)
else:
ensemble_mean = cube_list[0]
return ensemble_mean
def calc_anomaly(cube):
"""Calculate the anomaly."""
anomaly = cube.copy()
anomaly.data = anomaly.data - anomaly.data[0]
anomaly = anomaly[-1, ::]
anomaly.remove_coord('time')
return anomaly
def regrid(anomaly, ref_cube):
"""Regrid to reference cube, preserving the data sum"""
lat_bounds = anomaly.coord('latitude').bounds
lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, lat_bounds)
anomaly_scaled = anomaly / lat_diffs
ref_points = [('latitude', ref_cube.coord('latitude').points)]
anomaly_regridded = anomaly_scaled.interpolate(ref_points, iris.analysis.Linear())
ref_lat_bounds = ref_cube.coord('latitude').bounds
ref_lat_diffs = numpy.apply_along_axis(lambda x: x[1] - x[0], 1, ref_lat_bounds)
new_anomaly = anomaly_regridded * ref_lat_diffs
return new_anomaly
def get_data(infile, var, metadata_dict, time_constraint, ensemble_number, ref_cube=False):
"""Get data"""
if infile:
cube = iris.load_cube(infile[0], var & time_constraint)
metadata_dict[infile[0]] = cube.attributes['history']
anomaly = calc_anomaly(cube)
final_value = anomaly.data.sum()
print(var, 'final global total:', final_value)
if ref_cube:
grid_match = ref_cube.coord('latitude') == cube.coord('latitude')
if not grid_match:
anomaly = regrid(anomaly, ref_cube)
final_value = anomaly.data.sum()
print(var, 'final global total (after regrid):', final_value)
if ref_cube.standard_name:
anomaly.replace_coord(ref_cube.coord('latitude'))
else:
if not anomaly.coord('latitude').has_bounds():
anomaly.coord('latitude').bounds = ref_cube.coord('latitude').bounds
new_aux_coord = iris.coords.AuxCoord(ensemble_number, long_name='ensemble_member', units='no_unit')
anomaly.add_aux_coord(new_aux_coord)
else:
cube = None
anomaly = None
final_value = None
return cube, anomaly, metadata_dict
def plot_breakdown(gs, rndt_anomaly, rsdt_anomaly, rsut_anomaly, rlut_anomaly, linewidth=None, decorate=True, ylim=None):
"""Plot netTOA and its component parts"""
ax = plt.subplot(gs)
plt.sca(ax)
if decorate:
labels = ['netTOA', 'rsdt', 'rsut', 'rlut']
else:
labels = [None, None, None, None]
iplt.plot(rndt_anomaly, color='black', label=labels[0], linewidth=linewidth)
iplt.plot(rsdt_anomaly, color='yellow', label=labels[1], linewidth=linewidth)
iplt.plot(rsut_anomaly * -1, color='orange', label=labels[2], linewidth=linewidth)
iplt.plot(rlut_anomaly * -1, color='purple', label=labels[3], linewidth=linewidth)
if ylim:
ylower, yupper = ylim
plt.ylim(ylower * 1e22, yupper * 1e22)
if decorate:
plt.ylabel('$J \; lat^{-1}$')
plt.xlim(-90, 90)
plt.axhline(y=0, color='0.5', linestyle='--')
plt.legend()
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0), useMathText=True)
ax.yaxis.major.formatter._useMathText = True
def get_time_text(time_bounds):
"""Time text for plot title"""
start_year = time_bounds[0].split('-')[0]
end_year = time_bounds[-1].split('-')[0]
time_text = '%s-%s' %(start_year, end_year)
return time_text
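
# For example (illustrative values), get_time_text(('1861-01-01', '2005-12-31'))
# returns '1861-2005'.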
def main(inargs):
"""Run program"""
nexp = len(inargs.experiments)
fig = plt.figure(figsize=[11 * nexp, 14])
gs = gridspec.GridSpec(2, nexp)
nmodels = len(inargs.models)
ensemble_ref_cube = ensemble_grid() if nmodels > 1 else None
var_list = ['rndt', 'rsdt', 'rsut', 'rlut']
plot_index = 0
time_constraint = gio.get_time_constraint(inargs.time)
time_text = get_time_text(inargs.time)
ensemble_dict = {}
for experiment in inargs.experiments:
data_dict = {}
for var in var_list:
data_dict[var] = iris.cube.CubeList([])
for index, model in enumerate(inargs.models):
mip = 'r1i1' + aa_physics[model] if experiment == 'historicalMisc' else 'r1i1p1'
dir_exp = experiment.split('-')[-1]
file_exp = 'historical-' + experiment if experiment[0:3] == 'rcp' else experiment
mydir = '/g/data/r87/dbi599/DRSv2/CMIP5/%s/%s/yr' %(model, dir_exp)
rndt_file = glob.glob('%s/atmos/%s/rndt/latest/dedrifted/rndt-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip))
rsdt_file = glob.glob('%s/atmos/%s/rsdt/latest/dedrifted/rsdt-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip))
rsut_file = glob.glob('%s/atmos/%s/rsut/latest/dedrifted/rsut-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip))
rlut_file = glob.glob('%s/atmos/%s/rlut/latest/dedrifted/rlut-zonal-sum_Ayr_%s_%s_%s_cumsum-all.nc' %(mydir, mip, model, file_exp, mip))
anomaly_dict = {}
metadata_dict = {}
rndt_cube, anomaly_dict['rndt'], metadata_dict = get_data(rndt_file, 'TOA Incoming Net Radiation', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube)
rsdt_cube, anomaly_dict['rsdt'], metadata_dict = get_data(rsdt_file, 'toa_incoming_shortwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube)
rsut_cube, anomaly_dict['rsut'], metadata_dict = get_data(rsut_file, 'toa_outgoing_shortwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube)
rlut_cube, anomaly_dict['rlut'], metadata_dict = get_data(rlut_file, 'toa_outgoing_longwave_flux', metadata_dict, time_constraint, index, ref_cube=ensemble_ref_cube)
if nmodels > 1:
plot_breakdown(gs[plot_index], anomaly_dict['rndt'], anomaly_dict['rsdt'], anomaly_dict['rsut'], anomaly_dict['rlut'],
linewidth=0.3, decorate=False, ylim=inargs.ylim)
for var in var_list:
data_dict[var].append(anomaly_dict[var])
ensemble_dict[experiment] = {}
for var in var_list:
cube_list = iris.cube.CubeList(filter(None, data_dict[var]))
ensemble_dict[experiment][var] = ensemble_mean(cube_list)
linewidth = None if nmodels == 1 else 4.0
model_label = 'ensemble' if nmodels > 1 else inargs.models[0]
experiment_label = 'historicalAA' if experiment == 'historicalMisc' else experiment
plot_breakdown(gs[plot_index], ensemble_dict[experiment]['rndt'], ensemble_dict[experiment]['rsdt'],
ensemble_dict[experiment]['rsut'], ensemble_dict[experiment]['rlut'], ylim=inargs.ylim)
plt.title(experiment_label)
plot_index = plot_index + 1
fig.suptitle('zonally integrated heat accumulation, ' + time_text, fontsize='large')
    dpi = inargs.dpi if inargs.dpi else plt.rcParams['figure.dpi']
print('dpi =', dpi)
plt.savefig(inargs.outfile, bbox_inches='tight', dpi=dpi)
gio.write_metadata(inargs.outfile, file_info=metadata_dict)
if __name__ == '__main__':
extra_info ="""
author:
Damien Irving, [email protected]
"""
    description = 'Plot the zonally integrated TOA radiation breakdown'
parser = argparse.ArgumentParser(description=description,
epilog=extra_info,
argument_default=argparse.SUPPRESS,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("outfile", type=str, help="name of output file. e.g. /g/data/r87/dbi599/figures/energy-check-zonal/energy-check-zonal_yr_model_experiment_mip_1861-2005.png")
parser.add_argument("--models", type=str, nargs='*', help="models")
parser.add_argument("--experiments", type=str, nargs='*', choices=('historical', 'historicalGHG', 'historicalMisc', 'historical-rcp85', 'rcp85'), help="experiments")
parser.add_argument("--time", type=str, nargs=2, metavar=('START_DATE', 'END_DATE'), default=('1861-01-01', '2005-12-31'),
help="Time period [default = 1861-2005]")
parser.add_argument("--ylim", type=float, nargs=2, default=None,
help="y limits for plots (x 10^22)")
parser.add_argument("--dpi", type=float, default=None,
help="Figure resolution in dots per square inch [default=auto]")
args = parser.parse_args()
main(args)
| mit | 3,990,422,921,149,656,600 | 37.346429 | 203 | 0.61656 | false | 3.391346 | false | false | false |
tensorflow/tensorboard | tensorboard/uploader/server_info.py | 1 | 9363 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Initial server communication to determine session parameters."""
from google.protobuf import message
import requests
from absl import logging
from tensorboard import version
from tensorboard.plugins.scalar import metadata as scalars_metadata
from tensorboard.uploader.proto import server_info_pb2
# Request timeout for communicating with remote server.
_REQUEST_TIMEOUT_SECONDS = 10
# Minimum interval between initiating write WriteScalar RPCs, if not specified
# by server_info, in milliseconds
_DEFAULT_MIN_SCALAR_REQUEST_INTERVAL = 5000
# Minimum interval between initiating write WriteTensor RPCs, if not specified
# by server_info, in milliseconds.
_DEFAULT_MIN_TENSOR_REQUEST_INTERVAL = 1000
# Minimum interval between initiating blob write RPC streams, if not specified
# by server_info, in milliseconds.
# This may differ from the above RPC rate limits, because blob streams
# are not batched, so sending a sequence of N blobs requires N streams, which
# could reasonably be sent more frequently.
_DEFAULT_MIN_BLOB_REQUEST_INTERVAL = 1000
# Maximum WriteScalar request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we should pad a bit to mitigate any errors
# in our bookkeeping. Currently, we pad a lot because WriteScalar is relatively
# slow and we would otherwise risk Deadline Exceeded errors.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
_DEFAULT_MAX_SCALAR_REQUEST_SIZE = 128 * (2 ** 10) # 128KiB
# Maximum WriteTensor request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we should pad a bit to mitigate any errors
# in our bookkeeping. Currently, we pad a lot.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
_DEFAULT_MAX_TENSOR_REQUEST_SIZE = 512 * (2 ** 10) # 512KiB
# Maximum WriteBlob request size, if not specified by server_info, in bytes.
# The server-side limit is 4 MiB [1]; we pad with a 256 KiB chunk to mitigate
# any errors in our bookkeeping.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
_DEFAULT_MAX_BLOB_REQUEST_SIZE = 4 * (2 ** 20) - 256 * (2 ** 10) # 4MiB-256KiB
# Maximum blob size, if not specified by server_info, in bytes.
_DEFAULT_MAX_BLOB_SIZE = 10 * (2 ** 20) # 10MiB
# Maximum tensor point size, if not specified by server_info, in bytes.
_DEFAULT_MAX_TENSOR_POINT_SIZE = 16 * (2 ** 10) # 16KiB
def _server_info_request(upload_plugins):
"""Generates a ServerInfoRequest
Args:
upload_plugins: List of plugin names requested by the user and to be
verified by the server.
Returns:
A `server_info_pb2.ServerInfoRequest` message.
"""
request = server_info_pb2.ServerInfoRequest()
request.version = version.VERSION
request.plugin_specification.upload_plugins[:] = upload_plugins
return request
def fetch_server_info(origin, upload_plugins):
"""Fetches server info from a remote server.
Args:
origin: The server with which to communicate. Should be a string
like "https://tensorboard.dev", including protocol, host, and (if
needed) port.
upload_plugins: List of plugins names requested by the user and to be
verified by the server.
Returns:
A `server_info_pb2.ServerInfoResponse` message.
Raises:
CommunicationError: Upon failure to connect to or successfully
communicate with the remote server.
"""
endpoint = "%s/api/uploader" % origin
server_info_request = _server_info_request(upload_plugins)
post_body = server_info_request.SerializeToString()
logging.info("Requested server info: <%r>", server_info_request)
try:
response = requests.post(
endpoint,
data=post_body,
timeout=_REQUEST_TIMEOUT_SECONDS,
headers={"User-Agent": "tensorboard/%s" % version.VERSION},
)
except requests.RequestException as e:
raise CommunicationError("Failed to connect to backend: %s" % e)
if not response.ok:
raise CommunicationError(
"Non-OK status from backend (%d %s): %r"
% (response.status_code, response.reason, response.content)
)
try:
return server_info_pb2.ServerInfoResponse.FromString(response.content)
except message.DecodeError as e:
raise CommunicationError(
"Corrupt response from backend (%s): %r" % (e, response.content)
)
def create_server_info(frontend_origin, api_endpoint, upload_plugins):
"""Manually creates server info given a frontend and backend.
Args:
frontend_origin: The origin of the TensorBoard.dev frontend, like
"https://tensorboard.dev" or "http://localhost:8000".
api_endpoint: As to `server_info_pb2.ApiServer.endpoint`.
upload_plugins: List of plugin names requested by the user and to be
verified by the server.
Returns:
A `server_info_pb2.ServerInfoResponse` message.
"""
result = server_info_pb2.ServerInfoResponse()
result.compatibility.verdict = server_info_pb2.VERDICT_OK
result.api_server.endpoint = api_endpoint
url_format = result.url_format
placeholder = "{{EID}}"
while placeholder in frontend_origin:
placeholder = "{%s}" % placeholder
url_format.template = "%s/experiment/%s/" % (frontend_origin, placeholder)
url_format.id_placeholder = placeholder
result.plugin_control.allowed_plugins[:] = upload_plugins
return result
def experiment_url(server_info, experiment_id):
"""Formats a URL that will resolve to the provided experiment.
Args:
server_info: A `server_info_pb2.ServerInfoResponse` message.
experiment_id: A string; the ID of the experiment to link to.
Returns:
A URL resolving to the given experiment, as a string.
"""
url_format = server_info.url_format
return url_format.template.replace(url_format.id_placeholder, experiment_id)
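
# Illustrative sketch (the origin, endpoint, and experiment ID below are
# made up):
#
#   info = create_server_info("https://tensorboard.dev",
#                             "api.tensorboard.dev:443", [])
#   experiment_url(info, "AbCdEf")
#   # -> "https://tensorboard.dev/experiment/AbCdEf/"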
def allowed_plugins(server_info):
"""Determines which plugins may upload data.
This pulls from the `plugin_control` on the `server_info` when that
submessage is set, else falls back to a default.
Args:
server_info: A `server_info_pb2.ServerInfoResponse` message.
Returns:
A `frozenset` of plugin names.
"""
if server_info.HasField("plugin_control"):
return frozenset(server_info.plugin_control.allowed_plugins)
else:
# Old server: gracefully degrade to scalars only, which have
# been supported since launch. TODO(@wchargin): Promote this
# branch to an error once we're confident that we won't roll
# back to old server versions.
return frozenset((scalars_metadata.PLUGIN_NAME,))
def upload_limits(server_info):
"""Returns UploadLimits, from server_info if possible, otherwise from defaults.
Args:
server_info: A `server_info_pb2.ServerInfoResponse` message.
Returns:
An instance of UploadLimits.
"""
if server_info.HasField("upload_limits"):
upload_limits = server_info.upload_limits
else:
upload_limits = server_info_pb2.UploadLimits()
if not upload_limits.max_scalar_request_size:
upload_limits.max_scalar_request_size = _DEFAULT_MAX_SCALAR_REQUEST_SIZE
if not upload_limits.max_tensor_request_size:
upload_limits.max_tensor_request_size = _DEFAULT_MAX_TENSOR_REQUEST_SIZE
if not upload_limits.max_blob_request_size:
upload_limits.max_blob_request_size = _DEFAULT_MAX_BLOB_REQUEST_SIZE
if not upload_limits.min_scalar_request_interval:
upload_limits.min_scalar_request_interval = (
_DEFAULT_MIN_SCALAR_REQUEST_INTERVAL
)
if not upload_limits.min_tensor_request_interval:
upload_limits.min_tensor_request_interval = (
_DEFAULT_MIN_TENSOR_REQUEST_INTERVAL
)
if not upload_limits.min_blob_request_interval:
upload_limits.min_blob_request_interval = (
_DEFAULT_MIN_BLOB_REQUEST_INTERVAL
)
if not upload_limits.max_blob_size:
upload_limits.max_blob_size = _DEFAULT_MAX_BLOB_SIZE
if not upload_limits.max_tensor_point_size:
upload_limits.max_tensor_point_size = _DEFAULT_MAX_TENSOR_POINT_SIZE
return upload_limits
class CommunicationError(RuntimeError):
"""Raised upon failure to communicate with the server."""
pass
| apache-2.0 | -9,042,173,694,021,370,000 | 38.340336 | 158 | 0.702125 | false | 3.757223 | false | false | false |
Phoenix1369/site | judge/models/problem.py | 1 | 14046 | from operator import attrgetter
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.db.models import F, QuerySet
from django.db.models.expressions import RawSQL
from django.db.models.functions import Coalesce
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from judge.fulltext import SearchQuerySet
from judge.models.profile import Profile
from judge.models.runtime import Language
from judge.user_translations import ugettext as user_ugettext
from judge.utils.raw_sql import unique_together_left_join, RawSQLColumn
__all__ = ['ProblemGroup', 'ProblemType', 'Problem', 'ProblemTranslation', 'ProblemClarification',
'TranslatedProblemQuerySet', 'TranslatedProblemForeignKeyQuerySet', 'License']
class ProblemType(models.Model):
name = models.CharField(max_length=20, verbose_name=_('problem category ID'), unique=True)
full_name = models.CharField(max_length=100, verbose_name=_('problem category name'))
def __unicode__(self):
return self.full_name
class Meta:
ordering = ['full_name']
verbose_name = _('problem type')
verbose_name_plural = _('problem types')
class ProblemGroup(models.Model):
name = models.CharField(max_length=20, verbose_name=_('problem group ID'), unique=True)
full_name = models.CharField(max_length=100, verbose_name=_('problem group name'))
def __unicode__(self):
return self.full_name
class Meta:
ordering = ['full_name']
verbose_name = _('problem group')
verbose_name_plural = _('problem groups')
class License(models.Model):
key = models.CharField(max_length=20, unique=True, verbose_name=_('key'),
validators=[RegexValidator(r'^[-\w.]+$', r'License key must be ^[-\w.]+$')])
link = models.CharField(max_length=256, verbose_name=_('link'))
name = models.CharField(max_length=256, verbose_name=_('full name'))
display = models.CharField(max_length=256, blank=True, verbose_name=_('short name'),
help_text=_('Displayed on pages under this license'))
icon = models.CharField(max_length=256, blank=True, verbose_name=_('icon'), help_text=_('URL to the icon'))
text = models.TextField(verbose_name=_('license text'))
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('license', args=(self.key,))
class Meta:
verbose_name = _('license')
verbose_name_plural = _('licenses')
class TranslatedProblemQuerySet(SearchQuerySet):
def __init__(self, **kwargs):
super(TranslatedProblemQuerySet, self).__init__(('code', 'name', 'description'), **kwargs)
def add_i18n_name(self, language):
queryset = self._clone()
alias = unique_together_left_join(queryset, ProblemTranslation, 'problem', 'language', language)
return queryset.annotate(i18n_name=Coalesce(RawSQL('%s.name' % alias, ()), F('name'),
output_field=models.CharField()))
class TranslatedProblemForeignKeyQuerySet(QuerySet):
def add_problem_i18n_name(self, key, language, name_field=None):
queryset = self._clone() if name_field is None else self.annotate(_name=F(name_field))
alias = unique_together_left_join(queryset, ProblemTranslation, 'problem', 'language', language,
parent_model=Problem)
# You must specify name_field if Problem is not yet joined into the QuerySet.
kwargs = {key: Coalesce(RawSQL('%s.name' % alias, ()),
F(name_field) if name_field else RawSQLColumn(Problem, 'name'),
output_field=models.CharField())}
return queryset.annotate(**kwargs)
class Problem(models.Model):
code = models.CharField(max_length=20, verbose_name=_('problem code'), unique=True,
validators=[RegexValidator('^[a-z0-9]+$', _('Problem code must be ^[a-z0-9]+$'))])
name = models.CharField(max_length=100, verbose_name=_('problem name'), db_index=True)
description = models.TextField(verbose_name=_('problem body'))
authors = models.ManyToManyField(Profile, verbose_name=_('creators'), blank=True, related_name='authored_problems')
curators = models.ManyToManyField(Profile, verbose_name=_('curators'), blank=True, related_name='curated_problems',
help_text=_('These users will be able to edit a problem, '
'but not be publicly shown as an author.'))
testers = models.ManyToManyField(Profile, verbose_name=_('testers'), blank=True, related_name='tested_problems',
help_text=_(
'These users will be able to view a private problem, but not edit it.'))
types = models.ManyToManyField(ProblemType, verbose_name=_('problem types'))
group = models.ForeignKey(ProblemGroup, verbose_name=_('problem group'))
time_limit = models.FloatField(verbose_name=_('time limit'))
memory_limit = models.IntegerField(verbose_name=_('memory limit'))
short_circuit = models.BooleanField(default=False)
points = models.FloatField(verbose_name=_('points'))
partial = models.BooleanField(verbose_name=_('allows partial points'), default=False)
allowed_languages = models.ManyToManyField(Language, verbose_name=_('allowed languages'))
is_public = models.BooleanField(verbose_name=_('publicly visible'), db_index=True, default=False)
is_manually_managed = models.BooleanField(verbose_name=_('manually managed'), db_index=True, default=False,
help_text=_('Whether judges should be allowed to manage data or not'))
date = models.DateTimeField(verbose_name=_('date of publishing'), null=True, blank=True, db_index=True,
help_text=_("Doesn't have magic ability to auto-publish due to backward compatibility"))
banned_users = models.ManyToManyField(Profile, verbose_name=_('personae non gratae'), blank=True,
help_text=_('Bans the selected users from submitting to this problem'))
license = models.ForeignKey(License, null=True, blank=True, on_delete=models.SET_NULL)
og_image = models.CharField(verbose_name=_('OpenGraph image'), max_length=150, blank=True)
summary = models.TextField(blank=True, verbose_name=_('problem summary'),
help_text=_('Plain-text, shown in meta description tag, e.g. for social media.'))
user_count = models.IntegerField(verbose_name=_('amount of users'), default=0,
help_text=_('The amount of users on the best solutions page.'))
ac_rate = models.FloatField(verbose_name=_('rate of AC submissions'), default=0)
objects = TranslatedProblemQuerySet.as_manager()
tickets = GenericRelation('Ticket')
def __init__(self, *args, **kwargs):
super(Problem, self).__init__(*args, **kwargs)
self._translated_name_cache = {}
self._i18n_name = None
@cached_property
def types_list(self):
return map(user_ugettext, map(attrgetter('full_name'), self.types.all()))
def languages_list(self):
return self.allowed_languages.values_list('common_name', flat=True).distinct().order_by('common_name')
def is_editor(self, profile):
return (self.authors.filter(id=profile.id) | self.curators.filter(id=profile.id)).exists()
def is_editable_by(self, user):
if not user.is_authenticated:
return False
if user.has_perm('judge.edit_all_problem') or user.has_perm('judge.edit_public_problem') and self.is_public:
return True
return self.is_editor(user.profile)
def is_accessible_by(self, user):
# All users can see public problems
if self.is_public:
return True
# If the user can view all problems
if user.has_perm('judge.see_private_problem'):
return True
# If the user authored the problem or is a curator
if user.has_perm('judge.edit_own_problem') and self.is_editor(user.profile):
return True
# If the user is in a contest containing that problem or is a tester
if user.is_authenticated:
return (self.testers.filter(id=user.profile.id).exists() or
Problem.objects.filter(id=self.id, contest__users__user=user.profile).exists())
else:
return False
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('problem_detail', args=(self.code,))
@cached_property
def author_ids(self):
return self.authors.values_list('id', flat=True)
@cached_property
def editor_ids(self):
return self.author_ids | self.curators.values_list('id', flat=True)
@cached_property
def tester_ids(self):
return self.testers.values_list('id', flat=True)
@cached_property
def usable_common_names(self):
return set(self.usable_languages.values_list('common_name', flat=True))
@property
def usable_languages(self):
return self.allowed_languages.filter(judges__in=self.judges.filter(online=True)).distinct()
def translated_name(self, language):
if language in self._translated_name_cache:
return self._translated_name_cache[language]
# Hits database despite prefetch_related.
try:
name = self.translations.filter(language=language).values_list('name', flat=True)[0]
except IndexError:
name = self.name
self._translated_name_cache[language] = name
return name
@property
def i18n_name(self):
if self._i18n_name is None:
self._i18n_name = self._trans[0].name if self._trans else self.name
return self._i18n_name
@i18n_name.setter
def i18n_name(self, value):
self._i18n_name = value
@property
def clarifications(self):
return ProblemClarification.objects.filter(problem=self)
def update_stats(self):
self.user_count = self.submission_set.filter(points__gt=0).values('user').distinct().count()
submissions = self.submission_set.count()
self.ac_rate = 100.0 * self.submission_set.filter(result='AC').count() / submissions if submissions else 0
self.save()
update_stats.alters_data = True
def _get_limits(self, key):
limits = {limit['language_id']: (limit['language__name'], limit[key])
for limit in self.language_limits.values('language_id', 'language__name', key)}
limit_ids = set(limits.keys())
common = []
for cn, ids in Language.get_common_name_map().iteritems():
if ids - limit_ids:
continue
limit = set(limits[id][1] for id in ids)
if len(limit) == 1:
limit = next(iter(limit))
common.append((cn, limit))
for id in ids:
del limits[id]
limits = limits.values() + common
limits.sort()
return limits
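
    # For example, if the Python 2 and Python 3 runtimes share the common
    # name 'Python' and carry identical limits, _get_limits collapses them
    # into a single ('Python', limit) row (a hypothetical illustration).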
@property
def language_time_limit(self):
key = 'problem_tls:%d' % self.id
result = cache.get(key)
if result is not None:
return result
result = self._get_limits('time_limit')
cache.set(key, result)
return result
@property
def language_memory_limit(self):
key = 'problem_mls:%d' % self.id
result = cache.get(key)
if result is not None:
return result
result = self._get_limits('memory_limit')
cache.set(key, result)
return result
class Meta:
permissions = (
('see_private_problem', 'See hidden problems'),
('edit_own_problem', 'Edit own problems'),
('edit_all_problem', 'Edit all problems'),
('edit_public_problem', 'Edit all public problems'),
('clone_problem', 'Clone problem'),
('change_public_visibility', 'Change is_public field'),
('change_manually_managed', 'Change is_manually_managed field'),
)
verbose_name = _('problem')
verbose_name_plural = _('problems')
class ProblemTranslation(models.Model):
problem = models.ForeignKey(Problem, verbose_name=_('problem'), related_name='translations')
language = models.CharField(verbose_name=_('language'), max_length=7, choices=settings.LANGUAGES)
name = models.CharField(verbose_name=_('translated name'), max_length=100, db_index=True)
description = models.TextField(verbose_name=_('translated description'))
class Meta:
unique_together = ('problem', 'language')
verbose_name = _('problem translation')
verbose_name_plural = _('problem translations')
class ProblemClarification(models.Model):
problem = models.ForeignKey(Problem, verbose_name=_('clarified problem'))
description = models.TextField(verbose_name=_('clarification body'))
date = models.DateTimeField(verbose_name=_('clarification timestamp'), auto_now_add=True)
class LanguageLimit(models.Model):
problem = models.ForeignKey(Problem, verbose_name=_('problem'), related_name='language_limits')
language = models.ForeignKey(Language, verbose_name=_('language'))
time_limit = models.FloatField(verbose_name=_('time limit'))
memory_limit = models.IntegerField(verbose_name=_('memory limit'))
class Meta:
unique_together = ('problem', 'language')
verbose_name = _('language-specific resource limit')
verbose_name_plural = _('language-specific resource limits')
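
# Usage sketch (hypothetical; assumes a configured Django environment and
# that 'fr' appears in settings.LANGUAGES):
#
#   qs = Problem.objects.filter(is_public=True).add_i18n_name('fr')
#   names = [(p.code, p.i18n_name) for p in qs]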
| agpl-3.0 | 6,084,416,906,760,643,000 | 43.590476 | 120 | 0.638331 | false | 4.136042 | true | false | false |
hane1818/Simple-HMM | hmm.py | 1 | 12664 | from math import log, exp
class HMM:
"""Simple implement for Hidden Markov Model"""
def __init__(self, state_num, observation_list,
initial_probability=None, transition_probability=None, observation_probability=None):
self.state_num = state_num
# Initial probability for choosing first state
self._init_prob = [0 for i in range(state_num)] if not initial_probability else initial_probability
self._state = [i for i in range(state_num)]
# Every state's transition probability
self._state_prob = [[(1/self.state_num) for j in range(state_num)] for i in range(state_num)] \
if not transition_probability else transition_probability
self._ob_list = observation_list
self._ob_num = len(observation_list)
# Every state's observation probability
self._ob_prob = [[1/self._ob_num for j in range(self._ob_num)] for i in range(self.state_num)] \
if not observation_probability else observation_probability
# Translate probability to log
self._init_prob = [log(p) for p in self._init_prob]
self._state_prob = [[log(p) for p in state] for state in self._state_prob]
self._ob_prob = [[log(p) for p in state] for state in self._ob_prob]
def forward(self, ob_list, time):
"""Use forward algorithm to evaluate probability of a given observation.
Parameters
----------
ob_list : array-like, Observation list.
        time : integer, Number of time steps of the observation list to evaluate.
Returns
-------
p : float, Probability of given observation.
prob_list : array, Forward probability in every time stamp.
"""
if time > len(ob_list):
raise IndexError("Time cannot be more than length of observation list.")
ob_list = self._get_ob_index(ob_list) # Transform observation to index
# Calculate probability of first observation for every state
forward_prob = [self._initial_ob_prob(ob_list[0])]
for t in range(1, time):
forward_prob.append([])
for j in range(self.state_num):
# Calculate probability that previous state probability transit to present state probability
p = self._log_sum([forward_prob[t-1][i] + self._state_prob[i][j] for i in range(self.state_num)])
# Calculate probability that present state to present observation
forward_prob[t].append(p + self._ob_prob[j][ob_list[t]])
return exp(self._log_sum(forward_prob[time-1])), forward_prob
def backward(self, ob_list, time):
"""Use backward algorithm to evaluate probability of a given observation.
Parameters
----------
ob_list : array-like, Observation list.
        time : integer, Number of time steps of the observation list to evaluate.
Returns
-------
p : float, Probability of given observation.
prob_list : array, Backward probability in every time stamp.
"""
if time > len(ob_list):
raise IndexError("Time cannot be more than length of observation list.")
ob_list = self._get_ob_index(ob_list) # Transform observation to index
# Initialize the probability
backward_prob = [[log(1) for i in range(self.state_num)] for t in range(time)]
for t in range(time-2, -1, -1):
for i in range(self.state_num):
# Calculate probability that following state probability back to present state probability.
p = self._log_sum([backward_prob[t+1][j] + self._state_prob[i][j] + self._ob_prob[j][ob_list[t+1]]
for j in range(self.state_num)])
backward_prob[t][i] = p
        # Combine the backward probabilities with the initial-state and
        # first-observation probabilities to complete the evaluation
return exp(self._log_sum([self._init_prob[i] + self._ob_prob[i][ob_list[0]] + backward_prob[0][i]
for i in range(self.state_num)])), backward_prob
def decode(self, ob_list, time):
"""Use viterbi algorithm to find the state sequence for a given observation list.
Parameters
----------
ob_list : array-like, Observation list.
        time : integer, Number of time steps of the observation list to evaluate.
Returns
-------
        state_seq : array, The most likely state sequence for the given observation list.
"""
if time > len(ob_list):
raise IndexError("Time cannot be more than length of observation list.")
ob_list = self._get_ob_index(ob_list) # Transform observation to index
# Calculate probability of first observation for every state
max_prob = self._initial_ob_prob(ob_list[0])
pre_prob = max_prob[:]
path = [[i] for i in range(self.state_num)]
for t in range(1, time):
new_path = [[] for i in range(self.state_num)]
for j in range(self.state_num):
# Find maximum probability and the most possible previous state to transit to present state
p, state = max([(pre_prob[i] + self._state_prob[i][j], i) for i in range(self.state_num)])
# Calculate probability that present state to present observation
max_prob[j] = p + self._ob_prob[j][ob_list[t]]
# Choose the most possible path to present state
new_path[j] = path[state] + [j]
# Record for changed probability
pre_prob = max_prob[:]
# Record for new path
path = new_path
# Find the last state
(prob, state) = max([(max_prob[i], i) for i in range(self.state_num)])
return path[state]
def train(self, data_sets):
"""Use EM algorithm to train models.
Parameters
----------
data_sets : array-like, A array of observation list.
        Returns
        -------
        None, the model parameters are updated in place.
        """
size = len(data_sets)
        # The probability of every path which passes through state i
        all_state_prob = []  # gamma
        # The probability of every path which passes by the route from state i to state j
        all_stateset_prob = []  # xi
# initial_prob = [0 for i in range(self.state_num)]
# state_prob = [[0 for j in range(self.state_num)] for i in range(self.state_num)]
for data in data_sets:
time = len(data)
            # The probability of every path which passes through state i.
            # Build a fresh inner list for every time step; reusing one list
            # object would alias all time steps together.
            state_prob = [[-1e10 for i in range(self.state_num)]
                          for t in range(time)]  # gamma
            # The probability of every path which passes by the route from state i to state j
            state_set_prob = [[[-1e10 for j in range(self.state_num)]
                               for i in range(self.state_num)]
                              for t in range(time)]  # xi
_, forward_prob = self.forward(data, time)
_, backward_prob = self.backward(data, time)
data = self._get_ob_index(data)
for t, ob in enumerate(data):
# p += α[t][i] * β[t][i]
p = self._log_sum([forward_prob[t][i] + backward_prob[t][i] for i in range(self.state_num)])
# γ[t][i] = α[t][i] * β[t][i] / p
for i in range(self.state_num):
state_prob[t][i] = forward_prob[t][i] + backward_prob[t][i] - p
if t < time-1:
# p += α[t][i] * a[i][j] * b[j][o[t+1]] * β[t+1][j]
p = self._log_sum([forward_prob[t][i] + self._state_prob[i][j] +
self._ob_prob[j][data[t+1]] + backward_prob[t+1][j]
for i in range(self.state_num) for j in range(self.state_num)])
# ξ[t][i][j] = α[t][i] * a[i][j] * b[j][o[t+1]] * β[t+1][j] / p;
for i in range(self.state_num):
for j in range(self.state_num):
state_set_prob[t][i][j] = forward_prob[t][i] + self._state_prob[i][j] + \
self._ob_prob[j][data[t+1]] + backward_prob[t+1][j] - p
# Update initial probability
"""
self._init_prob = [state_prob[0][i] for i in range(self.state_num)]
# Update state transition probability
for i in range(self.state_num):
p2 = self._log_sum([state_prob[t][i] for t in range(time-1)])
for j in range(self.state_num):
p1 = self._log_sum([state_set_prob[t][i][j] for t in range(time-1)])
self._state_prob[i][j] = p1 - p2
# Update observation probability
for i in range(self.state_num):
p = [-1e10 for o in range(self._ob_num)]
p2 = self._log_sum([state_prob[t][i] for t in range(time)])
for t in range(time):
p[data[t]] = self._log_sum([p[data[t]], state_prob[t][i]])
for j in range(self._ob_num):
self._ob_prob[i][j] = p[j] - p2
"""
all_state_prob.append(state_prob)
all_stateset_prob.append(state_set_prob)
pi = [self._log_sum([all_state_prob[l][0][i] for l in range(size)]) - log(size)
for i in range(self.state_num)]
print("pi:", pi)
a = [[-1e10 for i in range(self.state_num)] for j in range(self.state_num)]
b = [[-1e10 for o in range(self._ob_num)] for j in range(self.state_num)]
for i in range(self.state_num):
p2 = self._log_sum([all_state_prob[l][t][i] for l in range(size) for t in range(len(data_sets[l]) - 1)])
for j in range(self.state_num):
p1 = self._log_sum([all_stateset_prob[l][t][i][j]
for l in range(size) for t in range(len(data_sets[l]) - 1)])
a[i][j] = p1 - p2
for i in range(self.state_num):
p = [-1e10 for o in range(self._ob_num)]
p2 = self._log_sum([all_state_prob[l][t][i] for l in range(size) for t in range(len(data_sets[l]))])
for l in range(size):
for t in range(len(data_sets[l])):
ob_ind = self._ob_list.index(data_sets[l][t])
p[ob_ind] = self._log_sum([p[ob_ind], all_state_prob[l][t][i]])
for j in range(self._ob_num):
b[i][j] = p[j] - p2
self._init_prob = pi
self._state_prob = a
self._ob_prob = b
"""
self._init_prob = [self._log_sum([all_state_prob[l][0][i] - log(size)
for l in range(size)])
for i in range(self.state_num)]
for i in range(self.state_num):
p2 = -1.0E10
p = 0
for x in all_state_prob:
p+=sum(x[0:-1][i])
p = [-1.0E10 for i in range(self._ob_num)]
for s, x in enumerate(all_state_prob):
for t, y in enumerate(x):
ob_ind = self._ob_list.index(data_sets[s][t])
p[ob_ind] = self._log_sum((p[ob_ind], y[i]))
p2 = self._log_sum((p2, y[i]))
for j in range(self.state_num):
p1 = -1.0E10
for prob_list in all_stateset_prob:
for prob in prob_list[:-1]:
p1 = self._log_sum((p1, prob[i][j]))
self._state_prob[i][j] = p1 - p2
for j in range(self._ob_num):
self._ob_prob[i][j] = p[j] - p2
"""
def _get_ob_index(self, observation):
return [self._ob_list.index(i) for i in observation] # Transform observation to index
def _initial_ob_prob(self, ob_index):
return [self._init_prob[i] + self._ob_prob[i][ob_index] for i in range(self.state_num)]
@staticmethod
def _log_sum(sequence):
"""
        :param sequence: array-like, numbers in log space whose linear-space sum is wanted
        :return: float, log of the sum of the exponentiated inputs, i.e. log(p + q)
"""
start = sequence[0]
for value in sequence[1:]:
if start < value:
start, value = value, start
if start == 0 and value == 0:
start = log(exp(start) + exp(value))
else:
start += log(1 + exp(value - start))
return start
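
# Usage sketch (a toy two-state model; all probabilities below are
# illustrative):
#
#   model = HMM(2, ['walk', 'shop', 'clean'],
#               initial_probability=[0.6, 0.4],
#               transition_probability=[[0.7, 0.3], [0.4, 0.6]],
#               observation_probability=[[0.1, 0.4, 0.5],
#                                        [0.6, 0.3, 0.1]])
#   prob, _ = model.forward(['walk', 'shop', 'clean'], 3)
#   best_path = model.decode(['walk', 'shop', 'clean'], 3)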
| mit | -4,213,058,366,093,229,600 | 46.040892 | 116 | 0.534139 | false | 3.681699 | false | false | false |