repo_name (stringlengths 6–61) | path (stringlengths 4–230) | copies (stringlengths 1–3) | size (stringlengths 4–6) | text (stringlengths 1.01k–850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6–96.6) | line_max (int64, 32–939) | alpha_frac (float64, 0.26–0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62–6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
realm/realm-core | evergreen/bloaty_to_json.py | 1 | 6304 | #!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import re
from csv import DictReader
from pathlib import Path
parser = argparse.ArgumentParser(description='Checks how bloated realm has become')
parser.add_argument(
'--short-symbols-input',
type=Path,
help='Path to CSV output of short symbols input file',
)
parser.add_argument(
'--sections-input',
type=Path,
help='Path to CSV output of sections input file',
)
parser.add_argument(
'--compileunits-input',
type=Path,
help='Path to CSV output of compileunits input file',
)
parser.add_argument(
'--analyzed-file',
type=str,
help='Name of file being analyzed by bloaty',
)
evgOpts = parser.add_argument_group('Evergreen Metadata')
evgOpts.add_argument('--output', type=Path, help='The evergreen json output filename')
evgOpts.add_argument('--project', type=str, help='Evergreen project this script is running in')
evgOpts.add_argument('--execution', type=int, help='Execution # of this evergreen task')
evgOpts.add_argument(
    '--is-patch',
    type=bool,
    dest='is_patch',
    help='Specify whether this is a patch build',
)
evgOpts.add_argument(
'--build-variant',
type=str,
dest='build_variant',
help='Build variant of the evergreen task',
)
evgOpts.add_argument('--branch', type=str, help='Git branch that was being tested')
evgOpts.add_argument('--revision', type=str, help='Git sha being tested')
evgOpts.add_argument('--task-id', type=str, dest='task_id', help='Evergreen task ID of this task')
evgOpts.add_argument('--task-name', type=str, dest='task_name', help='Name of this evergreen task')
evgOpts.add_argument(
'--revision-order-id',
type=str,
dest='revision_order_id',
help='Evergreen revision order id',
)
evgOpts.add_argument('--version-id', type=str, dest='version_id', help='Name of this evergreen version')
args = parser.parse_args()
patch_username : str = ''
def parse_patch_order():
global patch_username
patch_order_re = re.compile(r"(?P<patch_username>[\w\@\.]+)_(?P<patch_order>\d+)")
match_obj = patch_order_re.match(args.revision_order_id)
patch_username = match_obj.group('patch_username')
return int(match_obj.group('patch_order'))
evg_order = int(args.revision_order_id) if not args.is_patch else parse_patch_order()
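# Illustrative --revision-order-id formats (values are hypothetical):
#   mainline build: '12345'                  -> evg_order == 12345
#   patch build:    '[email protected]_42' -> evg_order == 42 and
#                   patch_username == '[email protected]'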
cxx_method_re = re.compile(
# namespaces/parent class name
r"(?P<ns>(?:(?:[_a-zA-Z][\w]*)(?:<.*>)?(?:::)|(?:\(anonymous namespace\)::))+)" +
r"(?P<name>[\~a-zA-Z_][\w]*)(?:<.*>)?" + # function/class name
r"(?P<is_function>\(\))?" + # if this is function, this will capture "()"
# will be a number if this is a lambda
r"(?:::\{lambda\(\)\#(?P<lambda_number>\d+)\}::)?")
elf_section_re = re.compile(r"\[section \.(?P<section_name>[\w\.\-]+)\]")
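# Illustrative matches (the symbol names are hypothetical):
#   'realm::util::File::open()' -> ns='realm::util::File::', name='open',
#                                  is_function='()'
#   '[section .debug_info]'     -> section_name='debug_info'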
items : list[dict] = []
sections_seen = set()
if args.short_symbols_input:
with open(args.short_symbols_input, 'r') as csv_file:
input_csv_reader = DictReader(csv_file)
for row in input_csv_reader:
raw_name = row['shortsymbols']
if match := cxx_method_re.search(raw_name):
ns = match.group('ns').rstrip(':')
node_name = match.group('name')
if match.group('lambda_number'):
node_name = "{} lambda #{}".format(node_name, match.group('lambda_number'))
type_str: str = 'symbol'
if match.group('lambda_number'):
type_str = 'lambda'
elif match.group('is_function'):
type_str = 'function'
items.append({
'type': type_str,
'name': raw_name,
'ns': ns,
'file_size': int(row['filesize']),
'vm_size': int(row['vmsize']),
})
elif match := elf_section_re.search(raw_name):
section_name = match.group('section_name')
type_str: str = 'section' if not section_name.startswith('.debug') else 'debug_section'
                if section_name not in sections_seen:
                    sections_seen.add(section_name)
                    items.append({
                        'type': type_str,
                        'name': section_name,
                        'file_size': int(row['filesize']),
                        'vm_size': int(row['vmsize'])
                    })
else:
items.append({
'type': 'symbol',
'name': raw_name,
'file_size': int(row['filesize']),
'vm_size': int(row['vmsize']),
})
if args.sections_input:
with open(args.sections_input, 'r') as csv_file:
input_csv_reader = DictReader(csv_file)
for row in input_csv_reader:
section_name = row['sections']
type_str: str = 'section' if not section_name.startswith('.debug') else 'debug_section'
            if section_name not in sections_seen:
                sections_seen.add(section_name)
                items.append({
                    'name': section_name,
                    'type': type_str,
                    'file_size': int(row['filesize']),
                    'vm_size': int(row['vmsize'])
                })
if args.compileunits_input:
with open(args.compileunits_input, 'r') as csv_file:
input_csv_reader = DictReader(csv_file)
for row in input_csv_reader:
compileunit_name = row['compileunits']
if not elf_section_re.search(compileunit_name):
items.append({
'name': compileunit_name,
'type': 'compileunit',
'file_size': int(row['filesize']),
'vm_size': int(row['vmsize'])
})
output_obj = {
'items': items,
'execution': args.execution,
'is_mainline': (args.is_patch is not True),
'analyzed_file': args.analyzed_file,
'order': evg_order,
'project': args.project,
'branch': args.branch,
'build_variant': args.build_variant,
'revision': args.revision,
'task_id': args.task_id,
'task_name': args.task_name,
'version_id': args.version_id,
'patch_username': patch_username
}
with open(args.output, 'w') as out_fp:
json.dump(output_obj, out_fp)
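# Example invocation (illustrative; file names and metadata values are hypothetical):
#   ./bloaty_to_json.py --short-symbols-input symbols.csv --sections-input sections.csv \
#       --compileunits-input compileunits.csv --analyzed-file librealm.a \
#       --revision-order-id 12345 --output bloaty_results.json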
| apache-2.0 | -2,903,165,258,596,488,700 | 35.022857 | 104 | 0.564562 | false | 3.618829 | false | false | false |
aekazakov/narrative | src/biokbase/narrative/tests/test_kbasewsmanager.py | 4 | 10729 | """
Tests for Narrative notebook manager
"""
__author__ = 'Bill Riehl <[email protected]>'
import unittest
from getpass import getpass
from biokbase.narrative.kbasewsmanager import KBaseWSNotebookManager
from biokbase.workspace.client import Workspace
import biokbase.workspace
import biokbase.auth
import os
import re
from tornado import web
# matches valid names of Narratives = "workspace id"/"narrative name"
# e.g. complicated stuff like:
# wjriehl:my_complicated_workspace123/Here is a new narrative!
name_regex = re.compile('[\w:-]+/[\w:-]+')
# matches a valid Narrative reference name, eg:
# ws.768.obj.1234
obj_regex = re.compile('^ws\.\d+\.obj\.\d+')
bad_narrative_id = "Not a real Narrative id!"
test_user_id = "kbasetest"
class NarrBaseTestCase(unittest.TestCase):
# Before test:
# - Log in (for tests that require login)
# also sets the token in the environment variable so the manager can get to it.
@classmethod
def setUpClass(self):
self.user_id = test_user_id
self.pwd = getpass("Password for {}: ".format(test_user_id))
self.token = biokbase.auth.Token(user_id=self.user_id, password=self.pwd)
# by default, user's left logged out
@classmethod
def setUp(self):
self.mgr = KBaseWSNotebookManager()
@classmethod
def tearDown(self):
self.logout()
pass
@classmethod
def tearDownClass(self):
pass
@classmethod
def login(self):
biokbase.auth.set_environ_token(self.token.token)
@classmethod
def logout(self):
biokbase.auth.set_environ_token(None)
def test_manager_instantiated(self):
self.assertIsInstance(self.mgr, biokbase.narrative.kbasewsmanager.KBaseWSNotebookManager)
# test get_userid()
def test_user_id_loggedin(self):
self.login()
self.assertEquals(self.mgr.get_userid(), self.user_id)
# test get_userid()
def test_user_id_loggedout(self):
self.assertEquals(self.mgr.get_userid(), None)
# test wsclient()
def test_wsclient(self):
self.assertIsInstance(self.mgr.wsclient(), Workspace)
# test info_string (just make sure it's a string)
def test_info_string(self):
self.assertIsInstance(self.mgr.info_string(), basestring)
# test list notebooks while logged in returns a list of strings
def test_list_notebooks_loggedin(self):
self.login()
self.test_list_notebooks()
def test_list_notebooks_loggedout(self):
self.test_list_notebooks()
def test_list_notebooks(self):
nb_list = self.mgr.list_notebooks()
# assert we actually get something
self.assertIsInstance(nb_list, list)
# assert it's a list of formatted dicts
format_failure = self.check_nb_list_format(nb_list)
self.assertIsNone(format_failure)
def check_nb_list_format(self, nb_list):
for nb_info in nb_list:
if not 'name' in nb_info:
return 'Missing a "name" key!'
if not 'notebook_id' in nb_info:
return 'Missing a "notebook_id key!'
if not name_regex.match(nb_info['name']):
return 'Incorrect format for "name" key: {}'.format(nb_info['name'])
if not obj_regex.match(nb_info['notebook_id']):
return 'Incorrect format for "notebook_id" key: {}'.format(nb_info['notebook_id'])
# If we make it this far, don't return anything! Hooray!
return None
def test_clean_id(self):
spacey_str = 'test test test test test'
unspacey_str = 'test_test__test_test___test'
self.assertEquals(self.mgr._clean_id(spacey_str), unspacey_str)
class NarrDocumentTestCase(NarrBaseTestCase):
@classmethod
def setUpClass(self):
try:
self.login()
# id for test notebook that'll get twiddled in this test case
self.nb_id = self.mgr.new_notebook()
self.logout()
except:
print "Unable to create a new Narrative for testing manipulation methods against. Exiting..."
raise
@classmethod
def tearDownClass(self):
try:
self.login()
self.mgr.delete_notebook(self.nb_id)
self.logout()
except:
print "Unable to delete test Narrative with id {} after testing was completed!".format(self.nb_id)
raise
# test that we can create and destroy a new Narrative while logged in
def test_create_delete_new_nb_loggedin(self):
self.login()
try:
test_id = self.mgr.new_notebook()
self.assertIsNotNone(test_id)
except:
raise
try:
self.mgr.delete_notebook(test_id)
except:
raise
# test that trying to create a new Narrative while not logged in fails properly
def test_create_new_nb_loggedout(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.new_notebook()
self.assertEquals(err.exception.status_code, 401)
def test_notebook_exists_valid(self):
self.login()
self.assertTrue(self.mgr.notebook_exists(self.nb_id))
def test_notebook_exists_invalid(self):
self.login()
self.assertFalse(self.mgr.notebook_exists(bad_narrative_id))
def test_notebook_exists_loggedout(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.notebook_exists(self.nb_id)
self.assertEquals(err.exception.status_code, 400)
def test_get_name_valid(self):
self.login()
self.assertIsNotNone(self.mgr.get_name(self.nb_id))
def test_get_name_invalid(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.get_name(bad_narrative_id)
self.assertEquals(err.exception.status_code, 404)
def test_get_name_loggedout(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.get_name(self.nb_id)
self.assertEquals(err.exception.status_code, 404)
# create_checkpoint is a no-op for now, but leave in blank tests
def test_create_checkpoint_valid(self):
pass
def test_create_checkpoint_invalid(self):
pass
def test_create_checkpoint_loggedout(self):
pass
# list_checkpoints is a no-op for now, but leave in blank tests
def test_list_checkpoints_valid(self):
pass
def test_list_checkpoints_invalid(self):
pass
def test_list_checkpoints_loggedout(self):
pass
# restore_checkpoint is a no-op for now, but leave in blank tests
def test_restore_checkpoint_valid(self):
pass
def test_restore_checkpoint_invalid(self):
pass
def test_restore_checkpoint_loggedout(self):
pass
# delete_checkpoint is a no-op for now, but leave in blank tests
def test_delete_checkpoint_valid(self):
pass
def test_delete_checkpoint_invalid(self):
pass
def test_delete_checkpoint_loggedout(self):
pass
def test_read_notebook_valid(self):
self.login()
(last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
self.assertIsNone(self.validate_nb(last_modified, nb))
def test_read_notebook_invalid(self):
self.login()
with self.assertRaises(web.HTTPError) as err:
self.mgr.read_notebook_object(bad_narrative_id)
self.assertEquals(err.exception.status_code, 500)
def test_read_notebook_loggedout(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.read_notebook_object(bad_narrative_id)
self.assertEquals(err.exception.status_code, 400)
def validate_nb(self, last_modified, nb):
if last_modified is None:
return "Missing 'last modified' field!"
if nb is None:
return "Missing nb field!"
keylist = ['nbformat', 'nbformat_minor', 'worksheets', 'metadata']
for key in keylist:
if not key in nb:
return 'Required key "{}" missing from Narrative object'.format(key)
metadata_check = {
'description': '',
'format': 'ipynb',
'creator': self.user_id,
'data_dependencies': [],
'ws_name': '',
'type': 'KBaseNarrative.Narrative',
'name': '',
'job_ids': []
}
for key in metadata_check.keys():
if key in nb['metadata']:
test_val = metadata_check[key]
if len(test_val) > 0:
if test_val != nb['metadata'][key]:
return 'Metadata key "{}" should have value "{}", but has value "{}"'.format(key, test_val, nb['metadata'][key])
else:
return 'Required metadata key "{}" missing from Narrative object'.format(key)
return None
def test_write_notebook_object_valid(self):
self.login()
(last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
ret_id = self.mgr.write_notebook_object(nb, notebook_id=self.nb_id)
self.assertEquals(ret_id, self.nb_id)
# Without an id, we would expect it to create a new narrative object in the
# same workspace that Notebook knows about from its metadata
def test_write_notebook_object_valid_without_id(self):
self.login()
(last_modified, nb) = self.mgr.read_notebook_object(self.nb_id)
ret_id = self.mgr.write_notebook_object(nb)
        # writing without an id creates a new Narrative object, so the
        # returned id should differ from the original
self.assertNotEquals(ret_id, self.nb_id)
# Do a little specific cleanup here.
if (ret_id is not self.nb_id):
self.mgr.delete_notebook(ret_id)
def test_write_notebook_object_invalid(self):
self.login()
with self.assertRaises(web.HTTPError) as err:
self.mgr.write_notebook_object({})
self.assertEquals(err.exception.status_code, 400) # should be 500?
def test_write_notebook_object_loggedout(self):
with self.assertRaises(web.HTTPError) as err:
self.mgr.write_notebook_object({})
self.assertEquals(err.exception.status_code, 400)
# not sure the best way to test this, and it's not very relevant for KBase, since we
# don't expose the mapping to users (this is for the typical IPython loading screen)
def test_delete_notebook_id(self):
pass
# cases left to test!
# new notebook name
# new nb name with funky characters
# reading a deleted Narrative
# reading/writing with creds, but unauthorized (e.g. kbasetest trying to write to wjriehl:home)
if __name__ == '__main__':
    unittest.main()
| mit | -1,639,539,438,502,310,700 | 33.28115 | 136 | 0.63072 | false | 3.768528 | true | false | false |
rabbitvcs/rabbitvcs | rabbitvcs/util/strings.py | 1 | 7919 | #
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by Jason Field <[email protected]>
# Copyright (C) 2007-2008 by Bruce van der Kooij <[email protected]>
# Copyright (C) 2008-2010 by Adam Plumb <[email protected]>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
"""
Additional strings support.
"""
import sys
import codecs
import re
import six
import locale
__all__ = ["S", "IDENTITY_ENCODING", "UTF8_ENCODING", "SURROGATE_ESCAPE"]
unicode_null_string = six.u("")
non_alpha_num_re = re.compile("[^A-Za-z0-9]+")
SURROGATE_BASE = 0xDC00
RE_SURROGATE = re.compile(six.u("[") + six.unichr(SURROGATE_BASE + 0x80) +
six.u("-") + six.unichr(SURROGATE_BASE + 0xFF) +
six.u("]"))
RE_UTF8 = re.compile("^[Uu][Tt][Ff][ _-]?8$")
# Codec that maps ord(byte) == ord(unicode_char).
IDENTITY_ENCODING = "latin-1"
# An UTF-8 codec that implements surrogates, even in Python 2.
UTF8_ENCODING = "rabbitvcs-utf8"
def utf8_decode(input, errors="strict"):
return codecs.utf_8_decode(input, errors, True)
def utf8_encode(input, errors="strict"):
output = b''
pos = 0
end = len(input)
eh = None
while pos < end:
n = end
m = RE_SURROGATE.search(input, pos)
if m:
n = m.start()
if n > pos:
p, m = codecs.utf_8_encode(input[pos:n], errors)
output += p
pos = n
if pos < end:
e = UnicodeEncodeError(UTF8_ENCODING,
input, pos, pos + 1,
"surrogates not allowed")
if not eh:
eh = codecs.lookup_error(errors)
p, n = eh(e)
output += p
if n <= pos:
n = pos + 1
pos = n
return (output, len(input))
class Utf8IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return utf8_encode(input, self.errors)[0]
class Utf8IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_8_decode
class Utf8StreamWriter(codecs.StreamWriter):
def encode(self, input, errors='strict'):
return utf8_encode(input, errors)
class Utf8StreamReader(codecs.StreamReader):
decode = codecs.utf_8_decode
def utf8_search(encoding):
encoding = non_alpha_num_re.sub("-", encoding).strip("-").lower()
if encoding != UTF8_ENCODING:
return None
return codecs.CodecInfo(
name=UTF8_ENCODING,
encode=utf8_encode,
decode=utf8_decode,
incrementalencoder=Utf8IncrementalEncoder,
incrementaldecoder=Utf8IncrementalDecoder,
streamwriter=Utf8StreamWriter,
streamreader=Utf8StreamReader
)
codecs.register(utf8_search)
# Emulate the surrogateescape codecs error handler, because it is not
# available before Python 3.1.
SURROGATE_ESCAPE = "rabbitvcs-surrogateescape"
def rabbitvcs_surrogate_escape(e):
if not isinstance(e, UnicodeError):
raise e
input = e.object[e.start:e.end]
if isinstance(e, UnicodeDecodeError):
output = [six.unichr(b) if b < 0x80 else \
six.unichr(SURROGATE_BASE + b) for b in bytearray(input)]
return (unicode_null_string.join(output), e.end)
if isinstance(e, UnicodeEncodeError):
output = b""
for c in input:
b = ord(c) - SURROGATE_BASE
if not 0x80 <= b <= 0xFF:
raise e
output += six.int2byte(b)
return (output, e.end)
raise e
codecs.register_error(SURROGATE_ESCAPE, rabbitvcs_surrogate_escape)
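# Illustrative round trip through the handler registered above
# (the input byte b'\xff' is hypothetical):
#   b'\xff'.decode(UTF8_ENCODING, SURROGATE_ESCAPE)   -> u'\udcff'
#   u'\udcff'.encode(UTF8_ENCODING, SURROGATE_ESCAPE) -> b'\xff'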
class S(str):
"""
Stores a string in native form: unicode with surrogates in Python 3 and
utf-8 in Python 2.
Provides the following methods:
encode: overloaded to use UTF8_ENCODING and SURROGATE_ESCAPE error handler.
decode: overloaded to use UTF8_ENCODING and SURROGATE_ESCAPE error handler.
bytes: get the string as bytes.
unicode: get the string as unicode.
display: get the string in native form, without surrogates.
"""
if str == bytes:
# Python 2.
def __new__(cls, value, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
if isinstance(value, bytearray):
value = bytes(value)
if isinstance(value, str):
encoding, errors = S._codeargs(encoding, errors)
if encoding.lower() != UTF8_ENCODING:
value = value.decode(encoding, errors)
if isinstance(value, six.text_type):
value = value.encode(UTF8_ENCODING, SURROGATE_ESCAPE)
elif not isinstance(value, str):
value = str(value)
return str.__new__(cls, value)
def encode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
encoding, errors = self._codeargs(encoding, errors)
if encoding.lower() == UTF8_ENCODING:
return str(self)
value = str.decode(self, UTF8_ENCODING, SURROGATE_ESCAPE)
return value.encode(encoding, errors)
def decode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
encoding, errors = self._codeargs(encoding, errors)
return str.decode(self, encoding, errors)
def display(self, encoding=None, errors='replace'):
encoding, errors = self._codeargs(encoding, errors)
value = str.decode(self, UTF8_ENCODING, errors)
return value.encode(encoding, errors)
else:
# Python 3.
def __new__(cls, value, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
if isinstance(value, bytearray):
value = bytes(value)
if isinstance(value, bytes):
encoding, errors = S._codeargs(encoding, errors)
value = value.decode(encoding, errors)
elif not isinstance(value, str):
value = str(value)
return str.__new__(cls, value)
def encode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
encoding, errors = self._codeargs(encoding, errors)
return str.encode(self, encoding, errors)
def decode(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
            return str(self)
def display(self, encoding=None, errors='replace'):
return RE_SURROGATE.sub(six.unichr(0xFFFD), self)
def bytes(self, encoding=UTF8_ENCODING, errors=SURROGATE_ESCAPE):
return self.encode(encoding, errors)
def unicode(self):
return self.decode()
def valid(self, encoding=None, errors=SURROGATE_ESCAPE):
return self.display(encoding, errors) == self
@staticmethod
def _codeargs(encoding, errors):
if not encoding:
encoding = locale.getlocale(locale.LC_MESSAGES)[1]
if not encoding:
encoding = sys.getdefaultencoding()
if RE_UTF8.match(encoding):
encoding = UTF8_ENCODING
if errors.lower() == 'strict':
errors = SURROGATE_ESCAPE
return encoding, errors
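# Minimal usage sketch (illustrative; the byte string is hypothetical):
#   s = S(b'caf\xc3\xa9')  # decoded with the surrogate-aware UTF-8 codec
#   s.unicode()            # -> u'caf\xe9'
#   s.bytes()              # -> b'caf\xc3\xa9'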
| gpl-2.0 | 1,803,338,357,491,945,200 | 33.732456 | 81 | 0.613335 | false | 3.821911 | false | false | false |
fsi-hska/fsiBot | modules/WeatherModule.py | 1 | 3372 | #! /usr/bin/env python
# coding=utf8
from BotModule import BotModule
import urllib, urllib2, json
class WeatherModule(BotModule):
def __init__(self):
return
def command(self, nick, cmd, args, type):
if cmd == "!wetter":
postalcode = "karlsruhe"
if len(args) > 0:
postalcode = ' '.join(args).lower()
        if postalcode.startswith('honolulu'):
answer = 'Computer sagt: NEIN!'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
elif postalcode == 'mêlée island':
answer = 'Dublonen, Dublonen!'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
try:
u = urllib.urlopen("http://api.openweathermap.org/data/2.1/find/name?q=%s&type=like&units=metric" % urllib.quote(postalcode))
except urllib2.HTTPError, e:
if self.DEBUG:
print 'Error fetching data, Error: %s' % e.code
return
except urllib2.URLError, e:
if self.DEBUG:
print 'Error fetching data, Error: %s' % e.args
return
if u.getcode() != 200:
if self.DEBUG:
print 'Error fetching data, Errorcode: %s' % u.getcode()
return
try:
jsondata = json.loads(u.read())
except ValueError, e:
if self.DEBUG:
print "ValueError %s" % e
return
if jsondata['cod'] != '200':
if jsondata['message'] != '':
answer = 'Leck? welches Leck?'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
if len(jsondata['list']) < 1:
answer = 'Leck? welches Leck?'
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
elif len(jsondata['list']) > 1:
            answer = "Mr Cotton's Papagei! Die selbe Frage!"
if type == 'public':
self.sendPublicMessage(answer)
else :
self.sendPrivateMessage(nick, answer)
return
weather = {}
try:
weather['city'] = jsondata['list'][0]['name']
weather['temp'] = jsondata['list'][0]['main']['temp']
weather['cond'] = jsondata['list'][0]['weather'][0]['description']
weather['windspeed'] = jsondata['list'][0]['wind']['speed']
weather['cloudiness'] = jsondata['list'][0]['clouds']['all']
weather['rain_last_1h'] = jsondata['list'][0]['rain']['1h']
weather['humidity'] = jsondata['list'][0]['main']['humidity']
except KeyError, e:
if self.DEBUG:
print "KeyError: %s" % e
answer = "Wetter für %s: %.2f°C, %s" % (weather['city'].encode('utf-8'), weather['temp'], weather['cond'].encode('utf-8'))
if 'windspeed' in weather:
answer += ", wind speed: %.1fkm/h" % weather['windspeed']
if 'humidity' in weather:
answer += ", humidity: %d%%" % weather['humidity']
if 'cloudiness' in weather:
answer += ", cloudiness: %d%%" % weather['cloudiness']
if 'rain_last_1h' in weather:
answer += ", rain last 1h: %.3fl/m²" % weather['rain_last_1h']
if type == 'public':
self.sendPublicMessage(answer)
if weather['temp'] > 30:
                self.sendPublicMessage('Willkommen in der Karibik, Schätzchen!')
else :
self.sendPrivateMessage(nick, answer)
def help(self, nick):
self.sendPrivateMessage(nick, "!wetter [Ort] - Gibt aktuelle Wetterdaten aus. Default Ort ist Karlsruhe.")
return
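# Illustrative fragment of the openweathermap v2.1 JSON this module expects
# (field names taken from the parsing above; values are hypothetical):
#   {"cod": "200", "list": [{"name": "Karlsruhe",
#                            "main": {"temp": 21.5, "humidity": 60}, ...}]}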
| mit | -964,415,333,796,767,900 | 27.516949 | 129 | 0.622585 | false | 2.959543 | false | false | false |
labase/vitollino | src/lab/views/main.py | 1 | 9327 | #! /usr/bin/env python
# -*- coding: UTF8 -*-
# This file is part of the Vittolino program
# Copyright 2011-2017 Carlo Oliveira <[email protected]>,
# `Labase <http://labase.selfip.org/>`__, `GPL <http://is.gd/3Udt>`__.
#
# Vittolino is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation (FSF), version 2 of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Generator of mazes and 'novel'-style games.
"""
from _spy.vitollino.vitollino import STYLE, INVENTARIO, Cena, Elemento
from _spy.vitollino.vitollino import Texto
from _spy.vitollino.vitollino import JOGO as j
from browser import window, html
Cena._cria_divs = lambda *_: None
STYLE['width'] = 1024
STYLE['min-height'] = "800px"
INVENTARIO.elt.style.width = 1024
IMG = dict(
A_NORTE="https://i.imgur.com/aLEjWgB.png",
A_LESTE="https://i.imgur.com/sivjAnO.png",
A_SUL="https://i.imgur.com/otHJhF0.png",
# B_NORTE="https://i.imgur.com/40K5493.png", B_LESTE="https://i.imgur.com/R3bpFXD.png",
B_OESTE="https://i.imgur.com/dlxY8hi.png", B_SUL="https://i.imgur.com/eYM3Yp9.png",
B_NORTE="https://activufrj.nce.ufrj.br/file/SuperPythonEM/rect3569.jpg",
B_LESTE="https://activufrj.nce.ufrj.br/file/SuperPythonEM/rect3565.jpg",
C_LESTE="https://i.imgur.com/94V79TA.png", C_NORTE="https://i.imgur.com/YJfnhy9.png",
C_OESTE="https://i.imgur.com/Fzz2FNz.png", C_SUL="https://i.imgur.com/LFKXlB1.png",
D_NORTE="http://i.imgur.com/1uWH7rU.png", D_LESTE="https://i.imgur.com/b0FcjLq.png",
D_OESTE="https://i.imgur.com/406g75C.png", D_SUL="https://i.imgur.com/HQBtUoQ.png",
E_NORTE="https://i.imgur.com/uNkTVGg.png", E_SUL="http://i.imgur.com/bculg4O.png",
E_LESTE="https://i.imgur.com/lUi1E1v.png", E_OESTE="https://i.imgur.com/bPBT1d7.png",
F_NORTE="https://i.imgur.com/iHsggAa.png", F_SUL="http://i.imgur.com/euNeDGs.png",
F_LESTE="https://i.imgur.com/NqSCDQR.png", F_OESTE="https://i.imgur.com/hG4mgby.png",
G_NORTE="https://i.imgur.com/XDIASJa.png", G_SUL="https://i.imgur.com/ARQZ8CX.png",
G_LESTE="https://i.imgur.com/pJOegNT.png", G_OESTE="http://i.imgur.com/9IhOYjO.png",
H_NORTE="https://i.imgur.com/WjTtZPn.png", H_LESTE="https://i.imgur.com/AzvB8hs.png",
H_OESTE="https://i.imgur.com/SIhLGCP.png", H_SUL="https://i.imgur.com/UVnpzzE.png",
I_NORTE="https://i.imgur.com/RSdQSH1.png", I_SUL="https://i.imgur.com/UGCRJ0d.png",
I_LESTE="https://i.imgur.com/jSn4zsl.png", I_OESTE="https://i.imgur.com/eG43vn5.png",
J_NORTE="https://i.imgur.com/MMO11Dv.png", J_SUL="https://i.imgur.com/RkWPb8Z.png",
J_LESTE="https://i.imgur.com/btv0qfO.png", J_OESTE="https://i.imgur.com/lDezYKu.png",
K_NORTE="https://i.imgur.com/Tx9Q6vW.png", K_SUL="https://i.imgur.com/rrI94Xh.png",
K_LESTE="https://i.imgur.com/R6gON2E.png", K_OESTE="https://i.imgur.com/Mn69uua.png",
L_NORTE="https://i.imgur.com/oAu9lkN.png", L_SUL="https://i.imgur.com/xTjd7UV.png",
L_LESTE="https://i.imgur.com/JMQAGvc.png", L_OESTE="http://i.imgur.com/UJBMKY7.png",
M_NORTE="https://i.imgur.com/qoHwGLW.png", M_SUL="https://i.imgur.com/5P3U1Ai.png",
M_LESTE="http://i.imgur.com/1UXBodl.png", M_OESTE="https://i.imgur.com/AC2KgZg.png",
N_NORTE="https://i.imgur.com/KVlUf94.png", N_LESTE="https://i.imgur.com/f6vR0tY.png",
N_OESTE="https://i.imgur.com/GE8IsRM.png", N_SUL="https://i.imgur.com/RfUP0ez.png",
O_NORTE="https://i.imgur.com/lOT96Hr.png", O_SUL="https://i.imgur.com/HtRKv7X.png",
O_LESTE="https://i.imgur.com/uvPjc14.png", O_OESTE="https://i.imgur.com/I7Gn0Xx.png",
P_NORTE="https://i.imgur.com/OutDPac.png", P_SUL="https://i.imgur.com/sAIhp4b.png",
P_LESTE="https://i.imgur.com/dc2Ol59.png", P_OESTE="https://i.imgur.com/9IBwxjI.png",
Q_NORTE="https://i.imgur.com/JRYlZeN.png", Q_SUL="http://i.imgur.com/4BCiuYZ.png",
Q_LESTE="https://i.imgur.com/ek4cwBg.png", Q_OESTE="https://i.imgur.com/vmZHZmr.png",
R_NORTE="https://i.imgur.com/qnjq624.png", R_SUL="https://i.imgur.com/nZvwdhP.png",
R_LESTE="https://i.imgur.com/gS4rXYk.png", R_OESTE="http://i.imgur.com/2Z36mLI.png"
)
PROP = dict(
NOTE="https://i.imgur.com/SghupND.png", LIVRO="https://i.imgur.com/yWylotH.png?1",
FORCE="https://i.imgur.com/aLTJY2B.png",
FAKEOB = "https://upload.wikimedia.org/wikipedia/commons/3/3d/Simple_Rectangle_-_Semi-Transparent.svg"
)
def cria_lab():
def und(ch):
return "MANSÃO_%s" % NOME[ch].replace(" ", "_") if ch in NOME else "_NOOO_"
j.c.c(**SCENES)
salas = {nome: [getattr(j.c, lado) for lado in lados if hasattr(j.c, lado)] for nome, lados in ROOMS.items()}
j.s.c(**salas)
chambers = [[getattr(j.s, und(ch)) if hasattr(j.s, und(ch)) else None for ch in line] for line in MAP]
j.l.m(chambers)
blqa, blqb = j.s.MANSÃO_BLOQUEIO.sul.N, j.s.MANSÃO_ARMA_DO_CRIME.norte.N
j.s.MANSÃO_HALL.oeste.portal(N=j.s.MANSÃO_FACHADA.oeste)
print("cria_lab", blqa.img)
blqa.fecha()
blqb.fecha()
j.s.MANSÃO_HALL.leste.vai()
# j.s.MANSÃO_HALL.oeste.vai()
class Note:
def __init__(self):
self.onde = self.cena = j.s.MANSÃO_HALL.leste
print("implanta_livro_de_notas", self.cena.img)
self.livro = Cena(PROP["LIVRO"])
self.papel = Elemento(
img=PROP["FAKEOB"], tit="Um lavatorio", vai=self.pega_papel, style=dict(
left=360, top=356, width=170, height="111px"))
self.implanta_livro_de_notas()
self.div = html.DIV(style=dict(
position="absolute", left=45, top=70, width=450, background="transparent", border="none"))
self.ta = html.TEXTAREA(CODE, cols="70", rows="20", style=dict(
position="absolute", left=50, top=50, background="transparent", border="none"))
self.div <= self.ta
self.livro.elt <= self.div
def implanta_livro_de_notas(self):
print("implanta_livro_de_notas", self.papel.img)
from _spy.vitollino.vitollino import Cursor
Cursor(self.papel.elt)
self.papel.entra(self.cena)
def pega_papel(self, _=0):
texto = Texto(self.cena, "Um Livro de Notas", "Você encontra um livro de notas")
texto.vai()
j.i.bota(self.papel, "papel", None) # texto.vai)
self.papel.vai = self.mostra_livro
def mostra_livro(self):
self.onde = j.i.cena
self.livro.portal(O=self.onde, L=self.onde)
self.livro.vai()
self.escreve_livro()
def escreve_livro(self):
cm = window.CodeMirror.fromTextArea(self.ta, dict(mode="python", theme="solarized"))
self.escreve_livro = lambda *_: None
class Force:
def __init__(self):
self.onde = self.cena = j.s.MANSÃO_HALL.leste
self.force = Elemento(
img=PROP["FORCE"], tit="campo de força", vai=self.toca_campo, style=dict(
left=0, top=30, width=850, height="680px"))
self.implanta_campo_de_forca()
def implanta_campo_de_forca(self):
self.force.entra(self.cena)
def toca_campo(self, _=0):
texto = Texto(self.cena, "Campo de Força", "Você recebe um pulso de força e é jogado para trás")
texto.vai()
def main(*_):
# criarsalab()
# j.m("https://is.gd/Ldlg0V")
cria_lab()
Note()
#Force()
pass
NOMES = """SALA A - FACHADA
SALA B - HALL
SALA C - SALA DE ESTAR
SALA D - CENA DO CRIME
SALA H - A CHAVE
SALA I - FOLHA DE CADERNO
SALA J - BLOQUEIO
SALA E - DESPENSA
SALA K - PANO ENSANGUENTADO
SALA L - ESCURIDÃO
SALA F - ENTRADA DO QUARTO
SALA G - QUARTO
SALA N - SALA DE TV
SALA Q - SALA DE JANTAR
SALA R - COZINHA
SALA P - CORREDOR
SALA O - SALA DE VIGILÂNCIA
SALA M - ARMA DO CRIME""".split("\n")
CARDINAL = "NORTE LESTE SUL OESTE".split()
NOME = {line.split(" - ")[0].split()[-1]: line.split(" - ")[1].replace(" ", "_") for line in NOMES}
ROOMS = {"MANSÃO_%s" % NOME[room]: ["MANSÃO_%s_%s" % (NOME[room], k) for k in CARDINAL]
for room in NOME.keys()}
SCENES = {"MANSÃO_%s_%s" % (NOME[room], k): IMG["%s_%s" % (room, k)]
for k in CARDINAL for room in NOME.keys() if "%s_%s" % (room, k) in IMG}
MAP = """
ABC
--D-E-FG
--HIJKL
----M-N
----OPQR"""[1:].split("\n")
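# Each letter in MAP names a room via NOME; '-' marks an empty cell, which
# becomes None in the chamber grid built by cria_lab().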
CODE = """
def cria_lab():
def und(ch):
return "MANSÃO_%s" % NOME[ch].replace(" ", "_") if ch in NOME else "_NOOO_"
j.c.c(**SCENES)
salas = {nome: [getattr(j.c, lado) for lado in lados if hasattr(j.c, lado)] for nome, lados in ROOMS.items()}
j.s.c(**salas)
chambers = [[getattr(j.s, und(ch)) if hasattr(j.s, und(ch)) else None for ch in line] for line in MAP]
j.l.m(chambers)
blqa, blqb = j.s.MANSÃO_BLOQUEIO.sul.N, j.s.MANSÃO_ARMA_DO_CRIME.norte.N
j.s.MANSÃO_HALL.oeste.portal(N=j.s.MANSÃO_FACHADA.oeste)
print("cria_lab", blqa.img)
blqa.fecha()
blqb.fecha()
j.s.MANSÃO_FACHADA.leste.vai()
""" | gpl-3.0 | 1,896,173,857,323,334,400 | 41.737327 | 113 | 0.63755 | false | 2.2 | false | false | false |
igorwwwwwwwwwwwwwwwwwwww/ngenious.website | fabfile.py | 1 | 1625 | from fabric.api import *
from fabric.utils import abort
from fabric.contrib.project import rsync_project
from fabric.contrib.files import exists
if not env.hosts:
env.hosts = [
'[email protected]',
'[email protected]',
]
project_name = 'ngenious.website'
target_dir = '/var/www/'+project_name
backup_dir = target_dir+'-backup'
staging_dir = target_dir+'-staging'
@task(default=True)
def deploy():
puts('> Cleaning up previous backup and staging dir')
run('rm -rf %s %s' % (backup_dir, staging_dir))
puts('> Preparing staging')
run('cp -r %s %s' % (target_dir, staging_dir))
puts('> Uploading changes')
with cd(staging_dir):
with hide('stdout'):
extra_opts = '--omit-dir-times'
rsync_project(
env.cwd,
'./',
delete=True,
exclude=['.git', '*.pyc'],
extra_opts=extra_opts,
)
puts('> Switching changes to live')
run('mv %s %s' % (target_dir, backup_dir))
run('mv %s %s' % (staging_dir, target_dir))
@task
def rollback():
if exists(backup_dir):
puts('> Rolling back to previous deploy')
run('mv %s %s' % (target_dir, staging_dir))
run('mv %s %s' % (backup_dir, target_dir))
else:
abort('Rollback failed, no backup exists')
@task
def reload():
puts('> Reloading nginx and php5-fpm')
run('service nginx reload')
run('service php5-fpm reload')
@task
def restart():
puts('> Restarting nginx and php5-fpm')
run('service nginx restart')
run('service php5-fpm restart')
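# Typical invocations (illustrative):
#   fab deploy    # rsync the project into a staging copy, then swap it live
#   fab rollback  # swap the previous deploy back into place
#   fab reload    # reload nginx and php5-fpm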
| mit | 6,400,153,944,193,456,000 | 26.083333 | 57 | 0.584615 | false | 3.329918 | false | false | false |
mvo5/snapcraft | tests/fake_servers/search.py | 2 | 5444 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016-2019 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import logging
import os
import urllib.parse
from pyramid import response
import tests
from tests.fake_servers import base
logger = logging.getLogger(__name__)
class FakeStoreSearchServer(base.BaseFakeServer):
# XXX This fake server as reused as download server, to avoid passing a
# port as an argument. --elopio - 2016-05-01
def configure(self, configurator):
configurator.add_route("info", "/v2/snaps/info/{snap}", request_method="GET")
configurator.add_view(self.info, route_name="info")
configurator.add_route(
"download", "/download-snap/{snap}", request_method="GET"
)
configurator.add_view(self.download, route_name="download")
def info(self, request):
snap = request.matchdict["snap"]
logger.debug(
"Handling details request for package {}, with headers {}".format(
snap, request.headers
)
)
if "User-Agent" not in request.headers:
response_code = 500
return response.Response(None, response_code)
payload = self._get_info_payload(request)
if payload is None:
response_code = 404
return response.Response(json.dumps({}).encode(), response_code)
response_code = 200
content_type = "application/hal+json"
return response.Response(
payload, response_code, [("Content-Type", content_type)]
)
def _get_info_payload(self, request):
# core snap is used in integration tests with fake servers.
snap = request.matchdict["snap"]
# tests/data/test-snap.snap
test_sha3_384 = (
"8c0118831680a22090503ee5db98c88dd90ef551d80fc816"
"dec968f60527216199dacc040cddfe5cec6870db836cb908"
)
revision = "10000"
confinement = "strict"
if snap in ("test-snap", "core"):
sha3_384 = test_sha3_384
elif snap == "snapcraft":
sha3_384 = test_sha3_384
revision = "25"
confinement = "classic"
elif snap == "test-snap-with-wrong-sha":
sha3_384 = "wrong sha"
elif (
snap == "test-snap-branded-store"
and request.headers.get("Snap-Device-Store") == "Test-Branded"
):
sha3_384 = test_sha3_384
else:
return None
channel_map = list()
for arch in ("amd64", "i386", "s390x", "arm64", "armhf", "ppc64el"):
for risk in ("stable", "edge"):
channel_map.append(
{
"channel": {
"architecture": arch,
"name": risk,
"released-at": "019-01-17T15:01:26.537392+00:00",
"risk": risk,
"track": "latest",
},
"download": {
"deltas": [],
"sha3-384": sha3_384,
"url": urllib.parse.urljoin(
"http://localhost:{}".format(self.server.server_port),
"download-snap/test-snap.snap",
),
},
"created-at": "2019-01-16T14:59:16.711111+00:00",
"confinement": confinement,
"revision": revision,
}
)
return json.dumps(
{
"channel-map": channel_map,
"snap": {
"name": snap,
"snap-id": "good",
"publisher": {
"id": snap + "-developer-id",
"validation": "unproven",
},
},
"snap-id": "good",
"name": snap,
}
).encode()
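    # Illustrative shape of the payload built above (keys mirror the fake
    # store's info endpoint; values here are hypothetical):
    #   {"channel-map": [{"channel": {...}, "download": {...}, ...}],
    #    "snap": {"name": ..., "snap-id": "good", "publisher": {...}},
    #    "snap-id": "good", "name": ...}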
def download(self, request):
snap = request.matchdict["snap"]
logger.debug("Handling download request for snap {}".format(snap))
if "User-Agent" not in request.headers:
response_code = 500
return response.Response(None, response_code)
response_code = 200
content_type = "application/octet-stream"
# TODO create a test snap during the test instead of hardcoding it.
# --elopio - 2016-05-01
snap_path = os.path.join(
os.path.dirname(tests.__file__), "data", "test-snap.snap"
)
with open(snap_path, "rb") as snap_file:
return response.Response(
snap_file.read(), response_code, [("Content-Type", content_type)]
)
| gpl-3.0 | -6,621,952,700,684,669,000 | 35.05298 | 86 | 0.5259 | false | 4.223429 | true | false | false |
CMTaylor/robotframework-autoitlibrary | setup.py | 3 | 5035 | """
Package: robotframework-AutoItLibrary
Module: AutoItLibrary Installation Module
Purpose: This is a Python "Distutils" setup program used to build installers
         for, and to install, the robotframework-AutoItLibrary.
Copyright (c) 2008-2010 Texas Instruments, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "Martin Taylor <[email protected]>"
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import sys
import os
import shutil
import subprocess
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
License :: OSI Approved :: Apache Software License
Operating System :: Microsoft :: Windows
Programming Language :: Python
Topic :: Software Development :: Testing
"""[1:-1]
DESCRIPTION = """
AutoItLibrary is a Robot Framework keyword library wrapper for for the
freeware AutoIt tool (http://www.autoitscript.com/autoit3/index.shtml)
using AutoIt's AutoItX.dll COM object. The AutoItLibrary class
provides a proxy for the AutoIt keywords callable on the AutoIt COM
object and provides additional high-level keywords implemented as
methods in this class.
"""[1:-1]
if __name__ == "__main__":
#
# Install the 3rd party packages
#
if sys.argv[1].lower() == "install" :
if os.name == "nt" :
#
# Install and register AutoItX
#
if os.path.isfile(os.path.join(get_python_lib(), "AutoItLibrary/lib/AutoItX3.dll")) :
print "Don't think we need to unregister the old one..."
instDir = os.path.normpath(os.path.join(get_python_lib(), "AutoItLibrary/lib"))
if not os.path.isdir(instDir) :
os.makedirs(instDir)
instFile = os.path.normpath(os.path.join(instDir, "AutoItX3.dll"))
shutil.copyfile("3rdPartyTools/AutoIt/AutoItX3.dll", instFile)
#
# Register the AutoItX COM object
# and make its methods known to Python
#
cmd = r"%SYSTEMROOT%\system32\regsvr32.exe /S " + instFile
print cmd
subprocess.check_call(cmd, shell=True)
makepy = os.path.normpath(os.path.join(get_python_lib(), "win32com/client/makepy.py"))
#
# Make sure we have win32com installed
#
if not os.path.isfile(makepy) :
print "AutoItLibrary requires win32com. See http://starship.python.net/crew/mhammond/win32/."
sys.exit(2)
cmd = "python %s %s" % (makepy, instFile)
print cmd
subprocess.check_call(cmd)
else :
print "AutoItLibrary cannot be installed on non-Windows platforms."
sys.exit(2)
#
# Figure out the install path
#
destPath = os.path.normpath(os.path.join(os.getenv("HOMEDRIVE"), r"\RobotFramework\Extensions\AutoItLibrary"))
#
# Do the distutils installation
#
setup(name = "AutoItLibrary",
version = "1.1",
description = "AutoItLibrary for Robot Framework",
author = "Martin Taylor",
author_email = "[email protected]",
url = "http://code.google.com/p/robotframework-autoitlibrary/",
license = "Apache License 2.0",
platforms = "Microsoft Windows",
classifiers = CLASSIFIERS.splitlines(),
long_description = DESCRIPTION,
package_dir = {'' : "src"},
packages = ["AutoItLibrary"],
data_files = [(destPath,
["ReadMe.txt",
"COPYRIGHT.txt",
"LICENSE.txt",
"doc/AutoItLibrary.html",
"3rdPartyTools/AutoIt/Au3Info.exe",
"3rdPartyTools/AutoIt/AutoItX.chm",
"3rdPartyTools/AutoIt/AutoIt_License.html",
]),
(os.path.join(destPath, "tests"),
["tests/CalculatorGUIMap.py",
"tests/__init__.html",
"tests/Calculator_Test_Cases.html",
"tests/RobotIDE.bat",
"tests/RunTests.bat"
]),
]
)
#
# -------------------------------- End of file --------------------------------
| apache-2.0 | 6,858,002,108,029,616,000 | 39.934959 | 114 | 0.565641 | false | 4.241786 | true | false | false |
danrschlosser/eventum | eventum/routes/auth.py | 1 | 8487 | """
.. module:: auth
:synopsis: All routes on the ``auth`` Blueprint.
.. moduleauthor:: Dan Schlosser <[email protected]>
"""
import base64
import httplib2
import os
from apiclient.discovery import build
from flask import (Blueprint, render_template, request, flash, session, g,
redirect, url_for, current_app)
from oauth2client.client import (FlowExchangeError,
flow_from_clientsecrets,
AccessTokenCredentials)
from eventum.lib.json_response import json_success, json_error_message
from eventum.models import User, Whitelist
from eventum.forms import CreateProfileForm
from eventum.routes.base import MESSAGE_FLASH
auth = Blueprint('auth', __name__)
gplus_service = build('plus', 'v1')
@auth.route('/login', methods=['GET'])
def login():
"""If the user is not logged in, display an option to log in. On click,
make a request to Google to authenticate.
If they are logged in, redirect.
**Route:** ``/admin/login``
**Methods:** ``GET``
"""
if g.user is not None and 'gplus_id' in session:
# use code=303 to avoid POSTing to the next page.
return redirect(url_for('admin.index'), code=303)
load_csrf_token_into_session()
args_next = request.args.get('next')
next = args_next if args_next else request.url_root
client_id = current_app.config['EVENTUM_GOOGLE_CLIENT_ID']
return render_template('eventum_auth/login.html',
client_id=client_id,
state=session['state'],
# reauthorize=True,
next=next)
@auth.route('/store-token', methods=['POST'])
def store_token():
"""Do the oauth flow for Google plus sign in, storing the access token
in the session, and redircting to create an account if appropriate.
    Because this method will be called from a ``$.ajax()`` request in
    JavaScript, we can't return ``redirect()``, so instead this method returns
    the URL that the user should be redirected to, and the redirect happens in
    JavaScript:

    .. code:: javascript

        success: function(response) {
            window.location.href = response.data.redirect_url;
        }
**Route:** ``/admin/store-token``
**Methods:** ``POST``
"""
if request.args.get('state', '') != session.get('state'):
return json_error_message('Invalid state parameter.', 401)
del session['state']
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets(
current_app.config['EVENTUM_CLIENT_SECRETS_PATH'],
scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
return json_error_message('Failed to upgrade the authorization code.',
401)
gplus_id = credentials.id_token['sub']
# Store the access token in the session for later use.
session['credentials'] = credentials.access_token
session['gplus_id'] = gplus_id
if User.objects(gplus_id=gplus_id).count() == 0:
# A new user model must be made
# Get the user's name and email to populate the form
http = httplib2.Http()
http = credentials.authorize(http)
people_document = gplus_service.people().get(
userId='me').execute(http=http)
# The user must be whitelisted in order to create an account.
email = people_document['emails'][0]['value']
if Whitelist.objects(email=email).count() != 1:
return json_error_message('User has not been whitelisted.',
401,
{'whitelisted': False, 'email': email})
return json_success({
'redirect_url': url_for('.create_profile',
next=request.args.get('next'),
name=people_document['displayName'],
email=email,
image_url=people_document['image']['url'])
})
user = User.objects().get(gplus_id=gplus_id)
user.register_login()
user.save()
# The user already exists. Redirect to the next url or
# the root of the application ('/')
if request.args.get('next'):
return json_success({'redirect_url': request.args.get('next')})
return json_success({'redirect_url': request.url_root})
@auth.route('/create-profile', methods=['GET', 'POST'])
def create_profile():
"""Create a profile (filling in the form with openid data), and
register it in the database.
**Route:** ``/admin/create-profile``
**Methods:** ``GET, POST``
"""
if g.user is not None and 'gplus_id' in session:
# use code=303 to avoid POSTing to the next page.
return redirect(url_for('admin.index'), code=303)
form = CreateProfileForm(request.form,
name=request.args['name'],
email=request.args['email'],
next=request.args['next'])
if form.validate_on_submit():
if User.objects(email=form.email.data).count() != 0:
# A user with this email already exists. Override it.
user = User.objects.get(email=form.email.data)
            user.gplus_id = session['gplus_id']
user.name = form.name.data
flash('Account with this email already exists. Overridden.',
MESSAGE_FLASH)
user.register_login()
user.save()
else:
            # Retrieve their user type from the whitelist, then mark the
            # whitelist entry as redeemed.
wl = Whitelist.objects().get(email=form.email.data)
user_type = wl.user_type
wl.redeemed = True
wl.save()
# Create a brand new user
user = User(email=form.email.data,
name=form.name.data,
gplus_id=session['gplus_id'],
user_type=user_type,
image_url=request.args.get('image_url'))
flash('Account created successfully.', MESSAGE_FLASH)
user.register_login()
user.save()
# redirect to the next url or the root of the application ('/')
if form.next.data:
# use code=303 to avoid POSTing to the next page.
return redirect(form.next.data, code=303)
# use code=303 to avoid POSTing to the next page.
return redirect('/', code=303)
return render_template('eventum_auth/create_profile.html',
image_url=request.args.get('image_url'), form=form)
@auth.route('/logout', methods=['GET'])
def logout():
"""Logs out the current user.
**Route:** ``/admin/logout``
**Methods:** ``GET``
"""
session.pop('gplus_id', None)
g.user = None
return redirect(url_for('client.index'))
def load_csrf_token_into_session():
"""Create a unique session cross-site request forgery (CSRF) token and
load it into the session for later verification.
"""
# 24 bytes in b64 == 32 characters
session['state'] = base64.urlsafe_b64encode(os.urandom(24))
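# Illustrative value: urlsafe_b64encode of 24 random bytes yields a
# 32-character token such as 'Zm9vYmFyYmF6cXV4MTIzNDU2Nzg5MGFi'
# (hypothetical), stored in session['state'] for later CSRF checks.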
@auth.route('/disconnect', methods=['GET', 'POST'])
def disconnect():
"""Revoke current user's token and reset their session.
**Route:** ``/admin/disconnect``
**Methods:** ``GET, POST``
"""
# Only disconnect a connected user.
credentials = AccessTokenCredentials(
session.get('credentials'), request.headers.get('User-Agent'))
if credentials is None:
return json_error_message('Current user not connected.', 401)
# Execute HTTP GET request to revoke current token.
access_token = credentials.access_token
url = ('https://accounts.google.com/o/oauth2/revoke?token={}'
.format(str(access_token)))
h = httplib2.Http()
result = h.request(url, 'GET')[0]
session.pop('gplus_id', None)
g.user = None
if result['status'] == '200':
# Reset the user's session.
del session['credentials']
else:
# For whatever reason, the given token was invalid.
current_app.logger.error('Failed to revoke token for given user.')
# use code=303 to avoid POSTing to the next page.
return redirect(url_for('.login'), code=303)
| mit | 309,365,035,966,521,340 | 34.51046 | 78 | 0.592318 | false | 4.1 | false | false | false |
meraki-analytics/cassiopeia-datastores | cassiopeia-diskstore/cassiopeia_diskstore/spectator.py | 1 | 3073 | from typing import Type, TypeVar, MutableMapping, Any, Iterable
from datapipelines import DataSource, DataSink, PipelineContext, Query, validate_query
from cassiopeia.data import Platform, Region
from cassiopeia.dto.spectator import FeaturedGamesDto, CurrentGameInfoDto
from cassiopeia.datastores.uniquekeys import convert_region_to_platform
from .common import SimpleKVDiskService
T = TypeVar("T")
class SpectatorDiskService(SimpleKVDiskService):
@DataSource.dispatch
def get(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> T:
pass
@DataSource.dispatch
def get_many(self, type: Type[T], query: MutableMapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
pass
@DataSink.dispatch
def put(self, type: Type[T], item: T, context: PipelineContext = None) -> None:
pass
@DataSink.dispatch
def put_many(self, type: Type[T], items: Iterable[T], context: PipelineContext = None) -> None:
pass
##################
# Featured Games #
##################
_validate_get_featured_games_query = Query. \
has("platform").as_(Platform)
@get.register(FeaturedGamesDto)
@validate_query(_validate_get_featured_games_query, convert_region_to_platform)
def get_featured_games(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> FeaturedGamesDto:
key = "{clsname}.{platform}".format(clsname=FeaturedGamesDto.__name__, platform=query["platform"].value)
return FeaturedGamesDto(self._get(key))
@put.register(FeaturedGamesDto)
def put_featured_games(self, item: FeaturedGamesDto, context: PipelineContext = None) -> None:
platform = Region(item["region"]).platform.value
key = "{clsname}.{platform}".format(clsname=FeaturedGamesDto.__name__, platform=platform)
self._put(key, item)
################
# Current Game #
################
_validate_get_current_game_query = Query. \
has("platform").as_(Platform).also. \
has("summoner.id").as_(str)
@get.register(CurrentGameInfoDto)
@validate_query(_validate_get_current_game_query, convert_region_to_platform)
def get_current_game(self, query: MutableMapping[str, Any], context: PipelineContext = None) -> CurrentGameInfoDto:
key = "{clsname}.{platform}.{id}".format(clsname=CurrentGameInfoDto.__name__,
platform=query["platform"].value,
id=query["summoner.id"])
return CurrentGameInfoDto(self._get(key))
@put.register(CurrentGameInfoDto)
def put_current_game(self, item: CurrentGameInfoDto, context: PipelineContext = None) -> None:
platform = Region(item["region"]).platform.value
key = "{clsname}.{platform}.{id}".format(clsname=CurrentGameInfoDto.__name__,
platform=platform,
id=item["summonerId"])
self._put(key, item)
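    # Illustrative cache keys produced by the puts above (platform and
    # summoner id are hypothetical):
    #   "FeaturedGamesDto.NA1"
    #   "CurrentGameInfoDto.NA1.abc123"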
| mit | 3,853,958,600,843,314,000 | 42.28169 | 119 | 0.636837 | false | 3.846058 | false | false | false |
ddcampayo/polyFEM | tools/movie_particles.py | 1 | 1385 | #!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
#import matplotlib.cm as cm
#from matplotlib.colors import Normalize
import sys
#print "This is the name of the script: ", sys.argv[0]
#print "Number of arguments: ", len(sys.argv)
#print "The arguments are: " , str(sys.argv)
if(len(sys.argv) == 1) :
init_t = 0
else:
init_t = int( sys.argv[1] )
#import pylab as pl
plt.figure(figsize=(8,8))
skip=1
#path='timings_full/'
path='./'
LL= 1
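# Expected input layout (inferred from the indexing below): each
# <step>/particles.dat row holds at least x, y (columns 0-1), volume
# (column 3) and pressure (column 5) for one particle.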
for n in range( init_t ,2000000+skip,skip):
plt.clf()
dt=np.loadtxt(path+str(n)+'/particles.dat')
x=dt[:,0]; y=dt[:,1];
vol=dt[:,3]
# vx=dt[:,5]; vym=dt[:,6];
p=dt[:,5]
# I=dt[:,14]; # eccentricity
r = np.sqrt( x**2 + y**2 )
rm = np.argmax(r)
p -= p[ rm ] # np.min( p )
# plt.plot( r , p , 'o' )
plt.scatter( x , y , s=80 , c=p )
# plt.scatter( x , y , 80, c= vol , vmin=0.0022, vmax=0.0028 )
# plt.scatter( x , y , 10, c=w )
# plt.scatter( x , y , 10, c=I )
# plt.scatter( x , y , 80, c= I , vmin= 1.02e-6, vmax= 1.06e-6 )
# plt.scatter( x , y , 80, c= np.log( d2 + 1e-18 ) )
# plt.scatter( x , y , 10, c=om )
plt.xlim([-LL/2.0 , LL/2.0 ])
plt.ylim([-LL/2.0 , LL/2.0 ])
# pl.colorbar(ticks=[0.45,0.55])
print( 'snap{:03d}'.format( int(n/skip) ) )
plt.savefig( 'snap{:03d}'.format( int(n/skip) ) )
| gpl-3.0 | -8,238,294,631,123,967,000 | 19.367647 | 68 | 0.537184 | false | 2.379725 | false | false | false |
AveryPratt/django-imager | imagersite/imager_images/migrations/0003_auto_20170127_1757.py | 1 | 1340 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-27 17:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imager_images', '0002_auto_20170125_0218'),
]
operations = [
migrations.AlterField(
model_name='albums',
name='date_modified',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='albums',
name='date_published',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='albums',
name='date_uploaded',
field=models.DateField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_modified',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_published',
field=models.DateField(auto_now=True, null=True),
),
migrations.AlterField(
model_name='photos',
name='date_uploaded',
field=models.DateField(auto_now_add=True, null=True),
),
]
| mit | 3,201,460,821,906,132,500 | 28.777778 | 65 | 0.55597 | false | 4.161491 | false | false | false |
llfccc/weekly | weekly/utils/permission_middleware.py | 1 | 2194 | # !/usr/bin/env python
# !-*-coding:utf-8-*-
import time
from django.utils.deprecation import MiddlewareMixin
from django.core.cache import cache
from utils.tools import my_response, Logger
# class PermissionCheck(MiddlewareMixin):
# """
#     Middleware that checks request permissions
# """
# cookie_time = 2 * 3600
# @staticmethod
# def process_request(request):
# """
# :param request:
# :return:
# """
# # print "start", time.time()
# if "login" in request.path:
# return
# # request.COOKIES["sid"] = "9342c00a6cb65a2d35e2bd48cc2ab163"
# sid = request.COOKIES.get("sid")
# content = cache.get(sid)
# if content:
# username = content.get("username")
# Logger.debug("{0}: request, url is: {1}".format(username, request.path.encode("utf-8")))
# request.COOKIES["username"] = username
# else:
# return my_response(code=-1, msg="登录超时!")
# @staticmethod
# def process_response(request, response):
# sid = request.COOKIES.get("sid")
# if sid and "logout" not in request.path:
# cache.expire(sid, timeout=PermissionCheck.cookie_time)
# response.set_cookie("sid", sid, max_age=PermissionCheck.cookie_time - 10)
# # print "end time", time.time()
# return response
class PrintCheck(MiddlewareMixin):
"""
    Middleware that checks request permissions
"""
cookie_time = 2 * 3600
@staticmethod
def process_request(request):
"""
:param request:
:return:
"""
# print "start", time.time()
if "login" in request.path:
return
# request.COOKIES["sid"] = "9342c00a6cb65a2d35e2bd48cc2ab163"
sid = request.COOKIES.get("sid")
content = cache.get(sid)
if content:
chinese_name = content.get("chinese_name")
Logger.debug("{0}: request, url is: {1}".format(username, request.path.encode("utf-8")))
request.COOKIES["chinese_name"] = chinese_name
else:
return my_response(code=-1, msg="登录超时!") | apache-2.0 | -5,133,435,417,261,500,000 | 30.279412 | 102 | 0.564911 | false | 3.245802 | false | false | false |
slackpad/hashtagtodo-open | todo/models/calendar.py | 1 | 1322 | from google.appengine.ext import ndb
class Calendar(ndb.Model):
summary = ndb.StringProperty()
time_zone = ndb.StringProperty()
show_in_todolist = ndb.BooleanProperty(default=True)
active = ndb.BooleanProperty(default=True)
watch_id = ndb.StringProperty()
watch_expires = ndb.DateTimeProperty()
resource_id = ndb.StringProperty()
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
@classmethod
def create_or_update(cls, user_key, _id, summary, time_zone):
if not _id:
raise ValueError('Invalid id for Calendar object.')
calendar = cls.get_or_insert(_id, parent=user_key,
summary=summary,
time_zone=time_zone)
if calendar.summary != summary or \
calendar.time_zone != time_zone:
calendar.summary = summary
calendar.time_zone = time_zone
calendar.put()
return calendar
@classmethod
def get_by_id(cls, _id):
return ndb.Key(urlsafe=_id).get()
@classmethod
def get_by_watch_id(cls, watch_id):
return cls.query(cls.watch_id==watch_id).get()
@classmethod
def get_all(cls, user_key):
return cls.query(ancestor=user_key)
| mit | -4,252,697,951,458,202,600 | 31.243902 | 65 | 0.611195 | false | 3.96997 | false | false | false |
op3/hdtv | hdtv/histogram.py | 1 | 26585 | # -*- coding: utf-8 -*-
# HDTV - A ROOT-based spectrum analysis software
# Copyright (C) 2006-2009 The HDTV development team (see file AUTHORS)
#
# This file is part of HDTV.
#
# HDTV is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# HDTV is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with HDTV; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os
from scipy.interpolate import InterpolatedUnivariateSpline
import numpy as np
import ROOT
import hdtv.color
import hdtv.rootext.mfile
import hdtv.rootext.calibration
import hdtv.rootext.display
import hdtv.rootext.fit
from hdtv.drawable import Drawable
from hdtv.specreader import SpecReader, SpecReaderError
from hdtv.cal import CalibrationFitter
from hdtv.util import LockViewport
# Don't add created spectra to the ROOT directory
ROOT.TH1.AddDirectory(ROOT.kFALSE)
def HasPrimitiveBinning(hist):
if hist.GetNbinsX() != (hist.GetXaxis().GetXmax() - hist.GetXaxis().GetXmin()):
return False
for bin in range(0, hist.GetNbinsX()):
if hist.GetBinWidth(bin) != 1.0:
return False
return True
class Histogram(Drawable):
"""
Histogram object
This class is hdtvs wrapper around a ROOT histogram. It adds a calibration,
plus some internal management for drawing the histogram to the hdtv spectrum
viewer.
"""
def __init__(self, hist, color=hdtv.color.default, cal=None):
Drawable.__init__(self, color, cal)
self._norm = 1.0
self._ID = None
self.effCal = None
self.typeStr = "spectrum"
self.cal = cal
if cal is None:
self.SetHistWithPrimitiveBinning(hist)
else:
self._hist = hist
def __str__(self):
return self.name
def __copy__(self):
# call C++ copy constructor
hist = self._hist.__class__(self._hist)
# create new spectrum object
return Histogram(hist, color=self.color, cal=self.cal)
# hist property
def _set_hist(self, hist):
self._hist = hist
if self.displayObj:
self.displayObj.SetHist(self._hist)
def _get_hist(self):
return self._hist
hist = property(_get_hist, _set_hist)
# name property
def _get_name(self):
if self._hist:
return self._hist.GetName()
def _set_name(self, name):
self._hist.SetName(name)
name = property(_get_name, _set_name)
# norm property
def _set_norm(self, norm):
self._norm = norm
if self.displayObj:
self.displayObj.SetNorm(norm)
def _get_norm(self):
return self._norm
norm = property(_get_norm, _set_norm)
@property
def info(self):
"""
Return a string describing this spectrum
"""
s = "Spectrum type: %s\n" % self.typeStr
if not self._hist:
return s
s += "Name: %s\n" % str(self)
s += "Nbins: %d\n" % self._hist.GetNbinsX()
xmin = self._hist.GetXaxis().GetXmin()
xmax = self._hist.GetXaxis().GetXmax()
if self.cal and not self.cal.IsTrivial():
s += "Xmin: %.2f (cal) %.2f (uncal)\n" % (self.cal.Ch2E(xmin), xmin)
s += "Xmax: %.2f (cal) %.2f (uncal)\n" % (self.cal.Ch2E(xmax), xmax)
else:
s += "Xmin: %.2f\n" % xmin
s += "Xmax: %.2f\n" % xmax
if not self.cal or self.cal.IsTrivial():
s += "Calibration: none\n"
elif isinstance(self.cal, ROOT.HDTV.Calibration):
s += "Calibration: Polynomial, degree %d\n" % self.cal.GetDegree()
else:
s += "Calibration: unknown\n"
return s
# TODO: sumw2 function should be called at some point for correct error
# handling
def Plus(self, spec):
"""
Add other spectrum to this one
"""
# If the spectra have the same calibration (~= have the same binning),
# the root build-in add can be used
if self.cal == spec.cal or (self.cal.IsTrivial() and spec.cal.IsTrivial()):
hdtv.ui.info("Adding binwise")
self._hist.Add(spec._hist, 1.0)
# If the binning is different, determine the amount to add to each bin
# by integrating the other spectrum
else:
hdtv.ui.info("Adding calibrated")
nbins = self._hist.GetNbinsX()
for n in range(0, nbins):
integral = ROOT.HDTV.TH1IntegrateWithPartialBins(
spec._hist,
spec.cal.E2Ch(self.cal.Ch2E(n - 0.5)),
spec.cal.E2Ch(self.cal.Ch2E(n + 0.5)),
)
# Note: Can't use Fill due to bin errors?
self._hist.SetBinContent(
n + 1, self._hist.GetBinContent(n + 1) + integral
)
# update display
if self.displayObj:
self.displayObj.SetHist(self._hist)
self.typeStr = "spectrum, modified (sum)"
def Minus(self, spec):
"""
Substract other spectrum from this one
"""
# If the spectra have the same calibration (~= have the same binning),
# the root build-in add can be used
if self.cal == spec.cal or (self.cal.IsTrivial() and spec.cal.IsTrivial()):
hdtv.ui.info("Adding binwise")
self._hist.Add(spec._hist, -1.0)
# If the binning is different, determine the amount to add to each bin
# by integrating the other spectrum
else:
hdtv.ui.info("Adding calibrated")
nbins = self._hist.GetNbinsX()
for n in range(0, nbins):
integral = ROOT.HDTV.TH1IntegrateWithPartialBins(
spec._hist,
spec.cal.E2Ch(self.cal.Ch2E(n - 0.5)),
spec.cal.E2Ch(self.cal.Ch2E(n + 0.5)),
)
# Note: Can't use Fill due to bin errors?
self._hist.SetBinContent(
n + 1, self._hist.GetBinContent(n + 1) - integral
)
# update display
if self.displayObj:
self.displayObj.SetHist(self._hist)
self.typeStr = "spectrum, modified (difference)"
def Multiply(self, factor):
"""
Multiply spectrum with factor
"""
self._hist.Scale(factor)
# update display
if self.displayObj:
self.displayObj.SetHist(self._hist)
self.typeStr = "spectrum, modified (multiplied)"
def Rebin(self, ngroup, calibrate=True):
"""
Rebin spectrum by adding ngroup bins into one
"""
bins = self._hist.GetNbinsX()
self._hist.RebinX(ngroup)
self._hist.GetXaxis().SetLimits(0, bins / ngroup)
# update display
if self.displayObj:
self.displayObj.SetHist(self._hist)
# update calibration
if calibrate:
if not self.cal:
self.cal.SetCal(0.0, 1.0)
self.cal.Rebin(ngroup)
self.displayObj.SetCal(self.cal)
hdtv.ui.info("Calibration updated for rebinned spectrum")
self.typeStr = f"spectrum, modified (rebinned, ngroup={ngroup})"
def Calbin(
self, binsize: float = 1.0, spline_order: int = 3, use_tv_binning: bool = True
):
"""
Rebin spectrum to match calibration unit
Args:
binsize: Size of calibrated bins
spline_order: Order of the spline interpolation (default: 3)
use_tv_binning: Center first bin on 0. (True) or
lower edge of first bin on 0. (False).
"""
nbins_old = self._hist.GetNbinsX()
lower_old = self.cal.Ch2E(0)
upper_old = self.cal.Ch2E(nbins_old - 1)
nbins = int(np.ceil(upper_old / binsize)) + 1
if use_tv_binning:
lower = -0.5 * binsize
upper = 0.5 * binsize + (upper_old // nbins) * (nbins - 1)
else:
lower = 0.0
upper = binsize + (upper_old // nbins) * (nbins - 1)
# Create new histogram with number of bins equal
# to the calibrated range of the old histogram
# Always -0.5 to create standard tv-type histogram
newhist = ROOT.TH1D(
self._hist.GetName(), self._hist.GetTitle(), nbins, -0.5, nbins - 0.5
)
input_bins_center, input_hist = np.transpose(
[
[
self.cal.Ch2E(n - 1),
self._hist.GetBinContent(n)
/ (self.cal.Ch2E(n) - self.cal.Ch2E(n - 1)),
]
for n in range(1, self._hist.GetNbinsX() + 1)
]
)
output_bins_low = np.arange(nbins) * binsize + lower
output_bins_high = output_bins_low + binsize
inter = InterpolatedUnivariateSpline(
input_bins_center, input_hist, k=spline_order
)
inter_integral_v = np.vectorize(inter.integral)
output_hist = np.maximum(
inter_integral_v(output_bins_low, output_bins_high), 0.0
)
# Suppress bins outside of original histogram range
min_bin = int((lower_old - lower) / binsize)
output_hist[:min_bin] = np.zeros(min_bin)
for i in range(0, nbins):
newhist.SetBinContent(i + 1, output_hist[i])
self._hist = newhist
if use_tv_binning:
if binsize != 1.0 or self.cal:
self.cal.SetCal(0, binsize)
else:
self.cal.SetCal(binsize / 2, binsize)
# update display
if self.displayObj:
self.displayObj.SetHist(self._hist)
# update calibration
self.displayObj.SetCal(self.cal)
hdtv.ui.info(f"Rebinned to calibration unit (binsize={binsize}).")
def Poisson(self):
"""
Randomize each bin content assuming a Poissonian distribution.
"""
for i in range(0, self._hist.GetNbinsX() + 1):
counts = self._hist.GetBinContent(i)
# error = self._hist.GetBinError(i)
varied = np.random.poisson(counts)
self._hist.SetBinContent(i, varied)
if self.displayObj:
self.displayObj.SetHist(self._hist)
def Draw(self, viewport):
"""
Draw this spectrum to the viewport
"""
if self.viewport is not None and not self.viewport == viewport:
# Unlike the DisplaySpec object of the underlying implementation,
# Spectrum() objects can only be drawn on a single viewport
raise RuntimeError("Spectrum can only be drawn on a single viewport")
self.viewport = viewport
# Lock updates
with LockViewport(self.viewport):
# Show spectrum
if self.displayObj is None and self._hist is not None:
if self.active:
color = self._activeColor
else:
color = self._passiveColor
self.displayObj = ROOT.HDTV.Display.DisplaySpec(self._hist, color)
self.displayObj.SetNorm(self.norm)
self.displayObj.Draw(self.viewport)
# add calibration
if self.cal:
self.displayObj.SetCal(self.cal)
# and ID
if self.ID is not None:
ID = str(self.ID).strip(".")
self.displayObj.SetID(ID)
def WriteSpectrum(self, fname, fmt):
"""
Write the spectrum to file
"""
fname = os.path.expanduser(fname)
try:
SpecReader.WriteSpectrum(self._hist, fname, fmt)
except SpecReaderError as msg:
hdtv.ui.error("Failed to write spectrum: %s (file: %s)" % (msg, fname))
return False
return True
def SetHistWithPrimitiveBinning(self, hist, caldegree=4, silent=False):
log = hdtv.ui.debug if silent else hdtv.ui.info
if HasPrimitiveBinning(hist):
self._hist = hist
else:
log(
hist.GetName()
+ " unconventional binning detected. Converting and trying to create calibration using a polynomial of order "
+ str(caldegree)
+ " ..."
)
self._hist = ROOT.TH1D(
hist.GetName(), hist.GetTitle(), hist.GetNbinsX(), 0, hist.GetNbinsX()
)
if caldegree:
cf = CalibrationFitter()
# TODO: Slow
for bin in range(0, hist.GetNbinsX()):
if caldegree:
cf.AddPair(bin, hist.GetXaxis().GetBinUpEdge(bin))
self._hist.SetBinContent(bin, hist.GetBinContent(bin))
# Original comment by JM in commit #dd438b7c44265072bf8b0528170cecc95780e38c:
# "TODO: Copy Errors?"
#
# Edit by UG: It makes sense to simply copy the uncertainties. There are two
# possible cases:
# 1. The ROOT histogram contains user-defined uncertainties per bin that can
# be retrieved by calling hist.GetBinError(). In this case, it can be
# assumed that the user knew what he was doing when the uncertainties
# were assigned.
# 2. The ROOT histogram contains no user-defined uncertainties. In this case,
# a call of hist.GetBinError() will return the square root of the bin
# content, which is a sensible assumption.
#
# Since text spectra are loaded in a completely analogous way, implicitly
# assuming that the uncertainties are Poissonian, there is no need to issue
# an additional warning.
self._hist.SetBinError(bin, hist.GetBinError(bin))
if caldegree:
cf.FitCal(caldegree)
self.cal = cf.calib
class FileHistogram(Histogram):
"""
File spectrum object
A spectrum that comes from a file in any of the formats supported by hdtv.
"""
def __init__(self, fname, fmt=None, color=hdtv.color.default, cal=None):
"""
Read a spectrum from file
"""
# check if file exists
try:
os.path.exists(fname)
except OSError:
hdtv.ui.error("File %s not found" % fname)
raise
# call to SpecReader to get the hist
try:
hist = SpecReader.GetSpectrum(fname, fmt)
except SpecReaderError as msg:
hdtv.ui.error(str(msg))
raise
self.fmt = fmt
self.filename = fname
Histogram.__init__(self, hist, color, cal)
self.typeStr = "spectrum, read from file"
@property
def info(self):
# get the info property of the baseclass
s = super(FileHistogram, self).info
s += "Filename: %s\n" % self.filename
if self.fmt:
s += "File format: %s\n" % self.fmt
else:
s += "File format: autodetected\n"
return s
def Refresh(self):
"""
Reload the spectrum from disk
"""
try:
os.path.exists(self.filename)
except OSError:
hdtv.ui.warning("File %s not found, keeping previous data" % self.filename)
return
# call to SpecReader to get the hist
try:
hist = SpecReader.GetSpectrum(self.filename, self.fmt)
except SpecReaderError as msg:
hdtv.ui.warning(
"Failed to load spectrum: %s (file: %s), keeping previous data"
% (msg, self.filename)
)
return
self.hist = hist
class CutHistogram(Histogram):
def __init__(self, hist, axis, gates, color=hdtv.color.default, cal=None):
Histogram.__init__(self, hist, color, cal)
self.gates = gates
self.axis = axis
@property
def info(self):
s = super(CutHistogram, self).info
s += "cut "
s += "on %s axis gate: " % self.axis
for i in range(len(self.gates)):
g = self.gates[i]
s += "%d - %d " % (g.p1.pos_cal, g.p2.pos_cal)
if not i == len(self.gates):
"and"
return s
class THnSparseWrapper(object):
"""
Wrapper around a 2d THnSparse object, providing ProjectionX and
ProjectionY.
"""
def __init__(self, hist):
if not (isinstance(hist, ROOT.THnSparse) and hist.GetNdimensions() == 2):
raise RuntimeError("Class needs a THnSparse histogram of dimension 2")
self.__dict__["_hist"] = hist
def __setattr__(self, name, value):
self.__dict__["_hist"].__setattr__(name, value)
def __getattr__(self, name):
return getattr(self.__dict__["_hist"], name)
def GetXaxis(self):
return self._hist.GetAxis(0)
def GetYaxis(self):
return self._hist.GetAxis(1)
def ProjectionX(self, name, b1, b2, opt):
a = self._hist.GetAxis(1)
if b1 > b2:
a.SetRange(0, a.GetNbins())
else:
a.SetRange(b1, b2)
proj = self._hist.Projection(0, opt)
a.SetRange(0, a.GetNbins())
proj.SetName(name)
return proj
def ProjectionY(self, name, b1, b2, opt):
a = self._hist.GetAxis(0)
if b1 > b2:
a.SetRange(0, a.GetNbins())
else:
a.SetRange(b1, b2)
proj = self._hist.Projection(1, opt)
a.SetRange(0, a.GetNbins())
proj.SetName(name)
return proj
class Histo2D(object):
def __init__(self):
pass
@property
def name(self):
return "generic 2D histogram"
@property
def xproj(self):
return None
@property
def yproj(self):
return None
def ExecuteCut(self, regionMarkers, bgMarkers, axis):
return None
class RHisto2D(Histo2D):
"""
ROOT TH2-backed matrix for projection
"""
def __init__(self, rhist):
self.rhist = rhist
# Lazy generation of projections
self._prx = None
self._pry = None
@property
def name(self):
return self.rhist.GetName()
@property
def xproj(self):
if self._prx is None:
name = self.rhist.GetName() + "_prx"
self._prx = self.rhist.ProjectionX(name, 0, -1, "e")
# do not store the Histogram object here because of garbage
# collection
prx = Histogram(self._prx)
prx.typeStr = "x projection"
return prx
@property
def yproj(self):
if self._pry is None:
name = self.rhist.GetName() + "_pry"
self._pry = self.rhist.ProjectionY(name, 0, -1, "e")
# do not store the Histogram object here because of garbage
# collection
pry = Histogram(self._pry)
pry.typeStr = "y projection"
return pry
def ExecuteCut(self, regionMarkers, bgMarkers, axis):
# _axis_ is the axis the markers refer to, so we project on the *other*
# axis. We call _axis_ the cut axis and the other axis the projection
# axis. If the matrix is symmetric, this does not matter, so _axis_ is
# "0" and the implementation can choose.
if len(regionMarkers) < 1:
raise RuntimeError("Need at least one gate for cut")
if axis == "0":
axis = "x"
if axis not in ("x", "y"):
raise ValueError("Bad value for axis parameter")
if axis == "x":
cutAxis = self.rhist.GetXaxis()
projector = self.rhist.ProjectionY
else:
cutAxis = self.rhist.GetYaxis()
projector = self.rhist.ProjectionX
b1 = cutAxis.FindBin(regionMarkers[0].p1.pos_uncal)
b2 = cutAxis.FindBin(regionMarkers[0].p2.pos_uncal)
name = self.rhist.GetName() + "_cut"
rhist = projector(name, min(b1, b2), max(b1, b2), "e")
# Ensure proper garbage collection for ROOT histogram objects
ROOT.SetOwnership(rhist, True)
numFgBins = abs(b2 - b1) + 1
for r in regionMarkers[1:]:
b1 = cutAxis.FindBin(r.p1.pos_uncal)
b2 = cutAxis.FindBin(r.p2.pos_uncal)
numFgBins += abs(b2 - b1) + 1
tmp = projector("proj_tmp", min(b1, b2), max(b1, b2), "e")
ROOT.SetOwnership(tmp, True)
rhist.Add(tmp, 1.0)
bgBins = []
numBgBins = 0
for b in bgMarkers:
b1 = cutAxis.FindBin(b.p1.pos_uncal)
b2 = cutAxis.FindBin(b.p2.pos_uncal)
numBgBins += abs(b2 - b1) + 1
bgBins.append((min(b1, b2), max(b1, b2)))
if numBgBins > 0:
bgFactor = -float(numFgBins) / float(numBgBins)
for b in bgBins:
tmp = projector("proj_tmp", b[0], b[1], "e")
ROOT.SetOwnership(tmp, True)
rhist.Add(tmp, bgFactor)
hist = CutHistogram(rhist, axis, regionMarkers)
hist.typeStr = "cut"
return hist
class MHisto2D(Histo2D):
"""
MFile-backed matrix for projection
"""
def __init__(self, fname, sym):
# check if file exists
try:
os.stat(fname)
except OSError as error:
hdtv.ui.error(str(error))
raise
self.GenerateFiles(fname, sym)
basename = self.GetBasename(fname)
# call to SpecReader to get the hist
try:
self.vmatrix = SpecReader.GetVMatrix(fname)
except SpecReaderError as msg:
hdtv.ui.error(str(msg))
raise
self._xproj = FileHistogram(basename + ".prx")
self._xproj.typeStr = "Projection"
if sym:
self._yproj = None
self.tvmatrix = self.vmatrix # Fixme
else:
self._yproj = FileHistogram(basename + ".pry")
self._yproj.typeStr = "Projection"
try:
self.tvmatrix = SpecReader.GetVMatrix(basename + ".tmtx")
except SpecReaderError as msg:
hdtv.ui.error(str(msg))
raise
self.filename = fname
@property
def xproj(self):
return self._xproj
@property
def yproj(self):
return self._yproj
def ExecuteCut(self, regionMarkers, bgMarkers, axis):
# _axis_ is the axis the markers refer to, so we project on the *other*
# axis. We call _axis_ the cut axis and the other axis the projection
# axis. If the matrix is symmetric, this does not matter, so _axis_ is
# "0" and the implementation can choose.
if len(regionMarkers) < 1:
raise RuntimeError("Need at least one gate for cut")
if axis == "0":
axis = "x"
if axis not in ("x", "y"):
raise ValueError("Bad value for axis parameter")
if axis == "x":
# FIXME: Calibrations for gated spectra asym/sym
thiscal = self._xproj.cal
if self._yproj:
othercal = self._yproj.cal
else:
othercal = self._xproj.cal
matrix = self.tvmatrix
else:
thiscal = self._yproj.cal
othercal = self._xproj.cal
matrix = self.vmatrix
matrix.ResetRegions()
for r in regionMarkers:
# FIXME: The region markers are not used correctly in many parts
# of the code. Workaround by explicitly using the cal here
b1 = matrix.FindCutBin(thiscal.E2Ch(r.p1.pos_cal))
b2 = matrix.FindCutBin(thiscal.E2Ch(r.p2.pos_cal))
matrix.AddCutRegion(b1, b2)
for b in bgMarkers:
b1 = matrix.FindCutBin(thiscal.E2Ch(b.p1.pos_cal))
b2 = matrix.FindCutBin(thiscal.E2Ch(b.p2.pos_cal))
matrix.AddBgRegion(b1, b2)
name = self.filename + "_cut"
rhist = matrix.Cut(name, name)
# Ensure proper garbage collection for ROOT histogram objects
ROOT.SetOwnership(rhist, True)
hist = CutHistogram(rhist, axis, regionMarkers)
hist.typeStr = "cut"
hist._cal = othercal
return hist
def GetBasename(self, fname):
if fname.endswith(".mtx") or fname.endswith(".mtx"):
return fname[:-4]
else:
return fname
def GenerateFiles(self, fname, sym):
"""
Generate projection(s) and possibly transpose (for asymmetric matrices),
if they do not exist yet.
"""
basename = self.GetBasename(fname)
# Generate projection(s)
prx_fname = basename + ".prx"
pry_fname = ""
if os.path.exists(prx_fname):
hdtv.ui.info("Using %s for x projection" % prx_fname)
prx_fname = ""
if not sym:
pry_fname = basename + ".pry"
if os.path.exists(pry_fname):
hdtv.ui.info("Using %s for y projection" % pry_fname)
pry_fname = ""
if prx_fname or pry_fname:
errno = ROOT.MatOp.Project(fname, prx_fname, pry_fname)
if errno != ROOT.MatOp.ERR_SUCCESS:
raise RuntimeError("Project: " + ROOT.MatOp.GetErrorString(errno))
if prx_fname:
hdtv.ui.info("Generated x projection: %s" % prx_fname)
if pry_fname:
hdtv.ui.info("Generated y projection: %s" % pry_fname)
# Generate transpose
if not sym:
trans_fname = basename + ".tmtx"
if os.path.exists(trans_fname):
hdtv.ui.info("Using %s for transpose" % trans_fname)
else:
errno = ROOT.MatOp.Transpose(fname, trans_fname)
if errno != ROOT.MatOp.ERR_SUCCESS:
raise RuntimeError("Transpose: " + ROOT.MatOp.GetErrorString(errno))
hdtv.ui.info("Generated transpose: %s" % trans_fname)
| gpl-2.0 | -1,190,877,172,737,850,600 | 32.272841 | 126 | 0.556517 | false | 3.752294 | false | false | false |
schneidersoft/KiBoM | bomlib/component.py | 1 | 17090 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from bomlib.columns import ColumnList
from bomlib.preferences import BomPref
import bomlib.units as units
from bomlib.sort import natural_sort
import re
import sys
DNF = [
"dnf",
"dnl",
"dnp",
"do not fit",
"do not place",
"do not load",
"nofit",
"nostuff",
"noplace",
"noload",
"not fitted",
"not loaded",
"not placed",
"no stuff",
]
class Component():
"""Class for a component, aka 'comp' in the xml netlist file.
This component class is implemented by wrapping an xmlElement instance
with accessors. The xmlElement is held in field 'element'.
"""
def __init__(self, xml_element, prefs=None):
self.element = xml_element
self.libpart = None
if not prefs:
prefs = BomPref()
self.prefs = prefs
# Set to true when this component is included in a component group
self.grouped = False
# Compare the value of this part, to the value of another part (see if they match)
def compareValue(self, other):
# Simple string comparison
if self.getValue().lower() == other.getValue().lower():
return True
# Otherwise, perform a more complicated value comparison
if units.compareValues(self.getValue(), other.getValue()):
return True
# Ignore value if both components are connectors
if self.prefs.groupConnectors:
if 'connector' in self.getLibName().lower() and 'connector' in other.getLibName().lower():
return True
# No match, return False
return False
# Determine if two parts have the same name
def comparePartName(self, other):
pn1 = self.getPartName().lower()
pn2 = other.getPartName().lower()
# Simple direct match
if pn1 == pn2:
return True
# Compare part aliases e.g. "c" to "c_small"
for alias in self.prefs.aliases:
if pn1 in alias and pn2 in alias:
return True
return False
def compareField(self, other, field):
this_field = self.getField(field).lower()
other_field = other.getField(field).lower()
# If blank comparisons are allowed
if this_field == "" or other_field == "":
if not self.prefs.mergeBlankFields:
return False
if this_field == other_field:
return True
return False
def __eq__(self, other):
"""
Equivalency operator is used to determine if two parts are 'equal'
"""
# 'fitted' value must be the same for both parts
if self.isFitted() != other.isFitted():
return False
if len(self.prefs.groups) == 0:
return False
for c in self.prefs.groups:
# Perform special matches
if c.lower() == ColumnList.COL_VALUE.lower():
if not self.compareValue(other):
return False
# Match part name
elif c.lower() == ColumnList.COL_PART.lower():
if not self.comparePartName(other):
return False
# Generic match
elif not self.compareField(other, c):
return False
return True
def setLibPart(self, part):
self.libpart = part
def getPrefix(self):
"""
Get the reference prefix
e.g. if this component has a reference U12, will return "U"
"""
prefix = ""
for c in self.getRef():
if c.isalpha():
prefix += c
else:
break
return prefix
def getSuffix(self):
"""
Return the reference suffix #
e.g. if this component has a reference U12, will return "12"
"""
suffix = ""
for c in self.getRef():
if c.isalpha():
suffix = ""
else:
suffix += c
return int(suffix)
def getLibPart(self):
return self.libpart
def getPartName(self):
return self.element.get("libsource", "part")
def getLibName(self):
return self.element.get("libsource", "lib")
def getDescription(self):
try:
return self.element.get("libsource", "description")
except:
# Compatibility with old KiCad versions (4.x)
ret = self.element.get("field", "name", "description")
if ret == "":
ret = self.libpart.getDescription()
return ret
def setValue(self, value):
"""Set the value of this component"""
v = self.element.getChild("value")
if v:
v.setChars(value)
def getValue(self):
return self.element.get("value")
def getField(self, name, ignoreCase=True, libraryToo=True):
"""Return the value of a field named name. The component is first
checked for the field, and then the components library part is checked
for the field. If the field doesn't exist in either, an empty string is
returned
Keywords:
name -- The name of the field to return the value for
libraryToo -- look in the libpart's fields for the same name if not found
in component itself
"""
fp = self.getFootprint().split(":")
if name.lower() == ColumnList.COL_REFERENCE.lower():
return self.getRef().strip()
elif name.lower() == ColumnList.COL_DESCRIPTION.lower():
return self.getDescription().strip()
elif name.lower() == ColumnList.COL_DATASHEET.lower():
return self.getDatasheet().strip()
# Footprint library is first element
elif name.lower() == ColumnList.COL_FP_LIB.lower():
if len(fp) > 1:
return fp[0].strip()
else:
# Explicit empty return
return ""
elif name.lower() == ColumnList.COL_FP.lower():
if len(fp) > 1:
return fp[1].strip()
elif len(fp) == 1:
return fp[0]
else:
return ""
elif name.lower() == ColumnList.COL_VALUE.lower():
return self.getValue().strip()
elif name.lower() == ColumnList.COL_PART.lower():
return self.getPartName().strip()
elif name.lower() == ColumnList.COL_PART_LIB.lower():
return self.getLibName().strip()
# Other fields (case insensitive)
for f in self.getFieldNames():
if f.lower() == name.lower():
field = self.element.get("field", "name", f)
if field == "" and libraryToo:
field = self.libpart.getField(f)
return field.strip()
# Could not find a matching field
return ""
def getFieldNames(self):
"""Return a list of field names in play for this component. Mandatory
fields are not included, and they are: Value, Footprint, Datasheet, Ref.
The netlist format only includes fields with non-empty values. So if a field
is empty, it will not be present in the returned list.
"""
fieldNames = []
fields = self.element.getChild('fields')
if fields:
for f in fields.getChildren():
fieldNames.append(f.get('field', 'name'))
return fieldNames
def getRef(self):
return self.element.get("comp", "ref")
# Determine if a component is FITTED or not
def isFitted(self):
check = self.getField(self.prefs.configField).lower()
# Check the value field first
if self.getValue().lower() in DNF:
return False
# Empty value means part is fitted
if check == "":
return True
opts = check.lower().split(",")
exclude = False
include = True
for opt in opts:
opt = opt.strip()
# Any option containing a DNF is not fitted
if opt in DNF:
exclude = True
break
# Options that start with '-' are explicitly removed from certain configurations
if opt.startswith("-") and str(opt[1:]) in [str(cfg) for cfg in self.prefs.pcbConfig]:
exclude = True
break
if opt.startswith("+"):
include = include or opt[1:] in [str(cfg) for cfg in self.prefs.pcbConfig]
return include and not exclude
# Test if this part should be included, based on any regex expressions provided in the preferences
def testRegExclude(self):
for reg in self.prefs.regExcludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
# Attempt unicode escaping...
# Filthy hack
try:
regex = regex.decode("unicode_escape")
except:
pass
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
if self.prefs.verbose:
print("Excluding '{ref}': Field '{field}' ({value}) matched '{reg}'".format(
ref=self.getRef(),
field=field_name,
value=field_value,
reg=regex).encode('utf-8'))
# Found a match
return True
# Default, could not find any matches
return False
def testRegInclude(self):
if len(self.prefs.regIncludes) == 0: # Nothing to match against
return True
for reg in self.prefs.regIncludes:
if type(reg) == list and len(reg) == 2:
field_name, regex = reg
field_value = self.getField(field_name)
print(field_name, field_value, regex)
if re.search(regex, field_value, flags=re.IGNORECASE) is not None:
if self.prefs.verbose:
print("")
# Found a match
return True
# Default, could not find a match
return False
def getFootprint(self, libraryToo=True):
ret = self.element.get("footprint")
if ret == "" and libraryToo:
if self.libpart:
ret = self.libpart.getFootprint()
return ret
def getDatasheet(self, libraryToo=True):
ret = self.element.get("datasheet")
if ret == "" and libraryToo:
ret = self.libpart.getDatasheet()
return ret
def getTimestamp(self):
return self.element.get("tstamp")
class joiner:
def __init__(self):
self.stack = []
def add(self, P, N):
if self.stack == []:
self.stack.append(((P, N), (P, N)))
return
S, E = self.stack[-1]
if N == E[1] + 1:
self.stack[-1] = (S, (P, N))
else:
self.stack.append(((P, N), (P, N)))
def flush(self, sep, N=None, dash='-'):
refstr = u''
c = 0
for Q in self.stack:
if bool(N) and c != 0 and c % N == 0:
refstr += u'\n'
elif c != 0:
refstr += sep
S, E = Q
if S == E:
refstr += "%s%d" % S
c += 1
else:
# Do we have space?
if bool(N) and (c + 1) % N == 0:
refstr += u'\n'
c += 1
refstr += "%s%d%s%s%d" % (S[0], S[1], dash, E[0], E[1])
c += 2
return refstr
class ComponentGroup():
"""
Initialize the group with no components, and default fields
"""
def __init__(self, prefs=None):
self.components = []
self.fields = dict.fromkeys(ColumnList._COLUMNS_DEFAULT) # Columns loaded from KiCad
if not prefs:
prefs = BomPref()
self.prefs = prefs
def getField(self, field):
if field not in self.fields.keys():
return ""
if not self.fields[field]:
return ""
return u''.join((self.fields[field]))
def getCount(self):
return len(self.components)
# Test if a given component fits in this group
def matchComponent(self, c):
if len(self.components) == 0:
return True
if c == self.components[0]:
return True
return False
def containsComponent(self, c):
# Test if a given component is already contained in this grop
if not self.matchComponent(c):
return False
for comp in self.components:
if comp.getRef() == c.getRef():
return True
return False
def addComponent(self, c):
# Add a component to the group
if self.containsComponent(c):
return
self.components.append(c)
def isFitted(self):
return any([c.isFitted() for c in self.components])
def getRefs(self):
# Return a list of the components
return " ".join([c.getRef() for c in self.components])
def getAltRefs(self, wrapN=None):
S = joiner()
for n in self.components:
P, N = (n.getPrefix(), n.getSuffix())
S.add(P, N)
return S.flush(' ', N=wrapN)
# Sort the components in correct order
def sortComponents(self):
self.components = sorted(self.components, key=lambda c: natural_sort(c.getRef()))
# Update a given field, based on some rules and such
def updateField(self, field, fieldData):
# Protected fields cannot be overwritten
if field in ColumnList._COLUMNS_PROTECTED:
return
if field is None or field == "":
return
elif fieldData == "" or fieldData is None:
return
if (field not in self.fields.keys()) or (self.fields[field] is None) or (self.fields[field] == ""):
self.fields[field] = fieldData
elif fieldData.lower() in self.fields[field].lower():
return
else:
print("Field conflict: ({refs}) [{name}] : '{flds}' <- '{fld}'".format(
refs=self.getRefs(),
name=field,
flds=self.fields[field],
fld=fieldData).encode('utf-8'))
self.fields[field] += " " + fieldData
def updateFields(self, usealt=False, wrapN=None):
for c in self.components:
for f in c.getFieldNames():
# These columns are handled explicitly below
if f in ColumnList._COLUMNS_PROTECTED:
continue
self.updateField(f, c.getField(f))
# Update 'global' fields
if usealt:
self.fields[ColumnList.COL_REFERENCE] = self.getAltRefs(wrapN)
else:
self.fields[ColumnList.COL_REFERENCE] = self.getRefs()
q = self.getCount()
self.fields[ColumnList.COL_GRP_QUANTITY] = "{n}{dnf}".format(
n=q,
dnf=" (DNF)" if not self.isFitted() else "")
self.fields[ColumnList.COL_GRP_BUILD_QUANTITY] = str(q * self.prefs.boards) if self.isFitted() else "0"
if self.prefs.agregateValues:
self.fields[ColumnList.COL_VALUE] = ','.join(sorted(set([c.getValue() for c in self.components])))
else:
self.fields[ColumnList.COL_VALUE] = self.components[0].getValue()
self.fields[ColumnList.COL_PART] = self.components[0].getPartName()
self.fields[ColumnList.COL_PART_LIB] = self.components[0].getLibName()
self.fields[ColumnList.COL_DESCRIPTION] = self.components[0].getDescription()
self.fields[ColumnList.COL_DATASHEET] = self.components[0].getDatasheet()
# Footprint field requires special attention
fp = self.components[0].getFootprint().split(":")
if len(fp) >= 2:
self.fields[ColumnList.COL_FP_LIB] = fp[0]
self.fields[ColumnList.COL_FP] = fp[1]
elif len(fp) == 1:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = fp[0]
else:
self.fields[ColumnList.COL_FP_LIB] = ""
self.fields[ColumnList.COL_FP] = ""
# Return a dict of the KiCad data based on the supplied columns
# NOW WITH UNICODE SUPPORT!
def getRow(self, columns):
row = []
for key in columns:
val = self.getField(key)
if val is None:
val = ""
else:
val = u'' + val
if sys.version_info[0] < 3:
val = val.encode('utf-8')
row.append(val)
return row
| mit | 2,352,314,765,385,590,300 | 28.618718 | 111 | 0.533645 | false | 4.219753 | false | false | false |
HackUCF/ppl | membership/views.py | 1 | 2160 | import json
from django.conf import settings
from django.contrib.auth.decorators import user_passes_test, login_required
from django.core.cache import cache
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Q
from django.forms import model_to_dict
from django.http import HttpResponseBadRequest, HttpResponse, \
HttpResponseForbidden
from django.shortcuts import render
from django.views.decorators.http import require_safe, require_POST
from membership.api import download_sheet_with_user, user_can_download_sheet
from membership.forms import SearchForm
from membership.models import Member, update_membership
user_can_view_members = user_passes_test(
lambda u: user_can_download_sheet(u),
login_url=settings.LOGIN_URL + '?reason=no_member_view_permission'
)
def index(request):
return render(request, 'index.html')
@login_required
@user_can_view_members
@require_safe
def dashboard(request):
return render(request, 'dashboard.html', {
'form': SearchForm(),
'enable_member_update': cache.get('enable_member_update', True)
})
@login_required
@user_can_view_members
@require_POST
def search(request):
form = SearchForm(request.POST)
if not form.is_valid():
return HttpResponseBadRequest(content=form.errors)
query = form.cleaned_data['query']
members = Member.objects.filter(
Q(knights_email__icontains=query) |
Q(name__icontains=query)
)[:40]
data = json.dumps({
'results': {
'data': [model_to_dict(m) for m in members]
}
}, cls=DjangoJSONEncoder)
return HttpResponse(data, content_type='application/json')
@login_required
@user_can_view_members
@require_POST
def update(request):
if not request.is_ajax():
return HttpResponseBadRequest('Must be requested from page')
filename = 'membership.csv'
if not download_sheet_with_user(request.user, filename):
return HttpResponseForbidden('User cannot see the sheet 👎')
update_membership(filename)
cache.set('enable_member_update', False, 300)
# thumbs up unicode
return HttpResponse('👍')
| gpl-3.0 | -1,755,547,526,197,939,500 | 27.72 | 76 | 0.720984 | false | 3.778947 | false | false | false |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/series/test_quantile.py | 7 | 7083 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
from pandas import (Index, Series, _np_version_under1p9)
from pandas.core.indexes.datetimes import Timestamp
from pandas.core.dtypes.common import is_integer
import pandas.util.testing as tm
from .common import TestData
class TestSeriesQuantile(TestData):
def test_quantile(self):
q = self.ts.quantile(0.1)
assert q == np.percentile(self.ts.valid(), 10)
q = self.ts.quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# object dtype
q = Series(self.ts, dtype=object).quantile(0.9)
assert q == np.percentile(self.ts.valid(), 90)
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
assert q == Timestamp('2000-01-10 19:12:00')
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
assert q == pd.to_timedelta('24:00:00')
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
assert result is pd.NaT
msg = 'percentiles should all be in the interval \\[0, 1\\]'
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(invalid)
def test_quantile_multi(self):
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([np.percentile(self.ts.valid(), 10),
np.percentile(self.ts.valid(), 90)],
index=qs, name=self.ts.name)
tm.assert_series_equal(result, expected)
dts = self.ts.index.to_series()
dts.name = 'xxx'
result = dts.quantile((.2, .2))
expected = Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2], name='xxx')
tm.assert_series_equal(result, expected)
result = self.ts.quantile([])
expected = pd.Series([], name=self.ts.name, index=Index(
[], dtype=float))
tm.assert_series_equal(result, expected)
@pytest.mark.skipif(_np_version_under1p9,
reason="Numpy version is under 1.9")
def test_quantile_interpolation(self):
# see gh-10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.valid(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.valid(), 10)
# test with and without interpolation keyword
assert q == q1
@pytest.mark.skipif(_np_version_under1p9,
reason="Numpy version is under 1.9")
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='lower')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = pd.Series([1, 3, 4]).quantile(0.5, interpolation='higher')
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
@pytest.mark.skipif(not _np_version_under1p9,
reason="Numpy version is greater 1.9")
def test_quantile_interpolation_np_lt_1p9(self):
# GH #10174
# interpolation = linear (default case)
q = self.ts.quantile(0.1, interpolation='linear')
assert q == np.percentile(self.ts.valid(), 10)
q1 = self.ts.quantile(0.1)
assert q1 == np.percentile(self.ts.valid(), 10)
# interpolation other than linear
msg = "Interpolation methods other than "
with tm.assert_raises_regex(ValueError, msg):
self.ts.quantile(0.9, interpolation='nearest')
# object dtype
with tm.assert_raises_regex(ValueError, msg):
Series(self.ts, dtype=object).quantile(0.7, interpolation='higher')
def test_quantile_nan(self):
# GH 13098
s = pd.Series([1, 2, 3, 4, np.nan])
result = s.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
cases = [Series([]), Series([np.nan, np.nan])]
for s in cases:
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
tm.assert_series_equal(res, pd.Series([np.nan], index=[0.5]))
res = s.quantile([0.2, 0.3])
tm.assert_series_equal(res, pd.Series([np.nan, np.nan],
index=[0.2, 0.3]))
def test_quantile_box(self):
cases = [[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03')],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern')],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')],
# NaT
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'), pd.NaT],
[pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03', tz='US/Eastern'), pd.NaT],
[pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days'), pd.NaT]]
for case in cases:
s = pd.Series(case, name='XXX')
res = s.quantile(0.5)
assert res == case[1]
res = s.quantile([0.5])
exp = pd.Series([case[1]], index=[0.5], name='XXX')
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isnull(Series([], dtype='M8[ns]').quantile(.5))
assert pd.isnull(Series([], dtype='m8[ns]').quantile(.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, pd.Series([pd.NaT], index=[0.5]))
def test_quantile_empty(self):
# floats
s = Series([], dtype='float64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# int
s = Series([], dtype='int64')
res = s.quantile(0.5)
assert np.isnan(res)
res = s.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
# datetime
s = Series([], dtype='datetime64[ns]')
res = s.quantile(0.5)
assert res is pd.NaT
res = s.quantile([0.5])
exp = Series([pd.NaT], index=[0.5])
tm.assert_series_equal(res, exp)
| mit | -3,280,415,586,390,864,400 | 32.889952 | 79 | 0.54059 | false | 3.518629 | true | false | false |
joxeankoret/diaphora | diaphora.py | 1 | 75782 | #!/usr/bin/python3
"""
Diaphora, a diffing plugin for IDA
Copyright (c) 2015-2021, Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import time
import json
import decimal
import sqlite3
import threading
from threading import Thread
from io import StringIO
from difflib import SequenceMatcher
from multiprocessing import cpu_count
from diaphora_heuristics import *
from jkutils.kfuzzy import CKoretFuzzyHashing
from jkutils.factor import (FACTORS_CACHE, difference, difference_ratio,
primesbelow as primes)
try:
import idaapi
is_ida = True
except ImportError:
is_ida = False
#-------------------------------------------------------------------------------
VERSION_VALUE = "2.0.5"
COPYRIGHT_VALUE="Copyright(c) 2015-2021 Joxean Koret"
COMMENT_VALUE="Diaphora diffing plugin for IDA version %s" % VERSION_VALUE
# Used to clean-up the pseudo-code and assembly dumps in order to get
# better comparison ratios
CMP_REPS = ["loc_", "j_nullsub_", "nullsub_", "j_sub_", "sub_",
"qword_", "dword_", "byte_", "word_", "off_", "def_", "unk_", "asc_",
"stru_", "dbl_", "locret_", "flt_", "jpt_"]
CMP_REMS = ["dword ptr ", "byte ptr ", "word ptr ", "qword ptr ", "short ptr"]
#-------------------------------------------------------------------------------
def result_iter(cursor, arraysize=1000):
""" An iterator that uses fetchmany to keep memory usage down. """
while True:
results = cursor.fetchmany(arraysize)
if not results:
break
for result in results:
yield result
#-------------------------------------------------------------------------------
def quick_ratio(buf1, buf2):
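  """Return difflib's quick_ratio() upper bound on the similarity between two
  texts, compared line-wise. Returns 0 for None/empty inputs or on error."""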
try:
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
return 0
s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
return s.quick_ratio()
except:
print("quick_ratio:", str(sys.exc_info()[1]))
return 0
#-------------------------------------------------------------------------------
def real_quick_ratio(buf1, buf2):
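  """Like quick_ratio(), but using difflib's even faster (and looser)
  real_quick_ratio() upper bound."""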
try:
    if buf1 is None or buf2 is None or buf1 == "" or buf2 == "":
return 0
s = SequenceMatcher(None, buf1.split("\n"), buf2.split("\n"))
return s.real_quick_ratio()
except:
print("real_quick_ratio:", str(sys.exc_info()[1]))
return 0
#-------------------------------------------------------------------------------
def ast_ratio(ast1, ast2):
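  """Compare 2 pseudo-code ASTs encoded as small-primes-product values: the
  (potentially huge) numbers are converted to Decimal and the ratio is derived
  from the difference between their prime factors."""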
if ast1 == ast2:
return 1.0
elif ast1 is None or ast2 is None:
return 0
return difference_ratio(decimal.Decimal(ast1), decimal.Decimal(ast2))
#-------------------------------------------------------------------------------
def log(msg):
if isinstance(threading.current_thread(), threading._MainThread):
print(("[%s] %s" % (time.asctime(), msg)))
#-------------------------------------------------------------------------------
def log_refresh(msg, show=False, do_log=True):
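  """Console stub: 'show' and 'do_log' are ignored here; the UI front-end is
  expected to replace this function with one that also refreshes its own log
  window."""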
log(msg)
#-------------------------------------------------------------------------------
def debug_refresh(msg, show=False):
if os.getenv("DIAPHORA_DEBUG"):
log(msg)
#-------------------------------------------------------------------------------
class CChooser():
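  """Container for one list of diffing results (i.e., "Best matches", "Partial
  matches", "Unreliable matches" or "Unmatched in primary/secondary"). The UI
  front-end is expected to subclass it in order to display the results."""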
class Item:
def __init__(self, ea, name, ea2 = None, name2 = None, desc="100% equal", ratio = 0, bb1 = 0, bb2 = 0):
self.ea = ea
self.vfname = name
self.ea2 = ea2
self.vfname2 = name2
self.description = desc
self.ratio = ratio
self.bb1 = int(bb1)
self.bb2 = int(bb2)
self.cmd_import_selected = None
self.cmd_import_all = None
self.cmd_import_all_funcs = None
def __str__(self):
return '%08x' % int(self.ea)
def __init__(self, title, bindiff, show_commands=True):
if title == "Unmatched in primary":
self.primary = False
else:
self.primary = True
self.title = title
self.n = 0
self.items = []
self.icon = 41
self.bindiff = bindiff
self.show_commands = show_commands
self.cmd_diff_asm = None
self.cmd_diff_graph = None
self.cmd_diff_c = None
self.cmd_import_selected = None
self.cmd_import_all = None
self.cmd_import_all_funcs = None
self.cmd_show_asm = None
self.cmd_show_pseudo = None
self.cmd_highlight_functions = None
self.cmd_unhighlight_functions = None
self.selected_items = []
def add_item(self, item):
if self.title.startswith("Unmatched in"):
self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname])
else:
self.items.append(["%05lu" % self.n, "%08x" % int(item.ea), item.vfname,
"%08x" % int(item.ea2), item.vfname2, "%.3f" % item.ratio,
"%d" % item.bb1, "%d" % item.bb2, item.description])
self.n += 1
def get_color(self):
if self.title.startswith("Best"):
return 0xffff99
elif self.title.startswith("Partial"):
return 0x99ff99
elif self.title.startswith("Unreliable"):
return 0x9999ff
#-------------------------------------------------------------------------------
MAX_PROCESSED_ROWS = 1000000
TIMEOUT_LIMIT = 60 * 3
#-------------------------------------------------------------------------------
class bytes_encoder(json.JSONEncoder):
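  """JSON encoder that transparently decodes bytes objects to UTF-8 strings,
  as the standard encoder cannot serialize them."""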
def default(self, obj):
if isinstance(obj, bytes):
return obj.decode("utf-8")
return json.JSONEncoder.default(self, obj)
#-------------------------------------------------------------------------------
class CBinDiff:
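  """Core Diaphora engine: it exports databases with the SQLite schema built
  in create_schema() and diffs 2 such databases using the heuristics imported
  from diaphora_heuristics."""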
def __init__(self, db_name, chooser=CChooser):
self.names = dict()
self.primes = primes(2048*2048)
self.db_name = db_name
self.dbs_dict = {}
self.db = None # Used exclusively by the exporter!
self.open_db()
self.matched1 = set()
self.matched2 = set()
self.matches_cache = {}
self.total_functions1 = None
self.total_functions2 = None
self.equal_callgraph = False
self.kfh = CKoretFuzzyHashing()
    # With this block size we are sure the fuzzy hashing will only apply to
    # functions that are reasonably big
self.kfh.bsize = 32
self.pseudo = {}
self.pseudo_hash = {}
self.pseudo_comments = {}
self.unreliable = self.get_value_for("unreliable", False)
self.relaxed_ratio = self.get_value_for("relaxed_ratio", False)
self.experimental = self.get_value_for("experimental", False)
self.slow_heuristics = self.get_value_for("slow_heuristics", False)
self.unreliable = False
self.relaxed_ratio = False
self.experimental = False
self.slow_heuristics = False
self.use_decompiler_always = True
self.exclude_library_thunk = True
self.project_script = None
self.hooks = None
# Create the choosers
self.chooser = chooser
self.create_choosers()
self.last_diff_db = None
self.re_cache = {}
####################################################################
# LIMITS
#
# Do not run heuristics for more than X seconds (by default, 3 minutes).
self.timeout = self.get_value_for("TIMEOUT_LIMIT", TIMEOUT_LIMIT)
# It's typical in SQL queries to get a cartesian product of the
# results in the functions tables. Do not process more than this
    # value for every 20k functions.
self.max_processed_rows = self.get_value_for("MAX_PROCESSED_ROWS", MAX_PROCESSED_ROWS)
# Limits to filter the functions to export
self.min_ea = 0
self.max_ea = 0
    # Export only function names that are not automatically generated by IDA?
    # I.e., excluding those starting with sub_*
self.ida_subs = True
# Export only function summaries instead of also exporting both the
# basic blocks and all instructions used by functions?
self.function_summaries_only = False
# Ignore IDA's automatically generated sub_* names for heuristics
# like the 'Same name'?
self.ignore_sub_names = True
# Ignore any and all function names for the 'Same name' heuristic?
self.ignore_all_names = self.get_value_for("ignore_all_names", True)
# Ignore small functions?
self.ignore_small_functions = self.get_value_for("ignore_small_functions", False)
# Number of CPU threads/cores to use?
cpus = cpu_count() - 1
if cpus < 1:
cpus = 1
self.cpu_count = self.get_value_for("CPU_COUNT", cpus)
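    # Any of the limits above can be overridden via environment variables, for
    # example: DIAPHORA_CPU_COUNT=4 or DIAPHORA_MAX_PROCESSED_ROWS=3000000.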
####################################################################
def __del__(self):
if self.db is not None:
try:
if self.last_diff_db is not None:
tid = threading.current_thread().ident
if tid in self.dbs_dict:
db = self.dbs_dict[tid]
            # sqlite3 cursors are not context managers, use one explicitly
            cur = db.cursor()
            cur.execute('detach "%s"' % self.last_diff_db)
            cur.close()
except:
pass
self.db_close()
def get_value_for(self, value_name, default):
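    """Return the value of the DIAPHORA_<VALUE_NAME> environment variable,
    coerced to the type of @default, or @default itself when it is not set.
    For example, running with DIAPHORA_TIMEOUT_LIMIT=600 in the environment
    raises the heuristics timeout from the default 3 minutes to 10 minutes."""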
# Try to search for a DIAPHORA_<value_name> environment variable
value = os.getenv("DIAPHORA_%s" % value_name.upper())
if value is not None:
if type(value) != type(default):
value = type(default)(value)
return value
return default
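  # SQLite connections cannot be shared across threads, so a separate
  # connection is kept per thread ident in self.dbs_dict. The main thread's
  # connection is also kept in self.db and is used exclusively by the exporter.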
def open_db(self):
db = sqlite3.connect(self.db_name, check_same_thread=True)
db.text_factory = str
db.row_factory = sqlite3.Row
tid = threading.current_thread().ident
self.dbs_dict[tid] = db
if isinstance(threading.current_thread(), threading._MainThread):
self.db = db
self.create_schema()
db.execute("analyze")
def get_db(self):
tid = threading.current_thread().ident
if not tid in self.dbs_dict:
self.open_db()
if self.last_diff_db is not None:
self.attach_database(self.last_diff_db)
return self.dbs_dict[tid]
def db_cursor(self):
db = self.get_db()
return db.cursor()
def db_close(self):
tid = threading.current_thread().ident
if tid in self.dbs_dict:
self.dbs_dict[tid].close()
del self.dbs_dict[tid]
if isinstance(threading.current_thread(), threading._MainThread):
self.db.close()
def create_schema(self):
cur = self.db_cursor()
cur.execute("PRAGMA foreign_keys = ON")
sql = """ create table if not exists functions (
id integer primary key,
name varchar(255),
address text unique,
nodes integer,
edges integer,
indegree integer,
outdegree integer,
size integer,
instructions integer,
mnemonics text,
names text,
prototype text,
cyclomatic_complexity integer,
primes_value text,
comment text,
mangled_function text,
bytes_hash text,
pseudocode text,
pseudocode_lines integer,
pseudocode_hash1 text,
pseudocode_primes text,
function_flags integer,
assembly text,
prototype2 text,
pseudocode_hash2 text,
pseudocode_hash3 text,
strongly_connected integer,
loops integer,
rva text unique,
tarjan_topological_sort text,
strongly_connected_spp text,
clean_assembly text,
clean_pseudo text,
mnemonics_spp text,
switches text,
function_hash text,
bytes_sum integer,
md_index text,
constants text,
constants_count integer,
segment_rva text,
assembly_addrs text,
kgh_hash text,
userdata text) """
cur.execute(sql)
sql = """ create table if not exists program (
id integer primary key,
callgraph_primes text,
callgraph_all_primes text,
processor text,
md5sum text
) """
cur.execute(sql)
sql = """ create table if not exists program_data (
id integer primary key,
name varchar(255),
type varchar(255),
value text
)"""
cur.execute(sql)
sql = """ create table if not exists version (value text) """
cur.execute(sql)
sql = """ create table if not exists instructions (
id integer primary key,
address text unique,
disasm text,
mnemonic text,
comment1 text,
comment2 text,
name text,
type text,
pseudocomment text,
pseudoitp integer) """
cur.execute(sql)
sql = """ create table if not exists basic_blocks (
id integer primary key,
num integer,
address text unique)"""
cur.execute(sql)
sql = """ create table if not exists bb_relations (
id integer primary key,
parent_id integer not null references basic_blocks(id) ON DELETE CASCADE,
child_id integer not null references basic_blocks(id) ON DELETE CASCADE)"""
cur.execute(sql)
sql = """ create table if not exists bb_instructions (
id integer primary key,
basic_block_id integer references basic_blocks(id) on delete cascade,
instruction_id integer references instructions(id) on delete cascade)"""
cur.execute(sql)
sql = """ create table if not exists function_bblocks (
id integer primary key,
function_id integer not null references functions(id) on delete cascade,
basic_block_id integer not null references basic_blocks(id) on delete cascade)"""
cur.execute(sql)
sql = """create table if not exists callgraph (
id integer primary key,
func_id integer not null references functions(id) on delete cascade,
address text not null,
type text not null)"""
cur.execute(sql)
sql = """create table if not exists constants (
id integer primary key,
func_id integer not null references functions(id) on delete cascade,
constant text not null)"""
cur.execute(sql)
cur.execute("select 1 from version")
row = cur.fetchone()
if not row:
cur.execute("insert into main.version values ('%s')" % VERSION_VALUE)
cur.close()
def create_indexes(self):
cur = self.db_cursor()
sql = "create index if not exists idx_assembly on functions(assembly)"
cur.execute(sql)
sql = "create index if not exists idx_bytes_hash on functions(bytes_hash)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode on functions(pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_name on functions(name)"
cur.execute(sql)
sql = "create index if not exists idx_mangled_name on functions(mangled_function)"
cur.execute(sql)
sql = "create index if not exists idx_names on functions(names)"
cur.execute(sql)
sql = "create index if not exists idx_asm_pseudo on functions(assembly, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_nodes_edges_instructions on functions(nodes, edges, instructions)"
cur.execute(sql)
sql = "create index if not exists idx_composite1 on functions(nodes, edges, mnemonics, names, cyclomatic_complexity, prototype2, indegree, outdegree)"
cur.execute(sql)
sql = "create index if not exists idx_composite2 on functions(instructions, mnemonics, names)"
cur.execute(sql)
sql = "create index if not exists idx_composite3 on functions(nodes, edges, cyclomatic_complexity)"
cur.execute(sql)
sql = "create index if not exists idx_composite4 on functions(pseudocode_lines, pseudocode)"
cur.execute(sql)
sql = "create index if not exists idx_composite5 on functions(pseudocode_lines, pseudocode_primes)"
cur.execute(sql)
sql = "create index if not exists idx_composite6 on functions(names, mnemonics)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash1 on functions(pseudocode_hash1)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash2 on functions(pseudocode_hash2)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash3 on functions(pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_pseudocode_hash on functions(pseudocode_hash1, pseudocode_hash2, pseudocode_hash3)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected on functions(strongly_connected)"
cur.execute(sql)
sql = "create index if not exists idx_strongly_connected_spp on functions(strongly_connected_spp)"
cur.execute(sql)
sql = "create index if not exists idx_loops on functions(loops)"
cur.execute(sql)
sql = "create index if not exists idx_rva on functions(rva)"
cur.execute(sql)
sql = "create index if not exists idx_tarjan_topological_sort on functions(tarjan_topological_sort)"
cur.execute(sql)
sql = "create index if not exists idx_mnemonics_spp on functions(mnemonics_spp)"
cur.execute(sql)
sql = "create index if not exists idx_clean_asm on functions(clean_assembly)"
cur.execute(sql)
sql = "create index if not exists idx_clean_pseudo on functions(clean_pseudo)"
cur.execute(sql)
sql = "create index if not exists idx_switches on functions(switches)"
cur.execute(sql)
sql = "create index if not exists idx_function_hash on functions(function_hash)"
cur.execute(sql)
sql = "create index if not exists idx_bytes_sum on functions(bytes_sum)"
cur.execute(sql)
sql = "create index if not exists idx_md_index on functions(md_index)"
cur.execute(sql)
sql = "create index if not exists idx_kgh_hash on functions(kgh_hash)"
cur.execute(sql)
sql = "create index if not exists idx_constants on functions(constants_count, constants)"
cur.execute(sql)
sql = "create index if not exists idx_mdindex_constants on functions(md_index, constants_count, constants)"
cur.execute(sql)
sql = "create index if not exists idx_instructions_address on instructions (address)"
cur.execute(sql)
sql = "create index if not exists idx_bb_relations on bb_relations(parent_id, child_id)"
cur.execute(sql)
sql = "create index if not exists idx_bb_instructions on bb_instructions (basic_block_id, instruction_id)"
cur.execute(sql)
sql = "create index if not exists id_function_blocks on function_bblocks (function_id, basic_block_id)"
cur.execute(sql)
sql = "create index if not exists idx_constants on constants (constant)"
cur.execute(sql)
sql = "analyze"
cur.execute(sql)
cur.close()
def attach_database(self, diff_db):
cur = self.db_cursor()
cur.execute('attach "%s" as diff' % diff_db)
cur.close()
def equal_db(self):
cur = self.db_cursor()
sql = "select count(*) total from program p, diff.program dp where p.md5sum = dp.md5sum"
cur.execute(sql)
row = cur.fetchone()
ret = row["total"] == 1
if not ret:
sql = "select count(*) total from (select * from functions except select * from diff.functions) x"
cur.execute(sql)
row = cur.fetchone()
ret = row["total"] == 0
else:
log("Same MD5 in both databases")
cur.close()
return ret
def add_program_data(self, type_name, key, value):
cur = self.db_cursor()
sql = "insert into main.program_data (name, type, value) values (?, ?, ?)"
values = (key, type_name, value)
cur.execute(sql, values)
cur.close()
def get_instruction_id(self, addr):
cur = self.db_cursor()
sql = "select id from instructions where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row["id"]
cur.close()
return rowid
def get_bb_id(self, addr):
cur = self.db_cursor()
sql = "select id from basic_blocks where address = ?"
cur.execute(sql, (str(addr),))
row = cur.fetchone()
rowid = None
if row is not None:
rowid = row["id"]
cur.close()
return rowid
def save_function(self, props):
if props is False:
log("WARNING: Trying to save an unresolved function?")
return
# Phase 1: Fix data types and insert the function row.
cur = self.db_cursor()
new_props = []
# The last 4 fields are callers, callees, basic_blocks_data & bb_relations
for prop in props[:len(props)-4]:
# XXX: Fixme! This is a hack for kernels on 64-bit architectures
if type(prop) is int and (prop > 0xFFFFFFFF or prop < -0xFFFFFFFF):
prop = str(prop)
elif type(prop) is bytes:
# bytes objects have no .encode(); decode them to text before storing
prop = prop.decode("utf-8")
if type(prop) is list or type(prop) is set:
new_props.append(json.dumps(list(prop), ensure_ascii=False, cls=bytes_encoder))
else:
new_props.append(prop)
sql = """insert into main.functions (name, nodes, edges, indegree, outdegree, size,
instructions, mnemonics, names, prototype,
cyclomatic_complexity, primes_value, address,
comment, mangled_function, bytes_hash, pseudocode,
pseudocode_lines, pseudocode_hash1, pseudocode_primes,
function_flags, assembly, prototype2, pseudocode_hash2,
pseudocode_hash3, strongly_connected, loops, rva,
tarjan_topological_sort, strongly_connected_spp,
clean_assembly, clean_pseudo, mnemonics_spp, switches,
function_hash, bytes_sum, md_index, constants,
constants_count, segment_rva, assembly_addrs, kgh_hash,
userdata)
values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
try:
cur.execute(sql, new_props)
except:
print("Props???", new_props)
raise
func_id = cur.lastrowid
# Phase 2: Save the callers and callees of the function
callers, callees = props[len(props)-4:len(props)-2]
sql = "insert into callgraph (func_id, address, type) values (?, ?, ?)"
for caller in callers:
cur.execute(sql, (func_id, str(caller), 'caller'))
for callee in callees:
cur.execute(sql, (func_id, str(callee), 'callee'))
# Phase 3: Insert the constants of the function
sql = "insert into constants (func_id, constant) values (?, ?)"
props_dict = self.create_function_dictionary(props)
for constant in props_dict["constants"]:
if type(constant) in [str, bytes] and len(constant) > 4:
cur.execute(sql, (func_id, constant))
# Phase 4: Save the basic blocks relationships
if not self.function_summaries_only:
# The last 2 fields are basic_blocks_data & bb_relations
bb_data, bb_relations = props[len(props)-2:]
instructions_ids = {}
sql = """insert into main.instructions (address, mnemonic, disasm,
comment1, comment2, name,
type, pseudocomment,
pseudoitp)
values (?, ?, ?, ?, ?, ?, ?, ?, ?)"""
self_get_instruction_id = self.get_instruction_id
cur_execute = cur.execute
for key in bb_data:
for insn in bb_data[key]:
addr, mnem, disasm, cmt1, cmt2, name, mtype = insn
db_id = self_get_instruction_id(str(addr))
if db_id is None:
pseudocomment = None
pseudoitp = None
if addr in self.pseudo_comments:
pseudocomment, pseudoitp = self.pseudo_comments[addr]
cur_execute(sql, (str(addr), mnem, disasm, cmt1, cmt2, name, mtype, pseudocomment, pseudoitp))
db_id = cur.lastrowid
instructions_ids[addr] = db_id
num = 0
bb_ids = {}
sql1 = "insert into main.basic_blocks (num, address) values (?, ?)"
sql2 = "insert into main.bb_instructions (basic_block_id, instruction_id) values (?, ?)"
self_get_bb_id = self.get_bb_id
for key in bb_data:
# Insert each basic block
num += 1
ins_ea = str(key)
last_bb_id = self_get_bb_id(ins_ea)
if last_bb_id is None:
cur_execute(sql1, (num, str(ins_ea)))
last_bb_id = cur.lastrowid
bb_ids[ins_ea] = last_bb_id
# Insert relations between basic blocks and instructions
for insn in bb_data[key]:
ins_id = instructions_ids[insn[0]]
cur_execute(sql2, (last_bb_id, ins_id))
# Insert relations between basic blocks
sql = "insert into main.bb_relations (parent_id, child_id) values (?, ?)"
for key in bb_relations:
for bb in bb_relations[key]:
bb = str(bb)
key = str(key)
try:
cur_execute(sql, (bb_ids[key], bb_ids[bb]))
except:
# the key doesn't exist because it has no forward references to any basic block
log("Error: %s" % str(sys.exc_info()[1]))
# And finally insert the functions to basic blocks relations
sql = "insert into main.function_bblocks (function_id, basic_block_id) values (?, ?)"
for key in bb_ids:
bb_id = bb_ids[key]
cur_execute(sql, (func_id, bb_id))
cur.close()
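# --- Illustrative sketch (not part of the original file) ---
# Why save_function() stringifies very large integers: SQLite stores
# INTEGER values as signed 64-bit, and the sqlite3 module raises
# OverflowError for anything outside that range. The 0xFFFFFFFF bound
# used above is even more conservative than strictly required.
import sqlite3

def _demo_big_int_to_text():
    db = sqlite3.connect(":memory:")
    db.execute("create table t (v)")
    try:
        db.execute("insert into t values (?)", (2 ** 64,))
    except OverflowError:
        # Fall back to text, exactly what the hack above does.
        db.execute("insert into t values (?)", (str(2 ** 64),))
    assert db.execute("select v from t").fetchone()[0] == str(2 ** 64)
    db.close()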
def get_valid_definition(self, defs):
""" Try to get a valid structure definition by removing (yes) the
invalid characters typically found in IDA's generated structs."""
ret = defs.replace("?", "_").replace("@", "_")
ret = ret.replace("$", "_")
return ret
def prettify_asm(self, asm_source):
asm = []
for line in asm_source.split("\n"):
if not line.startswith("loc_"):
asm.append("\t" + line)
else:
asm.append(line)
return "\n".join(asm)
def re_sub(self, text, repl, string):
if text not in self.re_cache:
self.re_cache[text] = re.compile(text, flags=re.IGNORECASE)
re_obj = self.re_cache[text]
return re_obj.sub(repl, string)
def get_cmp_asm_lines(self, asm):
sio = StringIO(asm)
lines = []
get_cmp_asm = self.get_cmp_asm
for line in sio.readlines():
line = line.strip("\n")
lines.append(get_cmp_asm(line))
return "\n".join(lines)
def get_cmp_pseudo_lines(self, pseudo):
if pseudo is None:
return pseudo
# Remove all the comments
tmp = self.re_sub(" // .*", "", pseudo)
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = self.re_sub(rep + "[a-f0-9A-F]+", rep + "XXXX", tmp)
tmp = self.re_sub("v[0-9]+", "vXXX", tmp)
tmp = self.re_sub("a[0-9]+", "aXXX", tmp)
tmp = self.re_sub("arg_[0-9]+", "aXXX", tmp)
return tmp
def get_cmp_asm(self, asm):
if asm is None:
return asm
# Ignore the comments in the assembly dump
tmp = asm.split(";")[0]
tmp = tmp.split(" # ")[0]
# Now, replace sub_, byte_, word_, dword_, loc_, etc...
for rep in CMP_REPS:
tmp = self.re_sub(rep + "[a-f0-9A-F]+", "XXXX", tmp)
# Remove dword ptr, byte ptr, etc...
for rep in CMP_REMS:
tmp = self.re_sub(rep + "[a-f0-9A-F]+", "", tmp)
reps = ["\+[a-f0-9A-F]+h\+"]
for rep in reps:
tmp = self.re_sub(rep, "+XXXX+", tmp)
tmp = self.re_sub("\.\.[a-f0-9A-F]{8}", "XXX", tmp)
# Strip any possible remaining white-space character at the end of
# the cleaned-up instruction
tmp = self.re_sub("[ \t\n]+$", "", tmp)
# Replace aName_XXX with aXXX, useful to ignore small changes in
# offsets created to strings
tmp = self.re_sub("a[A-Z]+[a-z0-9]+_[0-9]+", "aXXX", tmp)
return tmp
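# --- Illustrative sketch (not part of the original file) ---
# How the CMP_REPS-style normalisation makes two assembly lines
# comparable: the comment is stripped and address-bearing labels are
# collapsed to a fixed token, so only the instruction's structure is
# diffed. "sub_" here is just one typical prefix from CMP_REPS.
import re

def _demo_normalise_asm():
    clean = lambda s: re.sub(r"sub_[a-fA-F0-9]+", "XXXX",
                             s.split(";")[0]).strip()
    a = "call    sub_401000   ; init"
    b = "call    sub_402F80"
    assert clean(a) == clean(b) == "call    XXXX"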
def compare_graphs_pass(self, bblocks1, bblocks2, colours1, colours2, is_second = False):
dones1 = set()
dones2 = set()
# Now compare each basic block from the first function to all the
# basic blocks in the 2nd function
for key1 in bblocks1:
if key1 in dones1:
continue
for key2 in bblocks2:
if key2 in dones2:
continue
# Same number of instructions?
if len(bblocks1[key1]) == len(bblocks2[key2]):
mod = False
partial = True
i = 0
for ins1 in bblocks1[key1]:
ins2 = bblocks2[key2][i]
# Same mnemonic? The change can be only partial
if ins1[1] != ins2[1]:
partial = False
# Try to compare the assembly after doing some cleaning
cmp_asm1 = self.get_cmp_asm(ins1[2])
cmp_asm2 = self.get_cmp_asm(ins2[2])
if cmp_asm1 != cmp_asm2:
mod = True
if not partial:
continue
i += 1
if not mod:
# Perfect match, we discovered a basic block equal in both
# functions
colours1[key1] = 0xffffff
colours2[key2] = 0xffffff
dones1.add(key1)
dones2.add(key2)
break
elif not is_second and partial:
# Partial match, we discovered a basic block with the same
# mnemonics but something changed
#
# NOTE:
# Do not add the partial matches to the dones lists, as we
# can have complete matches after a partial match!
colours1[key1] = 0xCCffff
colours2[key2] = 0xCCffff
break
return colours1, colours2
def compare_graphs(self, g1, ea1, g2, ea2):
colours1 = {}
colours2 = {}
bblocks1 = g1[0]
bblocks2 = g2[0]
# Consider, by default, all basic blocks as added (i.e., new)
for key1 in bblocks1:
colours1[key1] = 0xCCCCFF
for key2 in bblocks2:
colours2[key2] = 0xCCCCFF
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, False)
colours1, colours2 = self.compare_graphs_pass(bblocks1, bblocks2, colours1, colours2, True)
return colours1, colours2
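# --- Illustrative sketch (not part of the original file) ---
# The two-pass colouring idea behind compare_graphs(): every block
# starts as "new" (0xCCCCFF), perfect matches become white (0xffffff)
# and same-shape blocks become "partial" (0xCCffff). Blocks here are
# reduced to plain mnemonic tuples and "partial" is judged crudely by
# length and first mnemonic; the real pass also compares the cleaned
# disassembly of every instruction.
def _demo_two_pass_colouring():
    bb1 = {"a": ("push", "mov", "call"), "b": ("xor", "ret")}
    bb2 = {"x": ("push", "mov", "call"), "y": ("xor", "leave")}
    col1 = dict.fromkeys(bb1, 0xCCCCFF)
    col2 = dict.fromkeys(bb2, 0xCCCCFF)
    for k1, ins1 in bb1.items():
        for k2, ins2 in bb2.items():
            if ins1 == ins2:
                col1[k1] = col2[k2] = 0xFFFFFF   # perfect match
            elif len(ins1) == len(ins2) and ins1[0] == ins2[0]:
                col1[k1] = col2[k2] = 0xCCFFFF   # partial match
    assert col1 == {"a": 0xFFFFFF, "b": 0xCCFFFF}
    assert col2 == {"x": 0xFFFFFF, "y": 0xCCFFFF}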
def get_graph(self, ea1, primary=False):
if primary:
db = "main"
else:
db = "diff"
cur = self.db_cursor()
dones = set()
sql = """ select bb.address bb_address, ins.address ins_address,
ins.mnemonic ins_mnem, ins.disasm ins_disasm
from %s.function_bblocks fb,
%s.bb_instructions bbins,
%s.instructions ins,
%s.basic_blocks bb,
%s.functions f
where ins.id = bbins.instruction_id
and bbins.basic_block_id = bb.id
and bb.id = fb.basic_block_id
and f.id = fb.function_id
and f.address = ?
order by bb.address asc""" % (db, db, db, db, db)
cur.execute(sql, (str(ea1),))
bb_blocks = {}
for row in result_iter(cur):
bb_ea = str(int(row["bb_address"]))
ins_ea = str(int(row["ins_address"]))
mnem = row["ins_mnem"]
dis = row["ins_disasm"]
if ins_ea in dones:
continue
dones.add(ins_ea)
try:
bb_blocks[bb_ea].append([ins_ea, mnem, dis])
except KeyError:
bb_blocks[bb_ea] = [ [ins_ea, mnem, dis] ]
sql = """ select (select address
from %s.basic_blocks
where id = bbr.parent_id) ea1,
(select address
from %s.basic_blocks
where id = bbr.child_id) ea2
from %s.bb_relations bbr,
%s.function_bblocks fbs,
%s.basic_blocks bbs,
%s.functions f
where f.id = fbs.function_id
and bbs.id = fbs.basic_block_id
and fbs.basic_block_id = bbr.child_id
and f.address = ?
order by 1 asc, 2 asc""" % (db, db, db, db, db, db)
cur.execute(sql, (str(ea1), ))
rows = result_iter(cur)
bb_relations = {}
for row in rows:
bb_ea1 = str(row["ea1"])
bb_ea2 = str(row["ea2"])
try:
bb_relations[bb_ea1].add(bb_ea2)
except KeyError:
bb_relations[bb_ea1] = set([bb_ea2])
cur.close()
return bb_blocks, bb_relations
def delete_function(self, ea):
cur = self.db_cursor()
cur.execute("delete from functions where address = ?", (str(ea), ))
cur.close()
def is_auto_generated(self, name):
for rep in CMP_REPS:
if name.startswith(rep):
return True
return False
def check_callgraph(self):
cur = self.db_cursor()
sql = """select callgraph_primes, callgraph_all_primes from program
union all
select callgraph_primes, callgraph_all_primes from diff.program"""
cur.execute(sql)
rows = cur.fetchall()
if len(rows) == 2:
cg1 = decimal.Decimal(rows[0]["callgraph_primes"])
cg_factors1 = json.loads(rows[0]["callgraph_all_primes"])
cg2 = decimal.Decimal(rows[1]["callgraph_primes"])
cg_factors2 = json.loads(rows[1]["callgraph_all_primes"])
if cg1 == cg2:
self.equal_callgraph = True
log("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
Warning("Callgraph signature for both databases is equal, the programs seem to be 100% equal structurally")
else:
FACTORS_CACHE[cg1] = cg_factors1
FACTORS_CACHE[cg2] = cg_factors2
diff = difference(cg1, cg2)
total = sum(cg_factors1.values())
if total == 0 or diff == 0:
log("Callgraphs are 100% equal")
else:
percent = diff * 100. / total
if percent >= 100:
log("Callgraphs are absolutely different")
else:
log("Callgraphs from both programs differ in %f%%" % percent)
cur.close()
def find_equal_matches_parallel(self):
cur = self.db_cursor()
# Start by calculating the total number of functions in both databases
sql = """select count(*) total from functions
union all
select count(*) total from diff.functions"""
cur.execute(sql)
rows = cur.fetchall()
if len(rows) != 2:
Warning("Malformed database, only %d rows!" % len(rows))
raise Exception("Malformed database!")
self.total_functions1 = rows[0]["total"]
self.total_functions2 = rows[1]["total"]
sql = "select address ea, mangled_function, nodes from (select * from functions intersect select * from diff.functions) x"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
for row in rows:
name = row["mangled_function"]
ea = row["ea"]
nodes = int(row["nodes"])
self.best_chooser.add_item(CChooser.Item(ea, name, ea, name, "100% equal", 1, nodes, nodes))
self.matched1.add(name)
self.matched2.add(name)
cur.close()
if not self.ignore_all_names:
self.find_same_name(self.partial_chooser)
self.run_heuristics_for_category("Best")
def run_heuristics_for_category(self, arg_category):
total_cpus = self.cpu_count
if total_cpus < 1:
total_cpus = 1
mode = "[Parallel]"
if total_cpus == 1:
mode = "[Single thread]"
postfix = ""
if self.ignore_small_functions:
postfix = " and f.instructions > 5 and df.instructions > 5 "
if self.hooks is not None:
if 'get_queries_postfix' in dir(self.hooks):
postfix = self.hooks.get_queries_postfix(arg_category, postfix)
threads_list = []
heuristics = list(HEURISTICS)
if self.hooks is not None:
if 'get_heuristics' in dir(self.hooks):
heuristics = self.hooks.get_heuristics(arg_category, heuristics)
for heur in heuristics:
if len(self.matched1) == self.total_functions1 or len(self.matched2) == self.total_functions2:
log("All functions matched in at least one database, finishing.")
break
category = heur["category"]
if category != arg_category:
continue
name = heur["name"]
sql = heur["sql"]
ratio = heur["ratio"]
min_value = 0.0
if ratio == HEUR_TYPE_RATIO_MAX:
min_value = heur["min"]
flags = heur["flags"]
if flags & HEUR_FLAG_UNRELIABLE == HEUR_FLAG_UNRELIABLE and not self.unreliable:
log_refresh("Skipping unreliable heuristic '%s'" % name)
continue
if flags & HEUR_FLAG_SLOW == HEUR_FLAG_SLOW and not self.slow_heuristics:
log_refresh("Skipping slow heuristic '%s'" % name)
continue
if arg_category == "Unreliable":
best = self.partial_chooser
partial = self.unreliable_chooser
else:
best = self.best_chooser
partial = self.partial_chooser
log_refresh("%s Finding with heuristic '%s'" % (mode, name))
sql = sql.replace("%POSTFIX%", postfix)
if self.hooks is not None:
if 'on_launch_heuristic' in dir(self.hooks):
sql = self.hooks.on_launch_heuristic(name, sql)
if ratio == HEUR_TYPE_NO_FPS:
t = Thread(target=self.add_matches_from_query, args=(sql, best))
elif ratio == HEUR_TYPE_RATIO:
t = Thread(target=self.add_matches_from_query_ratio, args=(sql, best, partial))
elif ratio == HEUR_TYPE_RATIO_MAX:
t = Thread(target=self.add_matches_from_query_ratio_max, args=(sql, min_value))
else:
raise Exception("Invalid heuristic ratio calculation value!")
t.name = name
t.time = time.time()
t.start()
threads_list.append(t)
if total_cpus == 1:
t.join()
threads_list = []
while len(threads_list) >= total_cpus:
for i, t in enumerate(threads_list):
if not t.is_alive():
debug_refresh("[Parallel] Heuristic '%s' took %f..." % (t.name, time.time() - t.time))
del threads_list[i]
debug_refresh("[Parallel] Waiting for any of %d thread(s) running to finish..." % len(threads_list))
break
else:
log_refresh("[Parallel] %d thread(s) running, waiting for at least one to finish..." % len(threads_list), do_log=False)
t.join(0.1)
if is_ida:
self.refresh()
if len(threads_list) > 0:
log_refresh("[Parallel] Waiting for remaining %d thread(s) to finish..." % len(threads_list), do_log=False)
do_cancel = False
times = 0
while len(threads_list) > 0 and not do_cancel:
times += 1
for i, t in enumerate(threads_list):
t.join(0.1)
if not t.is_alive():
debug_refresh("[Parallel] Heuristic '%s' took %f..." % (t.name, time.time() - t.time))
del threads_list[i]
debug_refresh("[Parallel] Waiting for remaining %d thread(s) to finish..." % len(threads_list))
break
t.join(0.1)
if time.time() - t.time > TIMEOUT_LIMIT:
do_cancel = True
try:
log_refresh("Timeout, cancelling queries...")
self.get_db().interrupt()
except:
print(("database.interrupt(): %s" % str(sys.exc_info()[1])))
if times % 50 == 0:
names = []
for x in threads_list:
names.append(x.name)
log_refresh("[Parallel] %d thread(s) still running:\n\n%s" % (len(threads_list), ", ".join(names)))
def ast_ratio(self, ast1, ast2):
if not self.relaxed_ratio:
return 0
return ast_ratio(ast1, ast2)
def check_ratio(self, ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2):
fratio = quick_ratio
decimal_values = "{0:.2f}"
if self.relaxed_ratio:
fratio = real_quick_ratio
decimal_values = "{0:.1f}"
v3 = 0
ast_done = False
if self.relaxed_ratio and ast1 is not None and ast2 is not None and max(len(ast1), len(ast2)) < 16:
ast_done = True
v3 = self.ast_ratio(ast1, ast2)
if v3 == 1.0:
return v3
v1 = 0
if pseudo1 is not None and pseudo2 is not None and pseudo1 != "" and pseudo2 != "":
tmp1 = self.get_cmp_pseudo_lines(pseudo1)
tmp2 = self.get_cmp_pseudo_lines(pseudo2)
if tmp1 == "" or tmp2 == "":
log("Error cleaning pseudo-code!")
else:
v1 = fratio(tmp1, tmp2)
v1 = float(decimal_values.format(v1))
if v1 == 1.0:
# If real_quick_ratio returns 1 try again with quick_ratio
# because it can result in false positives. If real_quick_ratio
# says 'different', there is no point in continuing.
if fratio == real_quick_ratio:
v1 = quick_ratio(tmp1, tmp2)
if v1 == 1.0:
return 1.0
tmp_asm1 = self.get_cmp_asm_lines(asm1)
tmp_asm2 = self.get_cmp_asm_lines(asm2)
v2 = fratio(tmp_asm1, tmp_asm2)
v2 = float(decimal_values.format(v2))
if v2 == 1:
# Actually, same as the quick_ratio/real_quick_ratio check done
# with the pseudo-code
if fratio == real_quick_ratio:
v2 = quick_ratio(tmp_asm1, tmp_asm2)
if v2 == 1.0:
return 1.0
if self.relaxed_ratio and not ast_done:
v3 = fratio(ast1, ast2)
v3 = float(decimal_values.format(v3))
if v3 == 1:
return 1.0
v4 = 0.0
if md1 == md2 and md1 > 0.0:
# An MD-Index >= 10.0 is somewhat rare
if self.relaxed_ratio and md1 > 10.0:
return 1.0
v4 = min((v1 + v2 + v3 + 3.0) / 5, 1.0)
r = max(v1, v2, v3, v4)
if r == 1.0 and md1 != md2:
# We cannot assign a 1.0 ratio if the MD indices differ; that would
# be an error
r = 0
for v in [v1, v2, v3, v4]:
if v != 1.0 and v > r:
r = v
return r
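# --- Illustrative sketch (not part of the original file) ---
# The escalation done in check_ratio(): real_quick_ratio() is only a
# cheap upper bound, so a perfect 1.0 from it must be confirmed with
# the (still cheap) quick_ratio() before being trusted. The
# module-level quick_ratio/real_quick_ratio helpers used above are
# assumed to behave like difflib's SequenceMatcher methods of the
# same names.
from difflib import SequenceMatcher

def _demo_ratio_escalation(a="mov eax, 1", b="1, mov eax"):
    sm = SequenceMatcher(None, a, b)
    upper = sm.real_quick_ratio()   # 1.0: same lengths, nothing compared
    if upper == 1.0:
        upper = sm.quick_ratio()    # still 1.0: same character multiset
    if upper == 1.0:
        upper = sm.ratio()          # the real, order-sensitive similarity
    assert upper < 1.0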
def all_functions_matched(self):
return len(self.matched1) == self.total_functions1 or \
len(self.matched2) == self.total_functions2
def add_matches_from_query_ratio(self, sql, best, partial, unreliable=None, debug=False):
if self.all_functions_matched():
return
cur = self.db_cursor()
try:
cur.execute(sql)
except:
log("Error: %s" % str(sys.exc_info()[1]))
return
i = 0
t = time.time()
while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
if time.time() - t > self.timeout:
log("Timeout")
break
i += 1
if i % 50000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row["ea"])
name1 = row["name1"]
ea2 = row["ea2"]
name2 = row["name2"]
desc = row["description"]
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
ast1 = row["pseudo_primes1"]
ast2 = row["pseudo_primes2"]
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
md1 = row["md1"]
md2 = row["md2"]
if name1 in self.matched1 or name2 in self.matched2:
continue
r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
if debug:
print("0x%x 0x%x %d" % (int(ea), int(ea2), r))
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, r = self.hooks.on_match(d1, d2, desc, r)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
if r == 1.0:
self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
elif r >= 0.5:
partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
elif r < 0.5 and unreliable is not None:
unreliable.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
else:
partial.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
def add_matches_from_query_ratio_max(self, sql, val):
if self.all_functions_matched():
return
cur = self.db_cursor()
try:
cur.execute(sql)
except:
log("Error: %s" % str(sys.exc_info()[1]))
return
i = 0
t = time.time()
while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
if time.time() - t > self.timeout:
log("Timeout")
break
i += 1
if i % 50000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row["ea"])
name1 = row["name1"]
ea2 = row["ea2"]
name2 = row["name2"]
desc = row["description"]
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
ast1 = row["pseudo_primes1"]
ast2 = row["pseudo_primes2"]
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
md1 = row["md1"]
md2 = row["md2"]
if name1 in self.matched1 or name2 in self.matched2:
continue
r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, r = self.hooks.on_match(d1, d2, desc, r)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
if r == 1.0:
self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
elif r >= 0.5:
self.partial_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
elif r < 0.5 and r > val:
self.unreliable_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
def add_matches_from_cursor_ratio_max(self, cur, best, partial, val):
if self.all_functions_matched():
return
matches = []
i = 0
t = time.time()
while self.max_processed_rows == 0 or (self.max_processed_rows != 0 and i < self.max_processed_rows):
if time.time() - t > self.timeout:
log("Timeout")
break
i += 1
if i % 50000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row["ea"])
name1 = row["name1"]
ea2 = row["ea2"]
name2 = row["name2"]
desc = row["description"]
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
ast1 = row["pseudo_primes1"]
ast2 = row["pseudo_primes2"]
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
md1 = row["md1"]
md2 = row["md2"]
if name1 in self.matched1 or name2 in self.matched2:
continue
r = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, r = self.hooks.on_match(d1, d2, desc, r)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
good_ratio = False
if r == 1.0:
item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
good_ratio = True
self.best_chooser.add_item(item)
self.matched1.add(name1)
self.matched2.add(name2)
elif r > val:
item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
good_ratio = True
best.add_item(item)
self.matched1.add(name1)
self.matched2.add(name2)
elif partial is not None:
item = CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2)
good_ratio = True
partial.add_item(item)
self.matched1.add(name1)
self.matched2.add(name2)
if good_ratio:
matches.append([0, "0x%x" % int(ea), name1, ea2, name2])
return matches
def add_matches_from_query(self, sql, choose):
""" Warning: use this *only* if the ratio is known to be 1.00 """
if self.all_functions_matched():
return
cur = self.db_cursor()
try:
cur.execute(sql)
except:
log("Error: %s" % str(sys.exc_info()[1]))
return
i = 0
while 1:
i += 1
if i % 1000 == 0:
log("Processed %d rows..." % i)
row = cur.fetchone()
if row is None:
break
ea = str(row["ea"])
name1 = row["name1"]
ea2 = str(row["ea2"])
name2 = row["name2"]
desc = row["description"]
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
ast1 = row["pseudo_primes1"]
ast2 = row["pseudo_primes2"]
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
md1 = row["md1"]
md2 = row["md2"]
if name1 in self.matched1 or name2 in self.matched2:
continue
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, r = self.hooks.on_match(d1, d2, desc, 1.0)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
def search_small_differences(self, choose):
cur = self.db_cursor()
# Same basic blocks, edges, mnemonics, etc... but different names
sql = """ select distinct f.address ea, f.name name1, df.name name2,
f.names f_names, df.names df_names, df.address ea2,
f.nodes bb1, df.nodes bb2,
f.pseudocode pseudo1, df.pseudocode pseudo2,
f.assembly asm1, df.assembly asm2,
f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
cast(f.md_index as real) md1, cast(df.md_index as real) md2
from functions f,
diff.functions df
where f.nodes = df.nodes
and f.edges = df.edges
and f.mnemonics = df.mnemonics
and f.cyclomatic_complexity = df.cyclomatic_complexity
and f.names != '[]'"""
cur.execute(sql)
rows = result_iter(cur)
for row in rows:
ea = str(row["ea"])
name1 = row["name1"]
name2 = row["name2"]
if name1 in self.matched1 or name2 in self.matched2:
continue
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
s1 = set(json.loads(row["f_names"]))
s2 = set(json.loads(row["df_names"]))
total = max(len(s1), len(s2))
commons = len(s1.intersection(s2))
ratio = (commons * 1.) / total
if ratio >= 0.5:
ea2 = row["ea2"]
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
ast1 = row["pseudo_primes1"]
ast2 = row["pseudo_primes2"]
md1 = row["md1"]
md2 = row["md2"]
desc = "Nodes, edges, complexity and mnemonics with small differences"
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, ratio = self.hooks.on_match(d1, d2, desc, ratio)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
item = CChooser.Item(ea, name1, ea2, name2, desc, ratio, bb1, bb2)
if ratio == 1.0:
self.best_chooser.add_item(item)
else:
choose.add_item(item)
self.matched1.add(name1)
self.matched2.add(name2)
cur.close()
return
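# --- Illustrative sketch (not part of the original file) ---
# The "small differences" score above is just set overlap between the
# two callee-name lists: |A intersect B| / max(|A|, |B|).
def _demo_name_overlap():
    s1 = {"memcpy", "strlen", "log_error"}
    s2 = {"memcpy", "strlen", "log_warning"}
    ratio = len(s1 & s2) / float(max(len(s1), len(s2)))
    assert abs(ratio - 2.0 / 3.0) < 1e-9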
def find_same_name(self, choose):
cur = self.db_cursor()
sql = """select f.address ea1, f.mangled_function mangled1,
d.address ea2, f.name name, d.name name2,
d.mangled_function mangled2,
f.pseudocode pseudo1, d.pseudocode pseudo2,
f.assembly asm1, d.assembly asm2,
f.pseudocode_primes primes1,
d.pseudocode_primes primes2,
f.nodes bb1, d.nodes bb2,
cast(f.md_index as real) md1, cast(d.md_index as real) md2
from functions f,
diff.functions d
where (d.mangled_function = f.mangled_function
or d.name = f.name)
and f.name not like 'nullsub_%'"""
desc = "Perfect match, same name"
log_refresh("Finding with heuristic '%s'" % desc)
cur.execute(sql)
rows = cur.fetchall()
cur.close()
if len(rows) > 0 and not self.all_functions_matched():
for row in rows:
ea = row["ea1"]
name = row["mangled1"]
ea2 = row["ea2"]
name1 = row["name"]
name2 = row["name2"]
name2_1 = row["mangled2"]
if name in self.matched1 or name1 in self.matched1 or \
name2 in self.matched2 or name2_1 in self.matched2:
continue
if self.ignore_sub_names and name.startswith("sub_"):
continue
ast1 = row["primes1"]
ast2 = row["primes2"]
bb1 = int(row["bb1"])
bb2 = int(row["bb2"])
pseudo1 = row["pseudo1"]
pseudo2 = row["pseudo2"]
asm1 = row["asm1"]
asm2 = row["asm2"]
md1 = row["md1"]
md2 = row["md2"]
ratio = self.check_ratio(ast1, ast2, pseudo1, pseudo2, asm1, asm2, md1, md2)
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, ratio = self.hooks.on_match(d1, d2, desc, ratio)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
if float(ratio) == 1.0 or (self.relaxed_ratio and md1 != 0 and md1 == md2):
self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, 1, bb1, bb2))
else:
choose.add_item(CChooser.Item(ea, name1, ea2, name2, desc, ratio, bb1, bb2))
self.matched1.add(name)
self.matched1.add(name1)
self.matched2.add(name2)
self.matched2.add(name2_1)
def get_function_id(self, name, primary=True):
cur = self.db_cursor()
rid = None
db_name = "main"
if not primary:
db_name = "diff"
try:
sql = "select id from %s.functions where name = ?" % db_name
cur.execute(sql, (name,))
row = cur.fetchone()
if row:
rid = row["id"]
finally:
cur.close()
return rid
def find_matches_in_hole(self, last, item, row):
cur = self.db_cursor()
try:
postfix = ""
if self.ignore_small_functions:
postfix = " and instructions > 5"
desc = "Call address sequence"
id1 = row["id1"]
id2 = row["id2"]
sql = """ select * from functions where id = ? """ + postfix + """
union all
select * from diff.functions where id = ? """ + postfix
threshold = min(0.6, float(item[5]))
for j in range(0, min(10, id1 - last)):
for i in range(0, min(10, id1 - last)):
cur.execute(sql, (id1+j, id2+i))
rows = cur.fetchall()
if len(rows) == 2:
name1 = rows[0]["name"]
name2 = rows[1]["name"]
if name1 in self.matched1 or name2 in self.matched2:
continue
r = self.check_ratio(rows[0]["pseudocode_primes"], rows[1]["pseudocode_primes"], \
rows[0]["pseudocode"], rows[1]["pseudocode"], \
rows[0]["assembly"], rows[1]["assembly"], \
float(rows[0]["md_index"]), float(rows[1]["md_index"]))
if r < 0.5:
if rows[0]["names"] != "[]" and rows[0]["names"] == rows[1]["names"]:
r = 0.5001
if r > threshold:
ea = rows[0]["address"]
ea2 = rows[1]["address"]
bb1 = rows[0]["nodes"]
bb2 = rows[1]["nodes"]
ast1 = rows[0]["pseudocode_primes"]
ast2 = rows[1]["pseudocode_primes"]
pseudo1 = rows[0]["pseudocode"]
pseudo2 = rows[1]["pseudocode"]
asm1 = rows[0]["assembly"]
asm2 = rows[1]["assembly"]
md1 = rows[0]["md_index"]
md2 = rows[1]["md_index"]
# Pretty much every single heuristic fails with small functions,
# ignore them...
if bb1 <= 3 or bb2 <= 3:
continue
should_add = True
if self.hooks is not None:
if 'on_match' in dir(self.hooks):
d1 = {"ea": ea, "bb": bb1, "name": name1, "ast": ast1, "pseudo": pseudo1, "asm": asm1, "md": md1}
d2 = {"ea": ea, "bb": bb2, "name": name2, "ast": ast2, "pseudo": pseudo2, "asm": asm2, "md": md2}
should_add, r = self.hooks.on_match(d1, d2, desc, r)
if not should_add or name1 in self.matched1 or name2 in self.matched2:
continue
if r == 1:
self.best_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
elif r > 0.5:
self.partial_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
else:
self.unreliable_chooser.add_item(CChooser.Item(ea, name1, ea2, name2, desc, r, bb1, bb2))
self.matched1.add(name1)
self.matched2.add(name2)
finally:
cur.close()
def find_from_matches(self, the_items):
# XXX: FIXME: This is wrong in many ways, but still works... FIX IT!
# Rule 1: if a function A in program P has id X, and function B in
# the same program has id X + 1, then, in program P2, function B may
# be the function that comes right after A's match.
log_refresh("Finding with heuristic 'Call address sequence'")
cur = self.db_cursor()
try:
# Create a copy of all the functions
cur.execute("create temporary table best_matches (id, id1, ea1, name1, id2, ea2, name2)")
# Insert each matched function into the temporary table
i = 0
for match in the_items:
ea1 = match[1]
name1 = match[2]
ea2 = match[3]
name2 = match[4]
ratio = float(match[5])
if ratio < 0.5:
continue
id1 = self.get_function_id(name1)
id2 = self.get_function_id(name2, False)
sql = """insert into best_matches (id, id1, ea1, name1, id2, ea2, name2)
values (?, ?, ?, ?, ?, ?, ?)"""
cur.execute(sql, (i, id1, str(ea1), name1, id2, str(ea2), name2))
i += 1
last = None
cur.execute("select * from best_matches order by id1 asc")
for row in cur:
row_id = row["id1"]
if last is None or last+1 == row_id:
last = row_id
continue
item = the_items[row["id"]]
self.find_matches_in_hole(last, item, row)
last = row_id
cur.execute("drop table best_matches")
finally:
cur.close()
def find_callgraph_matches(self):
best_items = list(self.best_chooser.items)
self.find_callgraph_matches_from(best_items, 0.60)
partial_items = list(self.partial_chooser.items)
self.find_callgraph_matches_from(partial_items, 0.80)
def find_callgraph_matches_from(self, the_items, min_value):
sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
'Callgraph match (%s)' description,
f.pseudocode pseudo1, df.pseudocode pseudo2,
f.assembly asm1, df.assembly asm2,
f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
f.nodes bb1, df.nodes bb2,
cast(f.md_index as real) md1, cast(df.md_index as real) md2,
df.tarjan_topological_sort, df.strongly_connected_spp
from functions f,
diff.functions df
where f.address in (%s)
and df.address in (%s)
and f.name not like 'nullsub_%%'
and df.name not like 'nullsub_%%'
and abs(f.md_index - df.md_index) < 1
and ((f.nodes > 5 and df.nodes > 5)
or (f.instructions > 10 and df.instructions > 10))"""
main_callers_sql = """select address from main.callgraph where func_id = ? and type = ?"""
diff_callers_sql = """select address from diff.callgraph where func_id = ? and type = ?"""
cur = self.db_cursor()
dones = set()
prev_best_matches = len(self.best_chooser.items)
prev_part_matches = len(self.partial_chooser.items)
total_dones = 0
while len(the_items) > 0:
total_dones += 1
if total_dones % 1000 == 0:
log("Processed %d callgraph matches..." % total_dones)
curr_best_matches = len(self.best_chooser.items)
curr_part_matches = len(self.partial_chooser.items)
fmt = "Queued item(s) %d, Best matches %d, Partial Matches %d (Previously %d and %d)"
log(fmt % (len(the_items), curr_best_matches, curr_part_matches, prev_best_matches, prev_part_matches))
match = the_items.pop()
ea1 = match[1]
name1 = match[2]
name2 = match[4]
if ea1 in dones:
continue
dones.add(ea1)
id1 = self.get_function_id(name1)
id2 = self.get_function_id(name2, False)
for call_type in ['caller', 'callee']:
cur.execute(main_callers_sql, (id1, call_type))
main_address_set = set()
for row in cur.fetchall():
main_address_set.add("'%s'" % row[0])
cur.execute(diff_callers_sql, (id2, call_type))
diff_address_set = set()
for row in cur.fetchall():
diff_address_set.add("'%s'" % row[0])
if len(main_address_set) > 0 and len(diff_address_set) > 0:
tname1 = name1.replace("'", "''")
tname2 = name2.replace("'", "''")
cur.execute(sql % (("%s of %s/%s" % (call_type, tname1, tname2)), ",".join(main_address_set), ",".join(diff_address_set)))
matches = self.add_matches_from_cursor_ratio_max(cur, self.partial_chooser, None, min_value)
if matches is not None and len(matches) > 0 and self.unreliable:
the_items.extend(matches)
def find_matches_parallel(self):
self.run_heuristics_for_category("Partial")
# Search using some of the previous criteria, but calculating the
# edit distance
log_refresh("Finding with heuristic 'Small names difference'")
self.search_small_differences(self.partial_chooser)
def find_brute_force(self):
cur = self.db_cursor()
sql = "create temp table unmatched(id integer null primary key, address, main)"
cur.execute(sql)
# Find functions not matched in the primary database
sql = "select name, address from functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
for row in rows:
name = row["name"]
if name not in self.matched1:
ea = row[1]
sql = "insert into unmatched(address,main) values(?,?)"
cur.execute(sql, (ea, 1))
# Find functions not matched in the secondary database
sql = "select name, address from diff.functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
for row in rows:
name = row["name"]
if name not in self.matched2:
ea = row[1]
sql = "insert into unmatched(address,main) values(?,?)"
cur.execute(sql, (ea, 0))
cur.close()
cur = self.db_cursor()
sql = """select distinct f.address ea, f.name name1, df.address ea2, df.name name2,
'Brute forcing' description,
f.pseudocode pseudo1, df.pseudocode pseudo2,
f.assembly asm1, df.assembly asm2,
f.pseudocode_primes pseudo_primes1, df.pseudocode_primes pseudo_primes2,
f.nodes bb1, df.nodes bb2,
cast(f.md_index as real) md1, cast(df.md_index as real) md2,
df.tarjan_topological_sort, df.strongly_connected_spp
from functions f,
diff.functions df,
unmatched um
where ((f.address = um.address and um.main = 1)
or (df.address = um.address and um.main = 0))
and ((f.md_index = df.md_index
and f.md_index > 1 and df.md_index > 1)
or (f.kgh_hash = df.kgh_hash
and f.kgh_hash > 7 and df.kgh_hash > 7))"""
cur.execute(sql)
log_refresh("Finding via brute-forcing...")
self.add_matches_from_cursor_ratio_max(cur, self.unreliable_chooser, None, 0.5)
def find_experimental_matches(self):
self.run_heuristics_for_category("Experimental")
# Find using brute-force
log_refresh("Brute-forcing...")
self.find_brute_force()
def find_unreliable_matches(self):
self.run_heuristics_for_category("Unreliable")
def find_unmatched(self):
cur = self.db_cursor()
sql = "select name, address from functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
choose = self.chooser("Unmatched in secondary", self, False)
for row in rows:
name = row["name"]
if name not in self.matched1:
ea = row[1]
choose.add_item(CChooser.Item(ea, name))
self.unmatched_second = choose
sql = "select name, address from diff.functions"
cur.execute(sql)
rows = cur.fetchall()
if len(rows) > 0:
choose = self.chooser("Unmatched in primary", self, False)
for row in rows:
name = row["name"]
if name not in self.matched2:
ea = row["address"]
choose.add_item(CChooser.Item(ea, name))
self.unmatched_primary = choose
cur.close()
def create_choosers(self):
self.unreliable_chooser = self.chooser("Unreliable matches", self)
self.partial_chooser = self.chooser("Partial matches", self)
self.best_chooser = self.chooser("Best matches", self)
self.unmatched_second = self.chooser("Unmatched in secondary", self, False)
self.unmatched_primary = self.chooser("Unmatched in primary", self, False)
def save_results(self, filename):
if os.path.exists(filename):
os.remove(filename)
log("Previous diff results '%s' removed." % filename)
results_db = sqlite3.connect(filename, check_same_thread=True)
results_db.text_factory = str
cur = results_db.cursor()
try:
sql = "create table config (main_db text, diff_db text, version text, date text)"
cur.execute(sql)
sql = "insert into config values (?, ?, ?, ?)"
cur.execute(sql, (self.db_name, self.last_diff_db, VERSION_VALUE, time.asctime()))
sql = "create table results (type, line, address, name, address2, name2, ratio, bb1, bb2, description)"
cur.execute(sql)
sql = "create unique index uq_results on results(address, address2)"
cur.execute(sql)
sql = "create table unmatched (type, line, address, name)"
cur.execute(sql)
with results_db:
results_sql = "insert or ignore into results values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
unmatched_sql = "insert into unmatched values (?, ?, ?, ?)"
for item in self.best_chooser.items:
l = list(item)
l.insert(0, 'best')
cur.execute(results_sql, l)
for item in self.partial_chooser.items:
l = list(item)
l.insert(0, 'partial')
cur.execute(results_sql, l)
for item in self.unreliable_chooser.items:
l = list(item)
l.insert(0, 'unreliable')
cur.execute(results_sql, l)
for item in self.unmatched_primary.items:
l = list(item)
l.insert(0, 'primary')
cur.execute(unmatched_sql, l)
for item in self.unmatched_second.items:
l = list(item)
l.insert(0, 'secondary')
cur.execute(unmatched_sql, l)
log("Diffing results saved in file '%s'." % filename)
finally:
cur.close()
results_db.close()
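# --- Illustrative sketch (not part of the original file) ---
# Why save_results() pairs "insert or ignore" with the unique index
# on (address, address2): a match reported by several heuristics is
# only stored once, and later duplicates are silently dropped.
import sqlite3

def _demo_insert_or_ignore():
    db = sqlite3.connect(":memory:")
    db.execute("create table results (address, address2, ratio)")
    db.execute("create unique index uq on results(address, address2)")
    db.execute("insert or ignore into results values ('0x1', '0x2', 1.0)")
    db.execute("insert or ignore into results values ('0x1', '0x2', 0.9)")
    assert db.execute("select count(*) from results").fetchone()[0] == 1
    db.close()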
def try_attach(self, cur, db):
try:
cur.execute('attach "%s" as diff' % db)
except:
pass
def diff(self, db):
self.last_diff_db = db
cur = self.db_cursor()
self.try_attach(cur, db)
try:
cur.execute("select value from diff.version")
except:
log("Error: %s " % sys.exc_info()[1])
log("The selected file does not look like a valid Diaphora exported database!")
cur.close()
return False
row = cur.fetchone()
if not row:
log("Invalid database!")
return False
if row["value"] != VERSION_VALUE:
log("WARNING: The database is from a different version (current %s, database %s)!" % (VERSION_VALUE, row[0]))
try:
t0 = time.time()
log_refresh("Diffing...", True)
self.do_continue = True
if self.equal_db():
log("The databases seems to be 100% equal")
if self.do_continue:
# Compare the call graphs
self.check_callgraph()
if self.project_script is not None:
log("Loading project specific Python script...")
if not self.load_hooks():
return False
# Find the unmodified functions
log_refresh("Finding best matches...")
self.find_equal_matches_parallel()
# Find the modified functions
log_refresh("Finding partial matches")
self.find_matches_parallel()
# Call address sequence heuristic
self.find_from_matches(self.best_chooser.items)
if self.slow_heuristics:
# Find the functions from the callgraph
log_refresh("Finding with heuristic 'Callgraph matches'")
self.find_callgraph_matches()
if self.unreliable:
# Find using likely unreliable methods modified functions
log_refresh("Finding probably unreliable matches")
self.find_unreliable_matches()
if self.experimental:
# Find using experimental methods modified functions
log_refresh("Finding experimental matches")
self.find_from_matches(self.partial_chooser.items)
self.find_experimental_matches()
# Show the list of unmatched functions in both databases
log_refresh("Finding unmatched functions")
self.find_unmatched()
if self.hooks is not None:
if 'on_finish' in dir(self.hooks):
self.hooks.on_finish()
log("Done. Took {} seconds.".format(time.time() - t0))
finally:
cur.close()
return True
if __name__ == "__main__":
version_info = sys.version_info
if version_info[0] == 2:
log("WARNING: You are using Python 2 instead of Python 3. The main branch of Diaphora works exclusively with Python 3.")
log("TIP: There are other branches that contain backward compatability.")
do_diff = True
if os.getenv("DIAPHORA_AUTO_DIFF") is not None:
db1 = os.getenv("DIAPHORA_DB1")
if db1 is None:
raise Exception("No database file specified!")
db2 = os.getenv("DIAPHORA_DB2")
if db2 is None:
raise Exception("No database file to diff against specified!")
diff_out = os.getenv("DIAPHORA_DIFF_OUT")
if diff_out is None:
raise Exception("No output file for diff specified!")
elif is_ida:
diaphora_dir = os.path.dirname(__file__)
script = os.path.join(diaphora_dir, "diaphora_ida.py")
exec(compile(open(script, "rb").read(), script, 'exec'))
do_diff = False
else:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("db1")
parser.add_argument("db2")
parser.add_argument("-o", "--outfile", help="Write output to <outfile>")
args = parser.parse_args()
db1 = args.db1
db2 = args.db2
if args.outfile:
diff_out = args.outfile
else:
diff_out = "{}_vs_{}.diaphora".format(
os.path.basename(os.path.splitext(db1)[0]),
os.path.basename(os.path.splitext(db2)[0]))
if do_diff:
bd = CBinDiff(db1)
if not is_ida:
bd.ignore_all_names = False
bd.db = sqlite3.connect(db1, check_same_thread=True)
bd.db.text_factory = str
bd.db.row_factory = sqlite3.Row
bd.diff(db2)
bd.save_results(diff_out)
| agpl-3.0 | -3,937,475,404,436,159,500 | 33.746447 | 154 | 0.56652 | false | 3.548843 | false | false | false |
ibm-cds-labs/simple-data-pipe-connector-flightstats | pixiedust_flightpredict/pixiedust_flightpredict/histogramDisplay.py | 1 | 1782 | # -------------------------------------------------------------------------------
# Copyright IBM Corp. 2016
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------
from pixiedust.display.display import Display
from pixiedust.display.chart.renderers.baseChartDisplay import BaseChartDisplay
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from pixiedust.utils.shellAccess import ShellAccess
class HistogramDisplay(BaseChartDisplay):
def doRender(self, handlerId):
rdd = ShellAccess.sqlContext.sql("select deltaDeparture from training").map(lambda s: s.deltaDeparture)\
.filter(lambda s: s < 50 and s > 12)
histo = rdd.histogram(50)
bins = [i for i in histo[0]]
fig, ax = plt.subplots(figsize=(12,8))
ax.set_ylabel('Number of records')
ax.set_xlabel('Bin')
plt.title('Histogram')
intervals = [abs(j-i) for i,j in zip(bins[:-1], bins[1:])]
values=[sum(intervals[:i]) for i in range(0,len(intervals))]
ax.bar(values, histo[1], intervals, color='b', label = "Bins")
ax.set_xticks(bins[:-1])
# set_xticklabels is the portable way to label the ticks; older
# matplotlib treats a second positional argument to set_xticks as the
# 'minor' flag, not as a list of labels.
ax.set_xticklabels([int(i) for i in bins[:-1]])
ax.legend()
def doRenderChart(self):
pass
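# --- Illustrative sketch (not part of the original file) ---
# What the intervals/values arithmetic above computes: PySpark's
# rdd.histogram(n) returns (bin_edges, counts), while matplotlib's
# bar() wants each bar's left edge plus its width. Plain-Python
# stand-in with made-up edges:
def _demo_histogram_bars():
    edges = [12, 20, 35, 50]          # histo[0]: n+1 bin edges
    counts = [4, 9, 2]                # histo[1]: n counts
    widths = [j - i for i, j in zip(edges[:-1], edges[1:])]
    lefts = [sum(widths[:i]) for i in range(len(widths))]
    # ax.bar(lefts, counts, widths) would draw bars starting at 0, 8
    # and 23 — offsets relative to edges[0], just as in doRender().
    assert widths == [8, 15, 15] and lefts == [0, 8, 23]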
| apache-2.0 | 5,486,381,640,515,526,000 | 40.44186 | 112 | 0.627946 | false | 3.995516 | false | false | false |
openSUSE/vdsm | vdsm/libvirtev.py | 1 | 18363 | #
# Copyright 2011 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
#################################################################################
# Start off by implementing a general purpose event loop for anyones use
#################################################################################
import sys
import getopt
import os
import libvirt
import select
import time
import threading
#
# This general purpose event loop will support waiting for file handle
# I/O and error events, as well as scheduling repeatable timers with
# a fixed interval.
#
# It is a pure Python implementation based around the poll() API
#
class virEventLoopPure:
# This class contains the data we need to track for a
# single file handle
class virEventLoopPureHandle:
def __init__(self, handle, fd, events, cb, opaque):
self.handle = handle
self.fd = fd
self.events = events
self.cb = cb
self.opaque = opaque
def get_id(self):
return self.handle
def get_fd(self):
return self.fd
def get_events(self):
return self.events
def set_events(self, events):
self.events = events
def dispatch(self, events):
self.cb(self.handle,
self.fd,
events,
self.opaque[0],
self.opaque[1])
# This class contains the data we need to track for a
# single periodic timer
class virEventLoopPureTimer:
def __init__(self, timer, interval, cb, opaque):
self.timer = timer
self.interval = interval
self.cb = cb
self.opaque = opaque
self.lastfired = 0
def get_id(self):
return self.timer
def get_interval(self):
return self.interval
def set_interval(self, interval):
self.interval = interval
def get_last_fired(self):
return self.lastfired
def set_last_fired(self, now):
self.lastfired = now
def dispatch(self):
self.cb(self.timer,
self.opaque[0],
self.opaque[1])
def __init__(self, debug=False):
self.debugOn = debug
self.poll = select.poll()
self.pipetrick = os.pipe()
self.pendingWakeup = False
self.runningPoll = False
self.nextHandleID = 1
self.nextTimerID = 1
self.handles = []
self.timers = []
self.quit = False
# The event loop can be used from multiple threads at once.
# Specifically while the main thread is sleeping in poll()
# waiting for events to occur, another thread may come along
# and add/update/remove a file handle, or timer. When this
# happens we need to interrupt the poll() sleep in the other
# thread, so that it'll see the file handle / timer changes.
#
# Using OS level signals for this is very unreliable and
# hard to implement correctly. Thus we use the real classic
# "self pipe" trick. A anonymous pipe, with one end registered
# with the event loop for input events. When we need to force
# the main thread out of a poll() sleep, we simple write a
# single byte of data to the other end of the pipe.
self.debug("Self pipe watch %d write %d" %(self.pipetrick[0], self.pipetrick[1]))
self.poll.register(self.pipetrick[0], select.POLLIN)
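# --- Illustrative sketch (not part of the original file) ---
# The self-pipe trick in isolation: a poll() sleeping "forever" in
# one thread is woken deterministically by a 1-byte write from
# another. POSIX-only, since it relies on select.poll().
import os
import select
import threading
import time

def _demo_self_pipe():
    r, w = os.pipe()
    p = select.poll()
    p.register(r, select.POLLIN)
    threading.Thread(
        target=lambda: (time.sleep(0.05), os.write(w, b'c'))).start()
    events = p.poll()          # blocks until the write arrives
    assert events and events[0][0] == r
    os.read(r, 1)              # drain, so the next poll() sleeps again
    os.close(r)
    os.close(w)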
def debug(self, msg):
if self.debugOn:
print msg
# Calculate when the next timeout is due to occur, returning
# the absolute timestamp for the next timeout, or 0 if there is
# no timeout due
def next_timeout(self):
next = 0
for t in self.timers:
last = t.get_last_fired()
interval = t.get_interval()
if interval < 0:
continue
if next == 0 or (last + interval) < next:
next = last + interval
return next
# Lookup a virEventLoopPureHandle object based on file descriptor
def get_handle_by_fd(self, fd):
for h in self.handles:
if h.get_fd() == fd:
return h
return None
# Lookup a virEventLoopPureHandle object based on its event loop ID
def get_handle_by_id(self, handleID):
for h in self.handles:
if h.get_id() == handleID:
return h
return None
# This is the heart of the event loop, performing one single
# iteration. It asks when the next timeout is due, and then
# calculates the maximum amount of time it is able to sleep
# for in poll() pending file handle events.
#
# It then goes into the poll() sleep.
#
# When poll() returns, there will be zero or more file handle
# events which need to be dispatched to registered callbacks
# It may also be time to fire some periodic timers.
#
# Due to the coarse granularity of scheduler timeslices, if
# we ask for a sleep of 500ms in order to satisfy a timer, we
# may return up to one scheduler timeslice early. So even though
# our sleep timeout was reached, the registered timer may not
# technically be at its expiry point. This leads to us going
# back around the loop with a crazy 5ms sleep. So when checking
# if timeouts are due, we allow a margin of 20ms, to avoid
# these pointless repeated tiny sleeps.
def run_once(self):
sleep = -1
self.runningPoll = True
next = self.next_timeout()
self.debug("Next timeout due at %d" % next)
if next > 0:
now = int(time.time() * 1000)
if now >= next:
sleep = 0
else:
sleep = (next - now) / 1000.0
self.debug("Poll with a sleep of %d" % sleep)
events = self.poll.poll(sleep)
# Dispatch any file handle events that occurred
for (fd, revents) in events:
# See if the event was from the self-pipe
# telling us to wake up. If so, discard
# the data and continue
if fd == self.pipetrick[0]:
self.pendingWakeup = False
os.read(fd, 1)
continue
h = self.get_handle_by_fd(fd)
if h:
self.debug("Dispatch fd %d handle %d events %d" % (fd, h.get_id(), revents))
h.dispatch(self.events_from_poll(revents))
now = int(time.time() * 1000)
for t in self.timers:
interval = t.get_interval()
if interval < 0:
continue
want = t.get_last_fired() + interval
# Deduct 20ms, since scheduler timeslices
# mean we could be ever so slightly early
if now >= (want-20):
self.debug("Dispatch timer %d now %s want %s" % (t.get_id(), str(now), str(want)))
t.set_last_fired(now)
t.dispatch()
self.runningPoll = False
# Actually run the event loop forever
def run_loop(self):
self.quit = False
while not self.quit:
self.run_once()
def interrupt(self):
if self.runningPoll and not self.pendingWakeup:
self.pendingWakeup = True
os.write(self.pipetrick[1], 'c')
# Registers a new file handle 'fd', monitoring for 'events' (libvirt
# event constants), firing the callback cb() when an event occurs.
# Returns a unique integer identifier for this handle, that should be
# used to later update/remove it
def add_handle(self, fd, events, cb, opaque):
handleID = self.nextHandleID + 1
self.nextHandleID = self.nextHandleID + 1
h = self.virEventLoopPureHandle(handleID, fd, events, cb, opaque)
self.handles.append(h)
self.poll.register(fd, self.events_to_poll(events))
self.interrupt()
self.debug("Add handle %d fd %d events %d" % (handleID, fd, events))
return handleID
# Registers a new timer with periodic expiry at 'interval' ms,
# firing cb() each time the timer expires. If 'interval' is -1,
# then the timer is registered, but not enabled
# Returns a unique integer identifier for this timer, that should be
# used to later update/remove it
def add_timer(self, interval, cb, opaque):
timerID = self.nextTimerID + 1
self.nextTimerID = self.nextTimerID + 1
h = self.virEventLoopPureTimer(timerID, interval, cb, opaque)
self.timers.append(h)
self.interrupt()
self.debug("Add timer %d interval %d" % (timerID, interval))
return timerID
# Change the set of events to be monitored on the file handle
def update_handle(self, handleID, events):
h = self.get_handle_by_id(handleID)
if h:
h.set_events(events)
self.poll.unregister(h.get_fd())
self.poll.register(h.get_fd(), self.events_to_poll(events))
self.interrupt()
self.debug("Update handle %d fd %d events %d" % (handleID, h.get_fd(), events))
# Change the periodic frequency of the timer
def update_timer(self, timerID, interval):
for h in self.timers:
if h.get_id() == timerID:
                h.set_interval(interval)
self.interrupt()
self.debug("Update timer %d interval %d" % (timerID, interval))
break
# Stop monitoring for events on the file handle
def remove_handle(self, handleID):
handles = []
for h in self.handles:
if h.get_id() == handleID:
self.poll.unregister(h.get_fd())
self.debug("Remove handle %d fd %d" % (handleID, h.get_fd()))
else:
handles.append(h)
self.handles = handles
self.interrupt()
# Stop firing the periodic timer
def remove_timer(self, timerID):
timers = []
for h in self.timers:
if h.get_id() != timerID:
timers.append(h)
self.debug("Remove timer %d" % timerID)
self.timers = timers
self.interrupt()
# Convert from libvirt event constants, to poll() events constants
def events_to_poll(self, events):
ret = 0
if events & libvirt.VIR_EVENT_HANDLE_READABLE:
ret |= select.POLLIN
if events & libvirt.VIR_EVENT_HANDLE_WRITABLE:
ret |= select.POLLOUT
if events & libvirt.VIR_EVENT_HANDLE_ERROR:
            ret |= select.POLLERR
        if events & libvirt.VIR_EVENT_HANDLE_HANGUP:
            ret |= select.POLLHUP
return ret
# Convert from poll() event constants, to libvirt events constants
def events_from_poll(self, events):
        ret = 0
        if events & select.POLLIN:
            ret |= libvirt.VIR_EVENT_HANDLE_READABLE
        if events & select.POLLOUT:
            ret |= libvirt.VIR_EVENT_HANDLE_WRITABLE
        if events & select.POLLNVAL:
            ret |= libvirt.VIR_EVENT_HANDLE_ERROR
        if events & select.POLLERR:
            ret |= libvirt.VIR_EVENT_HANDLE_ERROR
        if events & select.POLLHUP:
            ret |= libvirt.VIR_EVENT_HANDLE_HANGUP
        return ret
###########################################################################
# Now glue an instance of the general event loop into libvirt's event loop
###########################################################################
# This single global instance of the event loop will be used for
# monitoring libvirt events
eventLoop = virEventLoopPure(debug=False)
# This keeps track of what thread is running the event loop,
# (if it is run in a background thread)
eventLoopThread = None
# This next set of 6 methods is the glue between the official
# libvirt events API, and our particular impl of the event loop
#
# There is no reason why the 'virEventLoopPure' has to be used.
# An application could just as easily make these 6 glue methods hook into
# another event loop such as GLib's, or something like the python
# Twisted event framework.
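# For illustration only -- a hypothetical sketch (not part of this demo) of
# the first glue method delegating to GLib instead, assuming the legacy
# PyGTK "gobject" module and a suitable condition/callback translation:
#
#   def virEventAddHandleImpl(fd, events, cb, opaque):
#       return gobject.io_add_watch(fd, gobject.IO_IN,
#                                   lambda src, cond: cb(0, fd, events, opaque))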
def virEventAddHandleImpl(fd, events, cb, opaque):
global eventLoop
return eventLoop.add_handle(fd, events, cb, opaque)
def virEventUpdateHandleImpl(handleID, events):
global eventLoop
return eventLoop.update_handle(handleID, events)
def virEventRemoveHandleImpl(handleID):
global eventLoop
return eventLoop.remove_handle(handleID)
def virEventAddTimerImpl(interval, cb, opaque):
global eventLoop
return eventLoop.add_timer(interval, cb, opaque)
def virEventUpdateTimerImpl(timerID, interval):
global eventLoop
return eventLoop.update_timer(timerID, interval)
def virEventRemoveTimerImpl(timerID):
global eventLoop
return eventLoop.remove_timer(timerID)
# This tells libvirt what event loop implementation it
# should use
def virEventLoopPureRegister():
libvirt.virEventRegisterImpl(virEventAddHandleImpl,
virEventUpdateHandleImpl,
virEventRemoveHandleImpl,
virEventAddTimerImpl,
virEventUpdateTimerImpl,
virEventRemoveTimerImpl)
# Directly run the event loop in the current thread
def virEventLoopPureRun():
global eventLoop
eventLoop.run_loop()
# Spawn a background thread to run the event loop
def virEventLoopPureStart():
global eventLoopThread
virEventLoopPureRegister()
eventLoopThread = threading.Thread(target=virEventLoopPureRun, name="libvirtEventLoop")
eventLoopThread.setDaemon(True)
eventLoopThread.start()
##########################################################################
# Everything that now follows is a simple demo of domain lifecycle events
##########################################################################
def eventToString(event):
eventStrings = ( "Added",
"Removed",
"Started",
"Suspended",
"Resumed",
"Stopped",
"Saved",
"Restored" );
return eventStrings[event];
def myDomainEventCallback1 (conn, dom, event, detail, opaque):
print "myDomainEventCallback1 EVENT: Domain %s(%s) %s %d" % (dom.name(), dom.ID(), eventToString(event), detail)
def myDomainEventCallback2 (conn, dom, event, detail, opaque):
print "myDomainEventCallback2 EVENT: Domain %s(%s) %s %d" % (dom.name(), dom.ID(), eventToString(event), detail)
def myDomainEventRebootCallback(conn, dom, opaque):
print "myDomainEventRebootCallback: Domain %s(%s)" % (dom.name(), dom.ID())
def myDomainEventRTCChangeCallback(conn, dom, utcoffset, opaque):
print "myDomainEventRTCChangeCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), utcoffset)
def myDomainEventWatchdogCallback(conn, dom, action, opaque):
print "myDomainEventWatchdogCallback: Domain %s(%s) %d" % (dom.name(), dom.ID(), action)
def myDomainEventIOErrorCallback(conn, dom, srcpath, devalias, action, opaque):
print "myDomainEventIOErrorCallback: Domain %s(%s) %s %s %d" % (dom.name(), dom.ID(), srcpath, devalias, action)
def myDomainEventGraphicsCallback(conn, dom, phase, localAddr, remoteAddr, authScheme, subject, opaque):
print "myDomainEventGraphicsCallback: Domain %s(%s) %d %s" % (dom.name(), dom.ID(), phase, authScheme)
def usage():
print "usage: "+os.path.basename(sys.argv[0])+" [uri]"
print " uri will default to qemu:///system"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"] )
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if len(sys.argv) > 1:
uri = sys.argv[1]
else:
uri = "qemu:///system"
print "Using uri:" + uri
# Run a background thread with the event loop
virEventLoopPureStart()
vc = libvirt.open(uri)
# Close connection on exit (to test cleanup paths)
old_exitfunc = getattr(sys, 'exitfunc', None)
def exit():
print "Closing " + str(vc)
vc.close()
if (old_exitfunc): old_exitfunc()
sys.exitfunc = exit
#Add 2 callbacks to prove this works with more than just one
vc.domainEventRegister(myDomainEventCallback1,None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, myDomainEventCallback2, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, myDomainEventRebootCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_RTC_CHANGE, myDomainEventRTCChangeCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR, myDomainEventIOErrorCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_WATCHDOG, myDomainEventWatchdogCallback, None)
vc.domainEventRegisterAny(None, libvirt.VIR_DOMAIN_EVENT_ID_GRAPHICS, myDomainEventGraphicsCallback, None)
# The rest of your app would go here normally, but for sake
# of demo we'll just go to sleep. The other option is to
# run the event loop in your main thread if your app is
# totally event based.
while 1:
time.sleep(1)
if __name__ == "__main__":
main()
| gpl-2.0 | -3,574,015,940,346,871,300 | 35.218935 | 116 | 0.608506 | false | 4.059019 | false | false | false |
jnez71/navboxplus | demos/demo_motor.py | 1 | 6671 | #!/usr/bin/env python
"""
Using navboxplus to perfectly control a motor sensed with only a cheap encoder.
Model-augmented state is: [position, velocity, drag/inertia, b/inertia, disturbance].
"""
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
from navboxplus import NavBoxPlus
# Motor dynamics
def motor(x, u, wf, dt):
xdot = np.array([x[1],
x[4] + x[3]*u - x[2]*x[1],
0, 0, 0]) # parameters "don't change" (we assume)
xnext = x + xdot*dt + wf
if xnext[2] < 0.5: xnext[2] = 0.5 # prevent parameter drift into nonphysical
if xnext[3] < 0.5: xnext[3] = 0.5
return xnext
# Encoder model (only noise in the form of discretization)
res = 512/360 # ticks/deg
z_per_t = 20 # samples/s
def encoder(x, u, wh):
return np.floor(res*x[0])
# True noise characteristics
wf0_true = np.array([0, 0, 0, 0, 0])
Cf_true = np.diag([0, 0, 1E-3, 1E-6, 0])
# Our guesses at the dynamics and sensor noise characteristics
# We cannot express any perfect confidence
wf0 = np.zeros(5)
Cf = np.diag([1E-7, 1E-4, 1E-3, 1E-6, 1E-2]) # disturbance is not really constant
wh0 = 0
Ch = 1 # because the encoder discretization acts like noise
# Simulation time domain (also chooses predict frequency)
T = 40 # s
dt = 0.05 # s
t = np.arange(0, T, dt) # s
i_per_z = int(1/(z_per_t*dt)) # iters/sample
assert 1/z_per_t >= dt # time between samples >= sim timestep ?
# Desired trajectory
# r = [180, 0] * np.ones((len(t), 2)) # setpoint, not much excitation information
rv = 0.5
r = 15*np.vstack((np.sin(rv*t), rv*np.cos(rv*t))).T # sinusoid, good excitation
# Unknown external disturbance (tracked as a state)
dist = 8*np.ones_like(t); dist[:len(t)//2] = 0 # sudden push
# dist = 3*np.cos(2*rv*(t+2)) + 3 # sinusoid
# Controller with feedback and feedforward based on estimated model
ulims = (-50, 50)
gains = 5*np.array([1, 1])
feedback = 0; feedforward = 0 # for externally recording these quantities
def controller(r, rnext, x, Cx, dt):
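    # Feedforward sketch: the model above gives vdot = x[4] + x[3]*u - x[2]*v,
    # so solving for u with vdot = (rnext[1] - r[1])/dt and v = r[1] yields the
    # feedforward term below; feedback is a proportional gain on the
    # [position, velocity] tracking error.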
global feedback, feedforward
feedback = gains.dot(r - x[:2])
feedforward = (1/x[3]) * ((rnext[1] - r[1])/dt + x[2]*r[1] - x[4])
return np.clip(feedback + feedforward, ulims[0], ulims[1])
# State, estimate, covariance, measurement, and effort timeseries
x = np.zeros((len(t), 5))
xh = np.zeros((len(t), 5))
Cx = np.zeros((len(t), 5, 5))
z = np.zeros((len(t), 1))
u = np.zeros((len(t), 1))
uff = np.zeros((len(t), 1))
# Initial conditions
x[0] = [15, 0, 5, 2, dist[0]]
xh[0] = [-15, 10, 1, 1, 0]
Cx[0] = 10*np.eye(5)
u[0] = 0
uff[0] = 0
# Configure navboxplus
# (note that we will give a "smoothed" encoder model to capture its true behavior)
nav = NavBoxPlus(x0=np.copy(xh[0]),
Cx0=np.copy(Cx[0]),
g=controller,
f=motor,
hDict={'encoder': lambda x, u, wh: res*x[0] + wh},
n_r=2,
n_wf=5,
n_whDict={'encoder': 1})
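# Note: the hDict entry above replaces the encoder's floor() quantization
# with a smooth model res*x[0] + wh, treating the discretization as additive
# measurement noise (hence Ch = 1), which keeps the measurement model usable
# by the filter.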
# Simulation
for i, ti in enumerate(t[1:]):
    # Choose control and predict next state
try:
u[i+1] = nav.predict(r[i], r[i+1], wf0, Cf, dt)
uff[i+1] = feedforward
    except npl.LinAlgError:
print("Cholesky failed in predict!")
break
# Advance true state using control
wf = np.random.multivariate_normal(wf0_true, Cf_true)
x[i+1] = motor(x[i], u[i+1], wf, dt)
x[i+1, 4] = dist[i+1] # update disturbance
# When new measurement comes in...
if i % i_per_z == 0:
# Get new measurement from real world
z[i+1] = encoder(x[i+1], 0, 0)
# Update state estimate
try:
nav.correct('encoder', z[i+1], wh0, Ch)
        except npl.LinAlgError:
print("Cholesky failed in correct!")
break
# ...otherwise hold last measurement (for plotting only)
else:
z[i+1] = np.copy(z[i])
# Record new estimate
xh[i+1], Cx[i+1] = nav.get_state_and_cov()
# Just checkin...
if not nav.is_pdef(nav.Cx):
print("WOAH your state estimate covariance is not posdef, how'd that happen?\n")
print("Final state estimate covariance:")
print(np.round(nav.Cx, 3))
#### Plots
fig1 = plt.figure()
fig1.suptitle("Estimation and Tracking via Online UKF-Learned Model", fontsize=22)
ax1 = fig1.add_subplot(6, 1, 1)
ax1.plot(t[:i], x[:i, 0], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 0], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 0], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("position\ndeg", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 2)
ax1.plot(t[:i], x[:i, 1], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 1], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 1], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("velocity\ndeg/s", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 3)
ax1.plot(t[:i], x[:i, 2], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 2], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("drag/inertia\n(deg/s^2)/(deg/s)", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 4)
ax1.plot(t[:i], x[:i, 3], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 3], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("b/inertia\n(deg/s^2)/V", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 5)
ax1.plot(t[:i], x[:i, 4], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 4], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("disturbance\ndeg/s^2", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 6)
ax1.plot(t[:i], u[:i], label="total", color='r', lw=3)
ax1.plot(t[:i], uff[:i], label="feedforward", color='b', ls='--', lw=2)
ax1.set_xlim([0, ti])
ax1.set_ylabel("effort\nV", fontsize=12)
ax1.set_xlabel("time\ns", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
fig2 = plt.figure()
fig2.suptitle("Covariance Diagonals", fontsize=22)
ax2 = fig2.add_subplot(1, 1, 1)
dvs = np.array(map(np.diag, Cx[:i]))
for xi in xrange(len(x[0])):
ax2.plot(t[:i], dvs[:, xi], label="State {}".format(xi))
ax2.set_xlim([0, ti])
ax2.set_ylabel("value", fontsize=16)
ax2.set_xlabel("time\ns", fontsize=16)
ax2.legend(loc='upper right')
ax2.grid(True)
fig3 = plt.figure()
fig3.suptitle("Absolute Encoder Measurements", fontsize=22)
ax3 = fig3.add_subplot(1, 1, 1)
ax3.plot(t[:i], z[:i], color='b', lw=2)
ax3.set_xlim([0, ti])
ax3.set_ylabel("ticks", fontsize=16)
ax3.set_xlabel("time\ns", fontsize=16)
ax3.grid(True)
plt.show()
| mit | -1,178,019,511,444,619,000 | 31.383495 | 85 | 0.611902 | false | 2.529769 | false | false | false |
codelieche/codelieche.com | apps/utils/tools/export.py | 1 | 7987 | # -*- coding:utf-8 -*-
"""
导出Model信息
待优化的点:加个filter过滤功能
"""
import time
import xlwt
from django.apps import apps
from django.http.response import HttpResponse
SECRET_FIELDS = ["admin_pwd", "password"]
def field_can_export(field):
"""
    Determine whether a field may be exported.
:param field:
:return:
"""
if field in SECRET_FIELDS:
return False
else:
return True
def get_export_model(app_label, model_name):
"""
    Get the model to export.
    :param app_label: app label
    :param model_name: model name; note that it is case-insensitive
    :return: app.models.Model
    """
try:
model = apps.get_model(app_label=app_label, model_name=model_name)
return model
except Exception:
        # apps.get_model raises when the given info is wrong and no model is found
return None
def get_fields_verbosename(model, fields):
"""
获取字段的名字
:return:
"""
# 1. 获取到model的_meta.fields
model_fields = model._meta.fields
# 2. 获取到字段的verbose_name
fields_names = []
for field in fields:
find_field_flag = False
if "verbose_name" in field:
fields_names.append(field["verbose_name"])
find_field_flag = True
elif "manay" in field and field["many"]:
fields_names.append(field["name"])
find_field_flag = True
else:
for model_field in model_fields:
if model_field.name == field["name"]:
verbose_name = model_field.verbose_name
if verbose_name:
fields_names.append(verbose_name)
else:
fields_names.append(field["name"])
# 跳出循环
find_field_flag = True
break
if not find_field_flag:
raise Exception("没找到{}".format(field["name"]))
# 返回fields_names
return fields_names
def get_obj_fields_data(obj, fields):
    """
    Get the value of each field for the given object.
    :param obj:
    :param fields:
    :return:
    """
    values = []
    # Process each field in turn
    for field in fields:
        # Step 1: if this field must not be exported, mask its content
        if not field_can_export(field["name"]):
            values.append("Confidential")
            continue
            # move on to the next field
        # Step 2: extract this field's data
        # 2-1: name is the field name; many-to-many fields must also set "many"
        name = field["name"]
        many = True if "many" in field and field["many"] else False
        # A dot in the name means the value is nested several levels deep,
        # e.g. article.user.username is the username of the article's author
        name_split = name.split('.')
        length = len(name_split)
        # 2-2: get the first-level value
        value_levl_1 = getattr(obj, name_split[0])
        if length > 1:
            # length greater than 1 means we must descend several levels
            if many:
                # Multi-valued: get its QuerySet via .all()
                value_levl_1_all = value_levl_1.all()
            else:
                # Single-valued: wrap it in a list so we can iterate below
                value_levl_1_all = [value_levl_1]
            values_list = []
            for obj_i in value_levl_1_all:
                v = ""
                obj_i_tmp = obj_i
                # v will hold the value we ultimately want
                for f in name_split[1:]:
                    # f is one dot-separated part, e.g. of article.user.username
                    try:
                        v = getattr(obj_i_tmp, f)
                        if v:
                            obj_i_tmp = v
                    except AttributeError:
                        # print(obj_i_tmp, f)
                        try:
                            v = obj_i_tmp.get(f, None)
                            if v:
                                obj_i_tmp = v
                        except Exception:
                            v = "---"
                # the loop above leaves v holding the innermost field value
                if v:
                    values_list.append(v)
            # Join the values with a comma (could be made configurable later)
            if values_list:
                value = ",".join(values_list)
            else:
                value = "---"
        else:
            # If field["name"] splits to a single part, take the value directly.
            # Note: undotted names are treated as single-valued; many-to-many
            # fields must use a dotted, multi-level name
            value = value_levl_1
        value = str(value)
        # Append this field's value to values
        values.append(value)
    # Step 3: return the values of these fields for this object
    return values
def exports_data_to_excel(data, filename=None):
    """
    Export the data to an Excel workbook.
    :param data:
    :param filename: file name
    :return: response
    """
    # Step 1: create a workbook
    wbook = xlwt.Workbook(encoding="utf-8", style_compression=0)
    # Step 2: add a worksheet
    wsheet = wbook.add_sheet(sheetname="Exported Data")
    row = 0
    for line in data:
        colum = 0
        for value in line:
            wsheet.write(row, colum, str(value))
            colum += 1
        row += 1
    if not filename:
        # If no file name was given, generate one automatically
        filename = "{}.xls".format(time.strftime("%Y%m%d%H%M%S"))
    # To write to a file instead:
    # wbook.save(filename_or_stream=filename)
    # Write into the HTTP response:
    # the exported workbook is streamed back as an attachment
    response = HttpResponse(content_type='application/ms-excel')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    wbook.save(filename_or_stream=response)
    return response
def get_export_data(app_label, model_name, fields, filters=None):
"""
    Get the data to export.
    :param app_label:
    :param model_name:
    :param fields: list of field specs
    :param filters: list of filter specs
    :return:
"""
    # Step 1: get the model
    model = get_export_model(app_label, model_name)
    if not model:
        return False
    # Step 2: start fetching the model's data
    # 2-1: get all candidate objects
    objs = model.objects.all()
    # 2-2: resolve the verbose_name of each field
    fields_verbose_name_list = get_fields_verbosename(model=model, fields=fields)
    # print(fields_verbose_name_list)
    # 2-3: apply the filters
    # [{"name": "id", flag: "__lt", value: ""}]
    if isinstance(filters, list):
        kwargs = {}
        for _filter in filters:
            filter_name = _filter["name"]
            if _filter["flag"]:
                filter_name += _filter["flag"]
            filter_value = _filter["value"]
            # add this filter to kwargs
            kwargs[filter_name] = filter_value
        objs = objs.filter(**kwargs)
    data = [fields_verbose_name_list]
    # 2-4: collect the data of each object
    for obj in objs:
        values = get_obj_fields_data(obj, fields)
        # print(values)
        data.append(values)
    # Step 3: write the data to Excel
    # print(data)
    response = exports_data_to_excel(data)
    return response
def test_export():
    # Test exporting user information
app = "account"
model = "UserProfile"
fields = [
{"name": "id"},
{"name": "username", "verbose_name": "用户名"},
{"name": "nick_name", "verbose_name": "昵称"},
{"name": "last_login"},
{"name": "groups.name", "many": True, "verbose_name": "组"},
]
filters = [
{"name": "id", "flag": "__lt", "value": 15}
]
return get_export_data(app, model, fields, filters)
| mit | 1,785,901,874,491,865,000 | 26.023715 | 82 | 0.523036 | false | 2.738086 | false | false | false |
chrisfromthelc/python-rake | rake_example.py | 1 | 4417 | from __future__ import absolute_import
from __future__ import print_function
import six
import rake
import operator
import io
import csv
import os
import MySQLdb
import collections
import gc
import time
from os import system
import formatter
import htmllib
import cStringIO
# Pull in chats from MySQL
db = MySQLdb.connect(host="127.0.0.1", port=3306, user="USERNAME", passwd="PASSWORD", db="DBNAME")
cursor = db.cursor()
cleanup = "DELETE FROM tablename WHERE columnname LIKE '%Text to clean up%'"
cursor.execute(cleanup)
db.commit()
print('Database cleaned of status messages')
cursor.execute("SELECT DISTINCT columnname->\"$.text\" FROM tablename")
# rows = cursor.fetchall()
rows = [item[0] for item in cursor.fetchall()]
# Clean up MySQLdb's weirdness with tuples
rows = [row.replace('"','') for row in rows]
rows = [row.replace('\n',' ') for row in rows]
# Output to a plaintext file
sqloutput = open('sqloutput.txt', 'w')
for row in rows:
sqloutput.write("%s\n" % row)
print('Printed chat messages to text file')
# Clean up HTML
print('Cleaning up HTML tags')
sqloutput = open('sqloutput.txt', 'r')
dirtytext = sqloutput.read()
outstream = cStringIO.StringIO()
parser = htmllib.HTMLParser(formatter.AbstractFormatter(formatter.DumbWriter(outstream)))
parser.feed(dirtytext)
cleantext = outstream.getvalue()
outstream.close()
print('Rewriting cleaned text back to file')
sqloutput = open('sqloutput.txt', 'w')
sqloutput.write(cleantext)
sqloutput.close()
# Garbage collection so the database connections will close properly
db.close()
gc.collect()
# Chill for a bit to make sure the file is done writing
print('Thinking...')
time.sleep(5)
print('Calculationating...')
# Set the stopwords list
stoppath = "SmartStoplist.txt"
# 1. initialize RAKE by providing a path to a stopwords file
rake_object = rake.Rake(stoppath, 3, 3, 5)
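# The three numeric arguments are assumed to follow the reference RAKE
# implementation's signature: minimum characters per word, maximum words
# per phrase, and minimum keyword frequency.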
# 2. run on RAKE on a given text
sample_file = io.open("sqloutput.txt", 'r', encoding="iso-8859-1")
text = sample_file.read().encode('utf-8')
keywords = rake_object.run(text)
# 3. Print results to screen
print("Keywords:", keywords)
print("----------")
# 4. Print results to CSV
print("Writing results to CSV.")
def WriteListToCSV(csv_file,csv_columns,data_list):
try:
with open(csv_file, 'w') as csvfile:
writer = csv.writer(csvfile, dialect='excel', quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(csv_columns)
for data in data_list:
writer.writerow(data)
except IOError as (errno, strerror):
print("I/O error({0}): {1}".format(errno, strerror))
return
csv_columns = ['Keyword','Score']
# Line 144 of rake.py rounds the score to 5 decimal places: word_score[item] = round(word_prescore, 5)
currentPath = os.getcwd()
csv_file = os.path.join("output","keywords.csv")
WriteListToCSV(csv_file,csv_columns,keywords)
print("Done!")
# #### More examples ####
#
# # Split text into sentences
# sentenceList = rake.split_sentences(text)
#
# # Outputs detected sentences to screen
# # for sentence in sentenceList:
# # print("Sentence:", sentence)
#
# ## Outputs detected phrases, candidates, and top 1/3rd scoring keywords to screen.
#
# # generate candidate keywords
# print(" ")
# print("----------")
# print("Phrases")
# print("----------")
# stopwordpattern = rake.build_stop_word_regex(stoppath)
# phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern)
# for phrase in phraseList:
# # print("Phrases:", phraseList)
# print("Phrases: ", phrase)
#
# # calculate individual word scores
# wordscores = rake.calculate_word_scores(phraseList)
#
# # generate candidate keyword scores
# print(" ")
# print("----------")
# print("Candidates")
# print("----------")
# keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
# for candidate in keywordcandidates.keys():
# print("Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate))
#
# # sort candidates by score to determine top-scoring keywords
# sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
# totalKeywords = len(sortedKeywords)
#
# # for example, you could just take the top third as the final keywords
# print(" ")
# print("----------")
# print("Top Third")
# print("----------")
# for keyword in sortedKeywords[0:int(totalKeywords / 10)]:
# print("Keyword: ", keyword[0], " Score: ", keyword[1])
| mit | 1,181,583,639,862,126,300 | 26.60625 | 102 | 0.696627 | false | 3.397692 | false | false | false |
0x00ach/zer0m0n | signatures/recon_systeminfo.py | 6 | 1252 | # Copyright (C) 2012 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class SystemInfo(Signature):
name = "recon_systeminfo"
description = "Collects information on the system (ipconfig, netstat, systeminfo)"
severity = 3
categories = ["recon"]
authors = ["nex"]
minimum = "1.0"
evented = True
def on_call(self, call, process):
return self.check_argument_call(
            call, pattern="(^cmd\.exe).*((systeminfo)|(ipconfig)|(netstat))",
name="CommandLine",
category="process",
regex=True
)
| gpl-3.0 | 1,236,607,287,380,904,400 | 36.939394 | 86 | 0.691693 | false | 3.962025 | false | false | false |
domeger/SplunkTAforPuppetEnterprise | bin/input_module_puppet_enterprise_metrics.py | 1 | 2674 |
# encoding = utf-8
import os
import sys
import time
import datetime
import json
import jsonpath_rw
from datetime import datetime
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
# This example accesses the modular input variable
# server = definition.parameters.get('server', None)
# port = definition.parameters.get('port', None)
pass
def collect_events(helper, ew):
import datetime
import json
import jsonpath_rw
method = 'GET'
api_request = 'application/json'
api_token = helper.get_global_setting("token_")
server = helper.get_arg('server_')
port = helper.get_arg('port_')
pe_token = helper.get_arg('token_')
pe_link = helper.get_arg('puppet_enterprise_server_')
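    # Query the PuppetDB v1 metrics API: the JMX mbean (URL-encoded below) is
    # "puppetlabs.puppetdb.mq:name=global.processing-time"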
url = server + ":" + port + "/metrics/v1/mbeans/puppetlabs.puppetdb.mq%3Aname%3Dglobal.processing-time"
    # Event source: the configured PE server label; assumed intent is to fall
    # back to the REST host when no label is set
    if pe_link:
        input_source = pe_link
    else:
        input_source = server
headers = {
'X-Authentication': pe_token,
'Content-type': api_request
}
response = helper.send_http_request(url,
method,
parameters=None,
payload=None,
headers=headers,
cookies=None,
verify=False,
cert=None,
timeout=None,
use_proxy=True)
r_status = response.status_code
response.raise_for_status()
    helper.log_error(response.text)
    r = response.json()
input_type = helper.get_input_type()
    for stanza_name in helper.get_input_stanza_names():
        data = json.dumps(r, sort_keys=False)
        event = helper.new_event(source=input_source, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)
        helper.log_error(response.text)
        try:
            ew.write_event(event)
            helper.log_error(response.text)
        except Exception as e:
            raise e
return; | apache-2.0 | 2,656,314,683,567,090,000 | 30.470588 | 156 | 0.557218 | false | 4.369281 | false | false | false |
grandecose/global-toucan | main.py | 2 | 4308 | on = 1;
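#Acceleration due to gravity in m/s^2 (negative = downward); it is also reset
#at the top of each loop iteration below.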
a = -9.800000000000
print("Welcome to Global Toucan, a physics calculator.\n");
valid = "Please choose a valid option\n";
#I've never been good at commenting; if you can't understand what's going on, contact me and I will help.
while(on == 1):
a = -9.800000000000000000000000
choice = raw_input("What equation would like to use?\n1. Final Velocity\n2. Distance\n3. Help\n4. List Equations\n5. Quit\n")
if(choice == "1"):
z = raw_input("What do you want to find?\n1. Final Velocity\n2. Initial Velocity\n3. Acceleration\n4. Time\n");
if(z == "1"):
vi = int(input("What is the initial velocity?\n"));
time = int(input("What is the time?\n"));
vf = (vi * 1.000000000000000000000000000000000) + (a * time)
print("Your final velocity is %s m/s" % vf);
elif(z == "2"):
vf = int(input("What is the final velocity?\n"));
time = int(input("For how long is the object travelling?\n"));
vi = (vf * 1.00000000000000000000000000) - (a * time)
print("The initial velocity is %s m/s" % vi);
elif(z == "3"):
vf = int(input("What is the final velocity?\n"));
vi = int(input("What is your initial velocity?\n"));
time = int(input("What is the time?\n"));
a = ((vf * 1.00000000000000000000 - vi) / time)
print("The acceleration is %s m/s/s" % a);
elif(z == "4"):
vf = int(input("What is the final velocity?\n"));
vi = int(input("What is the initial velocity?\n"));
time = (vf - vi) / (a * 1.0000000000000000000000000)
print("The time is %s seconds" % time);
else:
print valid
elif(choice == "2"):
choice = raw_input("What do you want to find?\n1. Distance\n2. Initial Velocity\n3. Acceleration\n4. Time\n");
# d = vi * t + (.5 * a * (t ** 2))
if(choice == "1"):
vi = int(input("What is your initial velocity?\n"))
t = int(input("What is the time?\n"))
d = vi * 1.00000000000000000 * t + (.5 * a * (t ** 2))
print("This object has travelled %s meters" % d);
elif(choice == "2"):
d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
t = int(input("What is the time?\n"))
vi = (d * 1.000000000000000000 - (.5 * a * (t ** 2))) / t
print("The final velocity is %s m/s" % vi);
elif(choice == "3"):
d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
vi = int(input("What is the initial velocity?\n"))
t = int(input("What is the time?\n"))
a = (d * 1.000000000000000000000000 - (vi * t)) / (.5 * (t ** 2))
print("The acceleration is %s m/s/s" % a);
elif(choice == "4"):
d = int(input("What is the distance (please remember to include a - sign if movement is negative)?\n"))
vi = int(input("What is the initial velocity?\n"))
vf = int(input("What is the final velocity?\n"))
# currently using the distance formula python won't figure it out. I'll have to put in a function later. In the
# meantime use vf and vi. vf = vi + at
t = (vf * 1.000000000000000 - vi) / a
print("The time is %s seconds" % t);
else:
print valid
elif(choice == "3"):
print("Global Toucan is a simple program written in python made to solve physics problems concerning motion.\n The calculator will only take numbers, so please no units.\nWhen calculating for directions going down please remember to include '-'. For example if your calculation involves an apple falling 50 meters one would input -50 as the distance.\nEverything will be done in seconds for time, m/s for velocity, and m/s/s for acceleration.\nAs such, please do any conversions beforehand.\nGlobal Toucan is licensed under the GPLv3 a Free Software License.\n");
raw_input("Press enter to return to the main menu.\n");
elif(choice == "4"):
print("The following are the equations used by this program:\nFinal Velocity: vf = vi + at, where vf is final velocity, a is acceleration and t is time.\nDistance: d = vi * t + .5 a * (t ^ 2), where d is distance, vi is initial velocity, t is time, a is acceleration, and t is time.\n");
raw_input("Press enter when you are ready to return to the main menu.\n");
elif(choice == "5"):
raw_input("Global Toucan will now exit, please press enter.")
on = 0;
else:
print valid
| gpl-3.0 | 4,295,777,861,401,514,500 | 38.163636 | 565 | 0.637419 | false | 3.167647 | false | false | false |
jstac/recursive_utility_code | python/long_run_risk/src/ssy_fp_discretized.py | 1 | 1742 | """
Compute the equilibrium wealth consumption ratio in the SSY model by first
computing the fixed point of A = phi K.
"""
from ssy_discretized_test import *
import numpy as np
default_K, default_I, default_J = 4, 4, 4
def wealth_cons_ratio(ssyd,
tol=1e-7,
init_val=1,
max_iter=1_000_000,
verbose=False):
"""
Iterate to convergence on the Koopmans operator associated with the SSY
model and then return the wealth consumption ratio.
"""
    # Unpack and set up the Epstein-Zin preference parameters
ψ, γ, β = ssyd.ssy.ψ, ssyd.ssy.γ, ssyd.ssy.β
θ = (1 - γ) / (1 - 1/ψ)
ζ = 1 - β
K_matrix = compute_K(ssyd)
M = ssyd.K * ssyd.I * ssyd.J
w = np.ones(M) * init_val
iter = 0
error = tol + 1
r = compute_spec_rad(K_matrix)
if verbose:
print(f"Test value = {r**(1/θ)} and θ = {θ}")
print("Beginning iteration\n\n")
while error > tol and iter < max_iter:
Tw = ζ + β * (K_matrix @ (w**θ))**(1/θ)
error = np.max(np.abs(w - Tw))
w = Tw
iter += 1
if verbose:
print(f"Iteration converged after {iter} iterations")
return w / ζ
def average_wealth_cons(ssy,
K=default_K,
I=default_I,
J=default_J,
verbose=False):
"""
Computes the mean wealth consumption ratio under the stationary
distribution pi.
"""
ssyd = discretize(ssy, K, I, J, add_x_data=True)
w = wealth_cons_ratio(ssyd, verbose=verbose)
x_mc = MarkovChain(ssyd.x_P)
x_pi = x_mc.stationary_distributions[0]
mean_w = w @ x_pi
return mean_w
| mit | -9,144,569,409,453,873,000 | 22.283784 | 75 | 0.549042 | false | 2.991319 | false | false | false |
zachdj/ultimate-tic-tac-toe | scenes/GameCompleted.py | 1 | 5911 | import pygame, numpy
from .SceneBase import SceneBase
from .DrawingUtils import *
from widgets import Button
from models.game import Board, Move
from services import ImageService, FontService, SceneManager, SettingsService as Settings
class GameCompleted(SceneBase):
"""
This scene shows the result of a game by displaying the completed board and a message about which player won
"""
def __init__(self, game):
SceneBase.__init__(self)
# data needed to play the game
self.game = game
# calculate constants used for rendering
# (these are all done in the fixed transform space, so we can safely use constants)
self.MARGIN = 96
self.CELL_SIZE = 83
self.CELL_SPACING = 10
self.LOCAL_BOARD_SPACING = 25
self.BOARD_AREA_X = 1920 - self.MARGIN - 9*(self.CELL_SIZE + self.CELL_SPACING) - 2*self.LOCAL_BOARD_SPACING
self.BOARD_AREA_Y = self.MARGIN
self.FONT_SIZE = 48
# bounding box for the player who won
winner_box_width = 1920 - 3*self.MARGIN - self.BOARD_AREA_X
winner_box_height = self.FONT_SIZE * 3
self.WINNER_BOX = pygame.Rect(self.MARGIN, 0.5*1080 - self.MARGIN - winner_box_height, winner_box_width, winner_box_height)
# "Name" of winning player
winner = self.game.get_winner()
if winner == Board.X:
winner_name = "%s (X) wins!" % self.game.player1.name
elif winner == Board.O:
winner_name = "%s (O) wins!" % self.game.player2.name
else:
winner_name = "The Players Tie! Lame!"
self.winner_text = FontService.get_regular_font(self.FONT_SIZE)
self.winner_text_surface = self.winner_text.render(winner_name, False, Settings.theme['font'])
self.winner_text_size = self.winner_text.size(winner_name)
self.winner_text_location = (self.WINNER_BOX.centerx - 0.5 * self.winner_text_size[0],
self.WINNER_BOX.top + 0.5 * self.winner_text_size[1] + 10)
self.cell_sprites = ImageService.get_board_cell_sprites()
for key in self.cell_sprites.keys():
self.cell_sprites[key] = pygame.transform.scale(self.cell_sprites[key], (self.CELL_SIZE, self.CELL_SIZE))
# compute cell bounding boxes - Each element is a 4-tuple (left, top, right, bottom)
self.cell_locations = numpy.empty((3, 3, 3, 3), object)
for i in list(range(0, 9)):
metarow = i // 3
row = i % 3
for j in list(range(0, 9)):
metacol = j // 3
col = j % 3
# compute the location of the cell in the grid and shift it into the board area
location_x = (metacol * 3 + col)*(self.CELL_SIZE + self.CELL_SPACING) \
+ self.LOCAL_BOARD_SPACING*metacol \
+ self.BOARD_AREA_X
location_y = (metarow * 3 + row) * (self.CELL_SIZE + self.CELL_SPACING) \
+ self.LOCAL_BOARD_SPACING * metarow \
+ self.BOARD_AREA_Y
self.cell_locations[metarow][metacol][row][col] = (location_x, location_y, location_x + self.CELL_SIZE, location_y + self.CELL_SIZE)
exit_btn = Button(self.WINNER_BOX.left, 0.5*1080 + self.MARGIN,
self.WINNER_BOX.width, self.WINNER_BOX.height,
"Exit", lambda: SceneManager.go_to_main_menu(self))
self.widgets.append(exit_btn)
def process_input(self, events, pressed_keys):
for widget in self.widgets:
widget.process_input(events, pressed_keys)
def update(self):
pass
def render(self, screen):
bg = ImageService.get_game_bg()
screen.blit(bg, (0, 0))
# render the box for the winner info
if self.game.get_winner() == Board.X:
border_color = Settings.theme['primary']
elif self.game.get_winner() == Board.O:
border_color = Settings.theme['secondary']
else:
border_color = Settings.theme['widget_highlight']
# draw box
aa_border_rounded_rect(screen, self.WINNER_BOX, Settings.theme['widget_background'], border_color)
screen.blit(self.winner_text_surface, self.winner_text_location) # name of winner
# render the board
current_player_symbol = self.game.active_player.number
for i in list(range(0, 9)):
metarow = i // 3
row = i % 3
for j in list(range(0, 9)):
metacol = j // 3
col = j % 3
board_winner = self.game.board.check_cell(metarow, metacol)
cell_owner = self.game.board.check_small_cell(metarow, metacol, row, col)
move_object = Move(current_player_symbol, metarow, metacol, row, col)
# compute the location of the cell in the grid and shift it into the board area
location = self.cell_locations[metarow][metacol][row][col]
location_x, location_y = location[0], location[1]
# render the correct background for the cell:
if board_winner == Board.X :
screen.blit(self.cell_sprites['p1_won'], (location_x, location_y))
elif board_winner == Board.O:
screen.blit(self.cell_sprites['p2_won'], (location_x, location_y))
else:
screen.blit(self.cell_sprites['blank'], (location_x, location_y))
# render the cell's owner:
if cell_owner == Board.X:
screen.blit(self.cell_sprites['p1_marker'], (location_x, location_y))
elif cell_owner == Board.O:
screen.blit(self.cell_sprites['p2_marker'], (location_x, location_y))
for widget in self.widgets:
widget.render(screen)
| mit | -2,822,672,325,319,119,400 | 44.821705 | 148 | 0.581627 | false | 3.593313 | false | false | false |
olivierdalang/stdm | ui/reports/ui_rpt_builder.py | 1 | 23463 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_rpt_builder.ui'
#
# Created: Sun Jun 08 13:58:36 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_ReportBuilder(object):
def setupUi(self, ReportBuilder):
ReportBuilder.setObjectName(_fromUtf8("ReportBuilder"))
ReportBuilder.resize(656, 523)
self.gridLayout = QtGui.QGridLayout(ReportBuilder)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.tabWidget = QtGui.QTabWidget(ReportBuilder)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.gridLayout_3 = QtGui.QGridLayout(self.tab)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.groupBox = QtGui.QGroupBox(self.tab)
self.groupBox.setTitle(_fromUtf8(""))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.comboBox = QtGui.QComboBox(self.groupBox)
self.comboBox.setMinimumSize(QtCore.QSize(0, 30))
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.gridLayout_2.addWidget(self.comboBox, 0, 1, 1, 1)
self.label = QtGui.QLabel(self.groupBox)
self.label.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_2.addWidget(self.label, 0, 0, 1, 1)
self.gridLayout_3.addWidget(self.groupBox, 0, 0, 1, 1)
self.groupBox_2 = QtGui.QGroupBox(self.tab)
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_4 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_2 = QtGui.QLabel(self.groupBox_2)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_4.addWidget(self.label_2, 0, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox_2)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_4.addWidget(self.label_3, 0, 2, 1, 1)
self.listWidget = QtGui.QListWidget(self.groupBox_2)
self.listWidget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
self.listWidget.setObjectName(_fromUtf8("listWidget"))
self.gridLayout_4.addWidget(self.listWidget, 1, 0, 1, 1)
self.listWidget_2 = QtGui.QListWidget(self.groupBox_2)
self.listWidget_2.setObjectName(_fromUtf8("listWidget_2"))
self.gridLayout_4.addWidget(self.listWidget_2, 1, 2, 1, 1)
self.groupBox_3 = QtGui.QGroupBox(self.groupBox_2)
self.groupBox_3.setTitle(_fromUtf8(""))
self.groupBox_3.setFlat(True)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.verticalLayout = QtGui.QVBoxLayout(self.groupBox_3)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.btnAddField = QtGui.QPushButton(self.groupBox_3)
self.btnAddField.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/next.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnAddField.setIcon(icon)
self.btnAddField.setObjectName(_fromUtf8("btnAddField"))
self.verticalLayout.addWidget(self.btnAddField)
self.btnRemField = QtGui.QPushButton(self.groupBox_3)
self.btnRemField.setText(_fromUtf8(""))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/previous.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRemField.setIcon(icon1)
self.btnRemField.setObjectName(_fromUtf8("btnRemField"))
self.verticalLayout.addWidget(self.btnRemField)
self.btnAddAllFields = QtGui.QPushButton(self.groupBox_3)
self.btnAddAllFields.setText(_fromUtf8(""))
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/last.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnAddAllFields.setIcon(icon2)
self.btnAddAllFields.setObjectName(_fromUtf8("btnAddAllFields"))
self.verticalLayout.addWidget(self.btnAddAllFields)
self.btnRemAllFields = QtGui.QPushButton(self.groupBox_3)
self.btnRemAllFields.setText(_fromUtf8(""))
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/first.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRemAllFields.setIcon(icon3)
self.btnRemAllFields.setObjectName(_fromUtf8("btnRemAllFields"))
self.verticalLayout.addWidget(self.btnRemAllFields)
self.gridLayout_4.addWidget(self.groupBox_3, 1, 1, 1, 1)
self.groupBox_4 = QtGui.QGroupBox(self.groupBox_2)
self.groupBox_4.setTitle(_fromUtf8(""))
self.groupBox_4.setFlat(True)
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.btnRptFieldUp = QtGui.QPushButton(self.groupBox_4)
self.btnRptFieldUp.setText(_fromUtf8(""))
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/down.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRptFieldUp.setIcon(icon4)
self.btnRptFieldUp.setObjectName(_fromUtf8("btnRptFieldUp"))
self.verticalLayout_2.addWidget(self.btnRptFieldUp)
self.btnRptFieldDwn = QtGui.QPushButton(self.groupBox_4)
self.btnRptFieldDwn.setText(_fromUtf8(""))
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/plugins/stdm/images/icons/up.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.btnRptFieldDwn.setIcon(icon5)
self.btnRptFieldDwn.setObjectName(_fromUtf8("btnRptFieldDwn"))
self.verticalLayout_2.addWidget(self.btnRptFieldDwn)
self.gridLayout_4.addWidget(self.groupBox_4, 1, 3, 1, 1)
self.gridLayout_3.addWidget(self.groupBox_2, 1, 0, 1, 1)
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.gridLayout_7 = QtGui.QGridLayout(self.tab_2)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.label_4 = QtGui.QLabel(self.tab_2)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_7.addWidget(self.label_4, 0, 0, 1, 2)
self.groupBox_5 = QtGui.QGroupBox(self.tab_2)
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.gridLayout_6 = QtGui.QGridLayout(self.groupBox_5)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.lstFields = QtGui.QListWidget(self.groupBox_5)
self.lstFields.setObjectName(_fromUtf8("lstFields"))
self.gridLayout_6.addWidget(self.lstFields, 0, 0, 1, 1)
self.lstUniqVal = QtGui.QListWidget(self.groupBox_5)
self.lstUniqVal.setObjectName(_fromUtf8("lstUniqVal"))
self.gridLayout_6.addWidget(self.lstUniqVal, 0, 1, 1, 1)
self.btnUniqVals = QtGui.QPushButton(self.groupBox_5)
self.btnUniqVals.setObjectName(_fromUtf8("btnUniqVals"))
self.gridLayout_6.addWidget(self.btnUniqVals, 1, 1, 1, 1)
self.gridLayout_7.addWidget(self.groupBox_5, 1, 0, 1, 6)
self.groupBox_6 = QtGui.QGroupBox(self.tab_2)
self.groupBox_6.setObjectName(_fromUtf8("groupBox_6"))
self.gridLayout_5 = QtGui.QGridLayout(self.groupBox_6)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.btnOpEqual = QtGui.QPushButton(self.groupBox_6)
self.btnOpEqual.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpEqual.setObjectName(_fromUtf8("btnOpEqual"))
self.gridLayout_5.addWidget(self.btnOpEqual, 0, 0, 1, 1)
self.btnOpNotEqual = QtGui.QPushButton(self.groupBox_6)
self.btnOpNotEqual.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpNotEqual.setObjectName(_fromUtf8("btnOpNotEqual"))
self.gridLayout_5.addWidget(self.btnOpNotEqual, 0, 1, 1, 1)
self.btnOpLike = QtGui.QPushButton(self.groupBox_6)
self.btnOpLike.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpLike.setObjectName(_fromUtf8("btnOpLike"))
self.gridLayout_5.addWidget(self.btnOpLike, 0, 2, 1, 1)
self.btnOpGreater = QtGui.QPushButton(self.groupBox_6)
self.btnOpGreater.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpGreater.setObjectName(_fromUtf8("btnOpGreater"))
self.gridLayout_5.addWidget(self.btnOpGreater, 1, 0, 1, 1)
self.btnOpGreaterEq = QtGui.QPushButton(self.groupBox_6)
self.btnOpGreaterEq.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpGreaterEq.setObjectName(_fromUtf8("btnOpGreaterEq"))
self.gridLayout_5.addWidget(self.btnOpGreaterEq, 1, 1, 1, 1)
self.btnOpAnd = QtGui.QPushButton(self.groupBox_6)
self.btnOpAnd.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpAnd.setObjectName(_fromUtf8("btnOpAnd"))
self.gridLayout_5.addWidget(self.btnOpAnd, 1, 2, 1, 1)
self.btnOpLess = QtGui.QPushButton(self.groupBox_6)
self.btnOpLess.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpLess.setObjectName(_fromUtf8("btnOpLess"))
self.gridLayout_5.addWidget(self.btnOpLess, 2, 0, 1, 1)
self.btnOpLess_2 = QtGui.QPushButton(self.groupBox_6)
self.btnOpLess_2.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpLess_2.setObjectName(_fromUtf8("btnOpLess_2"))
self.gridLayout_5.addWidget(self.btnOpLess_2, 2, 1, 1, 1)
self.btnOpOr = QtGui.QPushButton(self.groupBox_6)
self.btnOpOr.setMinimumSize(QtCore.QSize(0, 30))
self.btnOpOr.setObjectName(_fromUtf8("btnOpOr"))
self.gridLayout_5.addWidget(self.btnOpOr, 2, 2, 1, 1)
self.gridLayout_7.addWidget(self.groupBox_6, 2, 0, 2, 1)
self.lblSqlEntity = QtGui.QLabel(self.tab_2)
self.lblSqlEntity.setObjectName(_fromUtf8("lblSqlEntity"))
self.gridLayout_7.addWidget(self.lblSqlEntity, 2, 1, 1, 5)
self.txtSqlParser = QtGui.QTextEdit(self.tab_2)
self.txtSqlParser.setObjectName(_fromUtf8("txtSqlParser"))
self.gridLayout_7.addWidget(self.txtSqlParser, 3, 1, 1, 5)
self.btnSQLClr = QtGui.QPushButton(self.tab_2)
self.btnSQLClr.setMinimumSize(QtCore.QSize(0, 30))
self.btnSQLClr.setObjectName(_fromUtf8("btnSQLClr"))
self.gridLayout_7.addWidget(self.btnSQLClr, 4, 1, 1, 1)
self.btnSQLVer = QtGui.QPushButton(self.tab_2)
self.btnSQLVer.setMinimumSize(QtCore.QSize(0, 30))
self.btnSQLVer.setObjectName(_fromUtf8("btnSQLVer"))
self.gridLayout_7.addWidget(self.btnSQLVer, 4, 2, 1, 1)
self.btnMap = QtGui.QPushButton(self.tab_2)
self.btnMap.setMinimumSize(QtCore.QSize(0, 30))
self.btnMap.setObjectName(_fromUtf8("btnMap"))
self.gridLayout_7.addWidget(self.btnMap, 4, 4, 1, 1)
self.btnSQLApply = QtGui.QPushButton(self.tab_2)
self.btnSQLApply.setMinimumSize(QtCore.QSize(0, 30))
self.btnSQLApply.setObjectName(_fromUtf8("btnSQLApply"))
self.gridLayout_7.addWidget(self.btnSQLApply, 4, 3, 1, 1)
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName(_fromUtf8("tab_3"))
self.gridLayout_8 = QtGui.QGridLayout(self.tab_3)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.label_5 = QtGui.QLabel(self.tab_3)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_8.addWidget(self.label_5, 0, 0, 1, 1)
self.lstRptFields = QtGui.QListWidget(self.tab_3)
self.lstRptFields.setObjectName(_fromUtf8("lstRptFields"))
self.gridLayout_8.addWidget(self.lstRptFields, 1, 0, 1, 1)
self.groupBox_7 = QtGui.QGroupBox(self.tab_3)
self.groupBox_7.setTitle(_fromUtf8(""))
self.groupBox_7.setFlat(True)
self.groupBox_7.setObjectName(_fromUtf8("groupBox_7"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox_7)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.btnAddGpField = QtGui.QPushButton(self.groupBox_7)
self.btnAddGpField.setText(_fromUtf8(""))
self.btnAddGpField.setIcon(icon)
self.btnAddGpField.setObjectName(_fromUtf8("btnAddGpField"))
self.verticalLayout_3.addWidget(self.btnAddGpField)
self.btnRemGpField = QtGui.QPushButton(self.groupBox_7)
self.btnRemGpField.setText(_fromUtf8(""))
self.btnRemGpField.setIcon(icon1)
self.btnRemGpField.setObjectName(_fromUtf8("btnRemGpField"))
self.verticalLayout_3.addWidget(self.btnRemGpField)
self.gridLayout_8.addWidget(self.groupBox_7, 1, 1, 1, 1)
self.tbGroupFields = QtGui.QTableWidget(self.tab_3)
self.tbGroupFields.setObjectName(_fromUtf8("tbGroupFields"))
self.tbGroupFields.setColumnCount(1)
self.tbGroupFields.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tbGroupFields.setHorizontalHeaderItem(0, item)
self.tbGroupFields.horizontalHeader().setDefaultSectionSize(95)
self.tbGroupFields.horizontalHeader().setStretchLastSection(True)
self.gridLayout_8.addWidget(self.tbGroupFields, 1, 2, 1, 1)
self.chIncludeGpFields = QtGui.QCheckBox(self.tab_3)
self.chIncludeGpFields.setObjectName(_fromUtf8("chIncludeGpFields"))
self.gridLayout_8.addWidget(self.chIncludeGpFields, 2, 0, 1, 1)
self.tabWidget.addTab(self.tab_3, _fromUtf8(""))
self.tab_4 = QtGui.QWidget()
self.tab_4.setObjectName(_fromUtf8("tab_4"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.tab_4)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.label_6 = QtGui.QLabel(self.tab_4)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.verticalLayout_4.addWidget(self.label_6)
self.tbSortFields = QtGui.QTableWidget(self.tab_4)
self.tbSortFields.setObjectName(_fromUtf8("tbSortFields"))
self.tbSortFields.setColumnCount(3)
self.tbSortFields.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tbSortFields.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tbSortFields.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tbSortFields.setHorizontalHeaderItem(2, item)
self.tbSortFields.horizontalHeader().setDefaultSectionSize(130)
self.tbSortFields.horizontalHeader().setHighlightSections(False)
self.tbSortFields.horizontalHeader().setStretchLastSection(True)
self.verticalLayout_4.addWidget(self.tbSortFields)
self.tabWidget.addTab(self.tab_4, _fromUtf8(""))
self.tab_5 = QtGui.QWidget()
self.tab_5.setObjectName(_fromUtf8("tab_5"))
self.gridLayout_9 = QtGui.QGridLayout(self.tab_5)
self.gridLayout_9.setObjectName(_fromUtf8("gridLayout_9"))
self.label_7 = QtGui.QLabel(self.tab_5)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_9.addWidget(self.label_7, 0, 0, 1, 1)
self.trRptSettings = QtGui.QTreeWidget(self.tab_5)
self.trRptSettings.setObjectName(_fromUtf8("trRptSettings"))
item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_1 = QtGui.QTreeWidgetItem(item_0)
item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
item_0 = QtGui.QTreeWidgetItem(self.trRptSettings)
self.trRptSettings.header().setVisible(False)
self.gridLayout_9.addWidget(self.trRptSettings, 1, 0, 1, 1)
self.stackedWidget = QtGui.QStackedWidget(self.tab_5)
self.stackedWidget.setObjectName(_fromUtf8("stackedWidget"))
self.gridLayout_9.addWidget(self.stackedWidget, 1, 1, 1, 1)
self.tabWidget.addTab(self.tab_5, _fromUtf8(""))
self.gridLayout.addWidget(self.tabWidget, 0, 0, 1, 4)
self.btnLoad = QtGui.QPushButton(ReportBuilder)
self.btnLoad.setMinimumSize(QtCore.QSize(0, 30))
self.btnLoad.setObjectName(_fromUtf8("btnLoad"))
self.gridLayout.addWidget(self.btnLoad, 1, 0, 1, 1)
self.btnRptCancel = QtGui.QPushButton(ReportBuilder)
self.btnRptCancel.setMinimumSize(QtCore.QSize(0, 30))
self.btnRptCancel.setObjectName(_fromUtf8("btnRptCancel"))
self.gridLayout.addWidget(self.btnRptCancel, 1, 3, 1, 1)
self.btnGenRpt = QtGui.QPushButton(ReportBuilder)
self.btnGenRpt.setMinimumSize(QtCore.QSize(0, 30))
self.btnGenRpt.setObjectName(_fromUtf8("btnGenRpt"))
self.gridLayout.addWidget(self.btnGenRpt, 1, 2, 1, 1)
self.btnSave = QtGui.QPushButton(ReportBuilder)
self.btnSave.setMinimumSize(QtCore.QSize(0, 30))
self.btnSave.setObjectName(_fromUtf8("btnSave"))
self.gridLayout.addWidget(self.btnSave, 1, 1, 1, 1)
self.retranslateUi(ReportBuilder)
self.tabWidget.setCurrentIndex(0)
self.stackedWidget.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(ReportBuilder)
def retranslateUi(self, ReportBuilder):
ReportBuilder.setWindowTitle(_translate("ReportBuilder", "STDM Report Builder", None))
self.label.setText(_translate("ReportBuilder", "Entity", None))
self.groupBox_2.setTitle(_translate("ReportBuilder", "Report Contents Fields:", None))
self.label_2.setText(_translate("ReportBuilder", "Available Fields:", None))
self.label_3.setText(_translate("ReportBuilder", "Report Fields:", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("ReportBuilder", "Fields", None))
self.label_4.setText(_translate("ReportBuilder", "Enter a WHERE clause to select records that will be appended to the report.", None))
self.groupBox_5.setTitle(_translate("ReportBuilder", "Report Fields", None))
self.btnUniqVals.setText(_translate("ReportBuilder", "Get Unique Values", None))
self.groupBox_6.setTitle(_translate("ReportBuilder", "Operators:", None))
self.btnOpEqual.setText(_translate("ReportBuilder", "=", None))
self.btnOpNotEqual.setText(_translate("ReportBuilder", "<>", None))
self.btnOpLike.setText(_translate("ReportBuilder", "LIKE", None))
self.btnOpGreater.setText(_translate("ReportBuilder", ">", None))
self.btnOpGreaterEq.setText(_translate("ReportBuilder", ">=", None))
self.btnOpAnd.setText(_translate("ReportBuilder", "AND", None))
self.btnOpLess.setText(_translate("ReportBuilder", "<", None))
self.btnOpLess_2.setText(_translate("ReportBuilder", "<=", None))
self.btnOpOr.setText(_translate("ReportBuilder", "OR", None))
self.lblSqlEntity.setText(_translate("ReportBuilder", "Select * FROM [ENTITY] WHERE:", None))
self.btnSQLClr.setText(_translate("ReportBuilder", "Clear", None))
self.btnSQLVer.setText(_translate("ReportBuilder", "Verify", None))
self.btnMap.setText(_translate("ReportBuilder", "Show on Map", None))
self.btnSQLApply.setText(_translate("ReportBuilder", "Apply", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("ReportBuilder", "Filter", None))
self.label_5.setText(_translate("ReportBuilder", "Report Fields:", None))
item = self.tbGroupFields.horizontalHeaderItem(0)
item.setText(_translate("ReportBuilder", "Fields", None))
self.chIncludeGpFields.setText(_translate("ReportBuilder", "Include Group Fields", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("ReportBuilder", "Grouping", None))
self.label_6.setText(_translate("ReportBuilder", "Sort records by a maximum of three fields in either ascending or descending order.", None))
item = self.tbSortFields.horizontalHeaderItem(0)
item.setText(_translate("ReportBuilder", "Fields", None))
item = self.tbSortFields.horizontalHeaderItem(1)
item.setText(_translate("ReportBuilder", "Sort", None))
item = self.tbSortFields.horizontalHeaderItem(2)
item.setText(_translate("ReportBuilder", "Order", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_4), _translate("ReportBuilder", "Sorting", None))
self.label_7.setText(_translate("ReportBuilder", "Settings:", None))
self.trRptSettings.headerItem().setText(0, _translate("ReportBuilder", "1", None))
__sortingEnabled = self.trRptSettings.isSortingEnabled()
self.trRptSettings.setSortingEnabled(False)
self.trRptSettings.topLevelItem(0).setText(0, _translate("ReportBuilder", "Elements", None))
self.trRptSettings.topLevelItem(0).child(0).setText(0, _translate("ReportBuilder", "Title", None))
self.trRptSettings.topLevelItem(0).child(1).setText(0, _translate("ReportBuilder", "Subtitle", None))
self.trRptSettings.topLevelItem(0).child(2).setText(0, _translate("ReportBuilder", "Field Names", None))
self.trRptSettings.topLevelItem(0).child(3).setText(0, _translate("ReportBuilder", "Date", None))
self.trRptSettings.topLevelItem(0).child(4).setText(0, _translate("ReportBuilder", "Page Numbering", None))
self.trRptSettings.topLevelItem(1).setText(0, _translate("ReportBuilder", "Fields", None))
self.trRptSettings.topLevelItem(2).setText(0, _translate("ReportBuilder", "Groups", None))
self.trRptSettings.setSortingEnabled(__sortingEnabled)
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_5), _translate("ReportBuilder", "Display", None))
self.btnLoad.setText(_translate("ReportBuilder", "Load...", None))
self.btnRptCancel.setText(_translate("ReportBuilder", "Cancel", None))
self.btnGenRpt.setText(_translate("ReportBuilder", "Generate Report", None))
self.btnSave.setText(_translate("ReportBuilder", "Save", None))
| gpl-2.0 | -3,415,312,747,470,689,300 | 59.907652 | 149 | 0.678004 | false | 3.558235 | false | false | false |
Mapkin/tally | tally/api/models.py | 1 | 1603 | from django.contrib.auth.models import User
from django.db import models
from django.utils.timezone import now
import os
class Client(models.Model):
api_key = models.TextField(unique=True, blank=True)
app_name = models.TextField()
user = models.ForeignKey(User)
date_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u"{0}:{1}".format(self.user.username, self.app_name)
def _keygen(self, length):
alphabet = ('0123456789'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz')
        junk = os.urandom(length)
key = [alphabet[ord(j) % len(alphabet)] for j in junk]
return ''.join(key)
def save(self, *args, **kwargs):
while True:
if len(self.api_key) == 0:
self.api_key = self._keygen(20)
objs = Client.objects.filter(api_key=self.api_key)
if len(objs) == 0:
return super(Client, self).save(*args, **kwargs)
else:
self.api_key = ''
class Counter(models.Model):
name = models.TextField(unique=True)
count = models.PositiveIntegerField(default=0)
last_modified = models.DateField(auto_now=True)
class Meta:
ordering = ['id']
def __unicode__(self):
return u"{0}:{1}".format(self.name, self.count)
def increment(self):
if self.last_modified < now().date():
self.count = 1
else:
self.count += 1
self.save()
def reset(self):
self.count = 0
self.save()
| mit | -8,551,973,503,065,906,000 | 26.637931 | 67 | 0.577043 | false | 3.871981 | false | false | false |
Ayase-252/waife-crawler | crawler/selector.py | 1 | 1675 | """
Selector
Selector decides whether a picture should be downloaded.
"""
import copy
class Selector:
"""
    Selector middleware implements a queue of selector functions. Candidates
    go through a series of functions and are filtered.
"""
def __init__(self):
self._selector_queue = []
self._decisive_selector_queue = []
def add_normal_selector(self, selector_function):
"""
        Add a selector to the queue. Selectors take effect in the order in
        which they were added.
"""
self._selector_queue.append(selector_function)
def select(self, candidates):
"""
        Select eligible pictures from the candidates.
"""
candidates_copy = copy.deepcopy(candidates)
eligible_pictures = []
for decisive_selector in self._decisive_selector_queue:
for candidate in candidates:
if decisive_selector(candidate):
eligible_pictures.append(candidate)
candidates_copy.remove(candidate)
for selector in self._selector_queue:
remove_list = []
for candidate in candidates_copy:
if not selector(candidate):
remove_list.append(candidate)
for remove_item in remove_list:
candidates_copy.remove(remove_item)
return candidates_copy + eligible_pictures
def add_decisive_selector(self, decisive_selector):
"""
        Add a decisive selector to the queue.
        A picture that passes the test of any decisive selector is selected.
"""
self._decisive_selector_queue.append(decisive_selector)
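# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the queue semantics documented above: normal selectors
# filter candidates one after another, while a decisive selector
# short-circuits the filters. The candidate dicts and the score
# threshold are made-up example data.
if __name__ == '__main__':
    selector = Selector()
    selector.add_normal_selector(lambda c: c['score'] >= 50)
    selector.add_decisive_selector(lambda c: c['favorite'])
    candidates = [
        {'id': 1, 'score': 80, 'favorite': False},  # kept by the score filter
        {'id': 2, 'score': 10, 'favorite': True},   # kept by the decisive rule
        {'id': 3, 'score': 10, 'favorite': False},  # filtered out
    ]
    for picture in selector.select(candidates):
        print(picture['id'])  # prints 1, then 2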
| mit | 4,757,792,966,291,532,000 | 30.603774 | 76 | 0.615522 | false | 4.691877 | false | false | false |
playfire/django-autologin | django_autologin/middleware.py | 2 | 1519 | from django.shortcuts import redirect
from django.utils.cache import add_never_cache_headers
from django.core.signing import TimestampSigner, BadSignature
from django.contrib.auth.models import User
from . import app_settings
from .utils import login, strip_token, get_user_salt
class AutomaticLoginMiddleware(object):
def process_request(self, request):
token = request.GET.get(app_settings.KEY)
if not token:
return
r = redirect(strip_token(request.get_full_path()))
try:
pk = int(token.split(':', 1)[0])
# Only change user if necessary. We strip the token in any case.
# The AnonymousUser class has no 'pk' attribute (#18093)
if getattr(request.user, 'pk', request.user.id) == pk:
return r
user = User.objects.get(pk=pk)
except (ValueError, User.DoesNotExist):
return r
try:
TimestampSigner(salt=get_user_salt(user)).unsign(
token, max_age=app_settings.MAX_AGE,
)
except BadSignature:
return r
response = self.render(
request,
user,
token,
strip_token(request.get_full_path()),
)
add_never_cache_headers(response)
return response
def render(self, request, user, token, path):
"""
Subclasses may override this behaviour.
"""
login(request, user)
return redirect(path)
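# --- Illustrative helper sketch (not part of the original module) ---
# Builds a URL carrying an autologin token in the format the middleware
# above parses: TimestampSigner.sign(str(pk)) yields "pk:timestamp:sig",
# and process_request() reads the pk back from the first ':'-separated
# field. The function name and usage are assumptions for illustration.
def make_autologin_url(user, path):
    token = TimestampSigner(salt=get_user_salt(user)).sign(str(user.pk))
    return '%s?%s=%s' % (path, app_settings.KEY, token)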
| bsd-3-clause | 5,125,886,268,442,271,000 | 27.12963 | 76 | 0.59052 | false | 4.29096 | false | false | false |
paulscherrerinstitute/bsread_python | bsread/data/serialization.py | 1 | 3893 | from logging import getLogger
import numpy
from bsread.data.compression import NoCompression, BitshuffleLZ4
_logger = getLogger(__name__)
def deserialize_number(numpy_array):
"""
Return single value arrays as a scalar.
:param numpy_array: Numpy array containing a number to deserialize.
:return: Array or scalar, based on array size.
"""
if numpy_array is None:
return numpy_array
if len(numpy_array) == 1:
return numpy_array[0]
else:
return numpy_array
def deserialize_string(numpy_array):
"""
Return string that is serialized as a numpy array.
:param numpy_array: Array to deserialize (UTF-8 is assumed)
:return: String.
"""
return numpy_array.tobytes().decode()
def serialize_numpy(numpy_number, dtype=None):
"""
Serialize the provided numpy array.
:param numpy_number: Number to serialize.
:param dtype: Ignored. Here just to have a consistent interface.
:return: Numpy array.
"""
# Numpy array are already the format we are looking for.
return numpy.array([numpy_number], dtype=numpy_number.dtype)
def serialize_python_number(value, dtype):
"""
Serialize a python number by converting it into a numpy array and getting its bytes.
:param value: Value to serialize.
:param dtype: Numpy value representation.
:return: Numpy array.
"""
return numpy.array([value], dtype=dtype)
def serialize_python_string(value, dtype):
"""
Serialize string into numpy array.
:param value: Value to serialize.
:param dtype: Dtype to use (UTF-8 is assumed, use u1)
:return: Numpy array.
"""
return numpy.frombuffer(value.encode(), dtype=dtype)
def serialize_python_list(value, dtype):
"""
Convert python list into ndarray.
:param value: List to convert.
    :param dtype: Numpy dtype to use for the array elements.
:return: Numpy array.
"""
return numpy.array(value, dtype=dtype)
# Compression string to compression provider mapping.
compression_provider_mapping = {
None: NoCompression,
"none": NoCompression,
"bitshuffle_lz4": BitshuffleLZ4
}
# Channel type to numpy dtype and serializer mapping.
# channel_type: (dtype, deserializer)
channel_type_deserializer_mapping = {
# Default value if no channel_type specified.
None: ("f8", deserialize_number),
'int8': ('i1', deserialize_number),
'uint8': ('u1', deserialize_number),
'int16': ('i2', deserialize_number),
'uint16': ('u2', deserialize_number),
'int32': ('i4', deserialize_number),
'uint32': ('u4', deserialize_number),
'int64': ('i8', deserialize_number),
'uint64': ('u8', deserialize_number),
'float32': ('f4', deserialize_number),
'float64': ('f8', deserialize_number),
'string': ('u1', deserialize_string),
'bool': ('u1', deserialize_number)
}
# Value to send to channel type and serializer mapping.
# type(value): (dtype, channel_type, serializer, shape)
channel_type_scalar_serializer_mapping = {
# Default value if no channel_type specified.
type(None): ("f8", "float64", serialize_python_number, [1]),
float: ('f8', "float64", serialize_python_number, [1]),
int: ('i8', "int64", serialize_python_number, [1]),
str: ('u1', "string", serialize_python_string, [1]),
numpy.int8: ('i1', 'int8', serialize_numpy, [1]),
numpy.uint8: ('u1', 'uint8', serialize_numpy, [1]),
numpy.int16: ('i2', 'int16', serialize_numpy, [1]),
numpy.uint16: ('u2', 'uint16', serialize_numpy, [1]),
numpy.int32: ('i4', 'int32', serialize_numpy, [1]),
numpy.uint32: ('u4', 'uint32', serialize_numpy, [1]),
numpy.int64: ('i8', 'int64', serialize_numpy, [1]),
numpy.uint64: ('u8', 'uint64', serialize_numpy, [1]),
numpy.float32: ('f4', 'float32', serialize_numpy, [1]),
numpy.float64: ('f8', 'float64', serialize_numpy, [1]),
}
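# --- Illustrative round-trip sketch (not part of the original module) ---
# Serializes a Python float via the scalar mapping above, then
# deserializes it the way a receiver would, using the dtype/deserializer
# pair registered for the advertised channel type.
if __name__ == '__main__':
    value = 3.14
    dtype, channel_type, serializer, shape = \
        channel_type_scalar_serializer_mapping[type(value)]
    raw = serializer(value, dtype)  # numpy array, ready for the wire
    recv_dtype, deserializer = channel_type_deserializer_mapping[channel_type]
    received = numpy.frombuffer(raw.tobytes(), dtype=recv_dtype)
    print(deserializer(received))  # -> 3.14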
| gpl-3.0 | 5,786,850,743,777,654,000 | 31.173554 | 88 | 0.655792 | false | 3.548769 | false | false | false |
CompileInc/agua | agua/evaluate.py | 1 | 1212 | from agua.config import get_btr_columns
from agua.utils import get_check_function
from agua.validators import EMPTY_VALUES
def evaluate(data, config):
result = [None] * len(config)
for i, c in enumerate(config):
column, test_column, result_column = get_btr_columns(config[i])
check_function = get_check_function(c['comparator'])
kwargs = c.get('kwargs', {})
column_result = {'attempted': 0, 'success': 0}
separator = c.get('separator')
for row in data:
r = None
if row[test_column] not in EMPTY_VALUES:
column_result['attempted'] += 1
test_value = row[test_column]
if separator:
base_values = row[column].split(separator)
else:
base_values = [row[column]]
for base_value in base_values:
r = check_function(base_value, test_value, **kwargs)
if r:
break
if r:
column_result['success'] += 1
row[result_column] = r
result[i] = column_result
return {'data': data, 'result': result}
| mit | -1,278,688,474,731,033,000 | 29.3 | 72 | 0.523102 | false | 4.222997 | false | false | false |
ClonedOne/pandalog_investigator | pandaloginvestigator/core/io/db_manager.py | 1 | 1559 | import sqlite3
"""
This module is used to obtain the name of the starting malware tested in each log file.
Malware process names are the first 14 characters of the md5, the log file name is actually the uuid.
"""
db_name = 'panda.db'
table_name = 'samples'
column1 = 'uuid'
column2 = 'filename'
column3 = 'md5'
def acquire_malware_file_dict(dir_database_path):
"""
    Read the panda database file (SQLite) and return a dictionary mapping panda log file names (uuids) to
    malicious process names (the first 14 characters of their md5 hashes).
:param dir_database_path:
:return:
"""
conn = sqlite3.connect(dir_database_path + '/' + db_name)
c = conn.cursor()
uuid_md5_dict = {}
c.execute('SELECT {col1},{col2} FROM {tn}'.format(tn=table_name, col1=column1, col2=column3))
all_rows = c.fetchall()
for row in all_rows:
uuid_md5_dict[row[0]] = row[1][:14]
conn.close()
return uuid_md5_dict
def acquire_malware_file_dict_full(dir_database_path):
"""
    Read the panda database file (SQLite) and return a dictionary mapping panda log file names (uuids) to
malicious process names (md5 hashes).
:param dir_database_path:
:return:
"""
conn = sqlite3.connect(dir_database_path + '/' + db_name)
c = conn.cursor()
uuid_md5_dict = {}
c.execute('SELECT {col1},{col2} FROM {tn}'.format(tn=table_name, col1=column1, col2=column3))
all_rows = c.fetchall()
for row in all_rows:
uuid_md5_dict[row[0]] = row[1]
conn.close()
return uuid_md5_dict
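# --- Illustrative usage sketch (not part of the original module) ---
# The directory below is a placeholder; it must contain a 'panda.db'
# SQLite file with the 'samples' table described above.
if __name__ == '__main__':
    uuid_md5_map = acquire_malware_file_dict('/path/to/panda_dir')
    for uuid, md5_prefix in uuid_md5_map.items():
        print(uuid, md5_prefix)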
| mit | -1,615,236,946,322,152,000 | 27.87037 | 107 | 0.65619 | false | 3.188139 | false | false | false |
hmunfru/fiware-paas | automatization_scripts/tools/http.py | 1 | 4843 | # -*- coding: utf-8 -*-
# Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
__author__ = 'henar'
import httplib
from xml.dom.minidom import parse, parseString
from urlparse import urlparse
import sys
import json
import mimetypes
def post_multipart(host, port, selector, fields, files):
content_type, body = encode_multipart_formdata(fields, files)
h = httplib.HTTP(host, port)
h.putrequest('POST', selector)
h.putheader('content-type', content_type)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
errcode, errmsg, headers = h.getreply()
print errcode
return h.file.read()
def encode_multipart_formdata(fields, files):
LIMIT = '100'
dd = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + LIMIT)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
print files
for (filename, value) in files:
L.append('--' + LIMIT)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (filename, filename))
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(value)
L.append('--' + LIMIT + '--')
L.append('')
print L
body = dd.join(L)
content_type = 'multipart/form-data; boundary=%s' % LIMIT
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def __do_http_req(method, url, headers, payload):
parsed_url = urlparse(url)
con = httplib.HTTPConnection(parsed_url.netloc)
con.request(method, parsed_url.path, payload, headers)
return con.getresponse()
##
## Method that performs the HTTP GET
##
def get(url, headers):
return __do_http_req("GET", url, headers, None)
def delete(url, headers):
return __do_http_req("DELETE", url, headers, None)
##
## Method that performs the HTTP PUT
##
def __put(url, headers):
return __do_http_req("PUT", url, headers, None)
##
## Method that performs the HTTP POST
##
def post(url, headers, payload):
return __do_http_req("POST", url, headers, payload)
def get_token(keystone_url, tenant, user, password):
# url="%s/%s" %(keystone_url,"v2.0/tokens")
print keystone_url
headers = {'Content-Type': 'application/json',
'Accept': "application/xml"}
payload = '{"auth":{"tenantName":"' + tenant + '","passwordCredentials":{"username":"' + user + '","password":"' + password + '"}}}'
print payload
response = post(keystone_url, headers, payload)
data = response.read()
    ## If the response is the right one, build the dictionary of the data in JSON.
if response.status != 200:
        print 'failed to obtain the token: ' + str(response.status)
sys.exit(1)
else:
dom = parseString(data)
try:
result = (dom.getElementsByTagName('token'))[0]
var = result.attributes["id"].value
return var
        except Exception:
            print ("Error in the processing environment")
sys.exit(1)
def processTask(headers, taskdom):
try:
print taskdom
href = taskdom["@href"]
status = taskdom["@status"]
while status == 'RUNNING':
data1 = get_task(href, headers)
data = json.loads(data1)
status = data["@status"]
if status == 'ERROR':
error = taskdom["error"]
message = error["message"]
majorErrorCode = error["majorErrorCode"]
print "ERROR : " + message + " " + majorErrorCode
return status
    except Exception:
print "Unexpected error:", sys.exc_info()[0]
sys.exit(1)
def get_task(url, headers):
# url="%s/%s" %(keystone_url,"v2.0/tokens")
response = get(url, headers)
    ## If the response is the right one, build the dictionary of the data in JSON.
if response.status != 200:
        print 'failed to retrieve the task: ' + str(response.status)
sys.exit(1)
else:
data = response.read()
return data
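##
## Illustrative usage sketch (not part of the original script): the
## endpoint, tenant and credentials below are placeholders. Wires the
## helpers above together: authenticate against Keystone, then poll a
## task until it leaves the RUNNING state.
##
def example_poll_task(task_url):
    token = get_token('http://keystone.example:5000/v2.0/tokens',
                      'my_tenant', 'my_user', 'my_password')
    headers = {'X-Auth-Token': token, 'Accept': 'application/json'}
    taskdom = json.loads(get_task(task_url, headers))
    return processTask(headers, taskdom)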
| apache-2.0 | 3,568,522,416,927,700,000 | 27.650888 | 136 | 0.622057 | false | 3.605361 | false | false | false |
brkrishna/freelance | albonazionalegestoriambientali_it/archive/cittametropolitina.py | 1 | 1151 | # -- coding: utf-8 --
# encoding=utf8
import requests, os, time, random, csv
from lxml import html
from lxml.html.clean import Cleaner
url = 'http://www.cittametropolitana.mi.it/cultura/progetti/integrando/cd-online/htm/tab_riassuntiva.htm'
base_url = 'http://www.cittametropolitana.mi.it/cultura/progetti/integrando/cd-online'
cleaner = Cleaner(style=True, links=True, add_nofollow=True, page_structure=False, safe_attrs_only=False)
def main():
try:
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get(url, headers=headers)
tree = html.fromstring(r.content)
anchors = tree.xpath("//a[contains(@href, 'javascript:openBrWindow')]/@href")
with open('cittametropolitana', 'w') as f:
for anchor in anchors:
link = base_url + "/" + anchor[anchor.find("/")+1:anchor.find(".htm")+4]
r2 = requests.get(link, headers=headers)
tree2 = html.fromstring(cleaner.clean_html(r2.content))
line = "$$$".join(tree2.xpath("*//text()[normalize-space()]")).replace("\r", "###").replace("\n", "%%%").strip()
f.write(line + "\n")
except Exception as e:
print(e.__doc__)
print(e.args)
if __name__ == '__main__':
main()
| gpl-2.0 | 7,814,547,587,145,674,000 | 30.108108 | 116 | 0.664639 | false | 2.773494 | false | false | false |
otknoy/michishiki_api_server | post.py | 1 | 1209 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cgi
import sqlite3
import time
import config
def valid(qs):
required_keys = ['title', 'comment', 'posted_by', 'localite', 'latitude', 'longitude']
return all([qs.has_key(k) for k in required_keys])
def post(title, comment, posted_by, localite, latitude, longitude):
rate = 0
created_at = int(time.time()*1000)
updated_at = created_at
sql = u'insert into posts (id, title, comment, posted_by, localite, rate, latitude, longitude, created_at, updated_at) values (null,?,?,?,?,?,?,?,?,?);'
con = sqlite3.connect(config.db_path, isolation_level=None)
con.execute(sql, (title, comment, posted_by, localite, rate, latitude, longitude, created_at, updated_at))
con.close()
if __name__ == '__main__':
import utils
qs = utils.fs2dict(cgi.FieldStorage())
if valid(qs):
keys = ['title', 'comment', 'posted_by', 'localite', 'latitude', 'longitude']
query_string = [qs[k].decode('utf-8') for k in keys]
post(*query_string)
result = '{"message": "Successfully posted!"}'
else:
result = '{"message": "Invalid query string"}'
utils.cgi_header()
print result
| mit | 544,783,474,685,951,600 | 30 | 156 | 0.622002 | false | 3.377095 | false | false | false |
makerdao/keeper | pymaker/numeric.py | 1 | 14935 | # This file is part of Maker Keeper Framework.
#
# Copyright (C) 2017-2018 reverendus
# Copyright (C) 2018 bargst
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import math
from functools import total_ordering, reduce
from decimal import *
_context = Context(prec=1000, rounding=ROUND_DOWN)
@total_ordering
class Wad:
"""Represents a number with 18 decimal places.
`Wad` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Wad`. Multiplication works with instances
of `Wad` and `Ray` and also with `int` numbers. The result of multiplication is always a `Wad`.
`Wad`, along with `Ray`, are the two basic numeric types used by Maker contracts.
Notes:
The internal representation of `Wad` is an unbounded integer, the last 18 digits of it being treated
as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
"""
def __init__(self, value):
"""Creates a new Wad number.
Args:
value: an instance of `Wad`, `Ray` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Wad`
with a value of `0.000000000000000001'.
"""
if isinstance(value, Wad):
self.value = value.value
elif isinstance(value, Ray):
self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(9))).quantize(1, context=_context))
elif isinstance(value, Rad):
self.value = int((Decimal(value.value) // (Decimal(10)**Decimal(27))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 18
dec = Decimal(str(number)) * pwr
return Wad(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Wad(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(19)
return (tmp[0:len(tmp)-18] + "." + tmp[len(tmp)-18:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Wad):
return Wad(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Wad):
return Wad(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Wad):
return Wad(self.value % other.value)
else:
raise ArithmeticError
# z = cast((uint256(x) * y + WAD / 2) / WAD);
def __mul__(self, other):
if isinstance(other, Wad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, Ray):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, Rad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
return Wad(int(result.quantize(1, context=_context)))
elif isinstance(other, int):
return Wad(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __truediv__(self, other):
if isinstance(other, Wad):
return Wad(int((Decimal(self.value) * (Decimal(10) ** Decimal(18)) / Decimal(other.value)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __abs__(self):
return Wad(abs(self.value))
def __eq__(self, other):
if isinstance(other, Wad):
return self.value == other.value
else:
raise ArithmeticError
def __hash__(self):
return hash(self.value)
def __lt__(self, other):
if isinstance(other, Wad):
return self.value < other.value
else:
raise ArithmeticError
def __int__(self):
return int(self.value / 10**18)
def __float__(self):
return self.value / 10**18
def __round__(self, ndigits: int = 0):
return Wad(round(self.value, -18 + ndigits))
def __sqrt__(self):
return Wad.from_number(math.sqrt(self.__float__()))
@staticmethod
def min(*args):
"""Returns the lower of the Wad values"""
return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
@staticmethod
def max(*args):
"""Returns the higher of the Wad values"""
return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
@total_ordering
class Ray:
"""Represents a number with 27 decimal places.
`Ray` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Ray`. Multiplication works with instances
of `Ray` and `Wad` and also with `int` numbers. The result of multiplication is always a `Ray`.
`Ray`, along with `Wad`, are the two basic numeric types used by Maker contracts.
Notes:
The internal representation of `Ray` is an unbounded integer, the last 27 digits of it being treated
as decimal places. It is similar to the representation used in Maker contracts (`uint128`).
"""
def __init__(self, value):
"""Creates a new Ray number.
Args:
value: an instance of `Ray`, `Wad` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Ray`
with a value of `0.000000000000000000000000001'.
"""
if isinstance(value, Ray):
self.value = value.value
elif isinstance(value, Wad):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(9))).quantize(1, context=_context))
elif isinstance(value, Rad):
self.value = int((Decimal(value.value) / (Decimal(10)**Decimal(18))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 27
dec = Decimal(str(number)) * pwr
return Ray(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Ray(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(28)
return (tmp[0:len(tmp)-27] + "." + tmp[len(tmp)-27:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Ray):
return Ray(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Ray):
return Ray(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Ray):
return Ray(self.value % other.value)
else:
raise ArithmeticError
def __mul__(self, other):
if isinstance(other, Ray):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
return Ray(int(result.quantize(1, context=_context)))
elif isinstance(other, Wad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
return Ray(int(result.quantize(1, context=_context)))
elif isinstance(other, Rad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
return Ray(int(result.quantize(1, context=_context)))
elif isinstance(other, int):
return Ray(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __truediv__(self, other):
if isinstance(other, Ray):
return Ray(int((Decimal(self.value) * (Decimal(10) ** Decimal(27)) / Decimal(other.value)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __abs__(self):
return Ray(abs(self.value))
def __eq__(self, other):
if isinstance(other, Ray):
return self.value == other.value
else:
raise ArithmeticError
def __hash__(self):
return hash(self.value)
def __lt__(self, other):
if isinstance(other, Ray):
return self.value < other.value
else:
raise ArithmeticError
def __int__(self):
return int(self.value / 10**27)
def __float__(self):
return self.value / 10**27
def __round__(self, ndigits: int = 0):
return Ray(round(self.value, -27 + ndigits))
def __sqrt__(self):
return Ray.from_number(math.sqrt(self.__float__()))
@staticmethod
def min(*args):
"""Returns the lower of the Ray values"""
return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
@staticmethod
def max(*args):
"""Returns the higher of the Ray values"""
return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
@total_ordering
class Rad:
"""Represents a number with 45 decimal places.
`Rad` implements comparison, addition, subtraction, multiplication and division operators. Comparison, addition,
subtraction and division only work with other instances of `Rad`. Multiplication works with instances
of `Rad`, `Ray and `Wad` and also with `int` numbers. The result of multiplication is always a `Rad`.
    `Rad` is a new unit that exists to prevent precision loss in the core CDP engine of MCD.
Notes:
The internal representation of `Rad` is an unbounded integer, the last 45 digits of it being treated
as decimal places.
"""
def __init__(self, value):
"""Creates a new Rad number.
Args:
value: an instance of `Rad`, `Ray`, `Wad` or an integer. In case of an integer, the internal representation
of Maker contracts is used which means that passing `1` will create an instance of `Rad`
with a value of `0.000000000000000000000000000000000000000000001'.
"""
if isinstance(value, Rad):
self.value = value.value
elif isinstance(value, Ray):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(18))).quantize(1, context=_context))
elif isinstance(value, Wad):
self.value = int((Decimal(value.value) * (Decimal(10)**Decimal(27))).quantize(1, context=_context))
elif isinstance(value, int):
# assert(value >= 0)
self.value = value
else:
raise ArithmeticError
@classmethod
def from_number(cls, number):
# assert(number >= 0)
pwr = Decimal(10) ** 45
dec = Decimal(str(number)) * pwr
return Rad(int(dec.quantize(1, context=_context)))
def __repr__(self):
return "Rad(" + str(self.value) + ")"
def __str__(self):
tmp = str(self.value).zfill(46)
return (tmp[0:len(tmp)-45] + "." + tmp[len(tmp)-45:len(tmp)]).replace("-.", "-0.")
def __add__(self, other):
if isinstance(other, Rad):
return Rad(self.value + other.value)
else:
raise ArithmeticError
def __sub__(self, other):
if isinstance(other, Rad):
return Rad(self.value - other.value)
else:
raise ArithmeticError
def __mod__(self, other):
if isinstance(other, Rad):
return Rad(self.value % other.value)
else:
raise ArithmeticError
def __mul__(self, other):
if isinstance(other, Rad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(45))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, Ray):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(27))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, Wad):
result = Decimal(self.value) * Decimal(other.value) / (Decimal(10) ** Decimal(18))
return Rad(int(result.quantize(1, context=_context)))
elif isinstance(other, int):
return Rad(int((Decimal(self.value) * Decimal(other)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __truediv__(self, other):
if isinstance(other, Rad):
return Rad(int((Decimal(self.value) * (Decimal(10) ** Decimal(45)) / Decimal(other.value)).quantize(1, context=_context)))
else:
raise ArithmeticError
def __abs__(self):
return Rad(abs(self.value))
def __eq__(self, other):
if isinstance(other, Rad):
return self.value == other.value
else:
raise ArithmeticError
def __hash__(self):
return hash(self.value)
def __lt__(self, other):
if isinstance(other, Rad):
return self.value < other.value
else:
raise ArithmeticError
def __int__(self):
return int(self.value / 10**45)
def __float__(self):
return self.value / 10**45
def __round__(self, ndigits: int = 0):
return Rad(round(self.value, -45 + ndigits))
def __sqrt__(self):
return Rad.from_number(math.sqrt(self.__float__()))
@staticmethod
def min(*args):
"""Returns the lower of the Rad values"""
return reduce(lambda x, y: x if x < y else y, args[1:], args[0])
@staticmethod
def max(*args):
"""Returns the higher of the Rad values"""
return reduce(lambda x, y: x if x > y else y, args[1:], args[0])
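# --- Illustrative usage sketch (not part of the original module) ---
# Shows the fixed-point semantics documented above: values are stored as
# unbounded integers with 18/27/45 implied decimal places, mixed
# multiplications keep the type of the left operand, and widening
# conversions preserve the value.
if __name__ == '__main__':
    price = Wad.from_number(1.5)               # 18 decimal places
    rate = Ray.from_number(2)                  # 27 decimal places
    print(price * rate)                        # -> 3.000000000000000000 (a Wad)
    print(Wad(1))                              # smallest Wad: 0.000000000000000001
    print(Rad(price))                          # widened to 45 decimal places
    print(Wad.min(price, Wad.from_number(2)))  # -> 1.500000000000000000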
| agpl-3.0 | 5,257,306,446,090,939,000 | 35.25 | 134 | 0.595581 | false | 3.956291 | false | false | false |
ideascube/ideascube | ideascube/management/utils.py | 1 | 1747 | from django.utils.termcolors import colorize
class Reporter:
"""Store reports and render them on demand."""
ERROR = 1
WARNING = 2
NOTICE = 3
LEVEL_LABEL = {
ERROR: 'errors',
WARNING: 'warnings',
NOTICE: 'notices',
}
def __init__(self, verbosity):
self.verbosity = verbosity
self._reports = {
self.ERROR: {},
self.WARNING: {},
self.NOTICE: {}
}
def compile(self):
lines = []
def write(text, **kwargs):
lines.append(colorize(text=text, **kwargs))
if self._reports:
write('{space}Reports{space}'.format(space=' '*32), bg='blue',
fg='white')
for level, reports in self._reports.items():
if reports:
write(self.LEVEL_LABEL[level].title())
for msg, data in reports.items():
write('- {} ({})'.format(msg, len(data)))
if self.verbosity >= level:
for item in data:
fg = 'red' if level == self.ERROR else 'white'
write(' . {}'.format(item), fg=fg)
return lines
def __str__(self):
return '\n'.join(self.compile())
def _report(self, level, msg, data):
self._reports[level].setdefault(msg, [])
self._reports[level][msg].append(data)
def error(self, msg, data):
self._report(self.ERROR, msg, data)
def warning(self, msg, data):
self._report(self.WARNING, msg, data)
def notice(self, msg, data):
self._report(self.NOTICE, msg, data)
def has_errors(self):
return bool(self._reports[self.ERROR])
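# --- Illustrative usage sketch (not part of the original module) ---
# With verbosity=2, compile() lists the offending items for errors and
# warnings but keeps notices as counts only; the report names below are
# made-up example data.
if __name__ == '__main__':
    reporter = Reporter(verbosity=2)
    reporter.error('invalid record', 'item-17')
    reporter.warning('missing field', 'item-3')
    reporter.warning('missing field', 'item-9')
    reporter.notice('re-indexed', 'item-3')
    print(reporter)
    print(reporter.has_errors())  # -> True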
| agpl-3.0 | 1,777,486,645,642,965,800 | 27.639344 | 74 | 0.503721 | false | 4.169451 | false | false | false |
dotslash/MiniProjects | SecSharing/revealSecret.py | 1 | 1164 | def Bfunc(number,k):
if( (number & (1<<k)) ==0 ): return False
return True
def gcd2(num1,num2):
n1 = max(num1,num2)
n2 = min(num1,num2)
while n2!=0:
n = n2
n2 = n1%n2
n1 = n
return n1
def gcdn(num_lst):
if(len(num_lst)==0): return 0
if(len(num_lst)==1): return num_lst[0]
start = gcd2(num_lst[0],num_lst[1])
for num in num_lst[2:]:
start = gcd2(num,start)
return start
def revealPrimes(secrets):
num_users = len(secrets)
tmp = []
for i in xrange(1,1<<num_users):
tmp.append((bin(i).count("1"),i))
tmp.sort()
tmp.reverse()
primes = []
for numb in tmp:
set_id = numb[1]
numbers_in_set = []
for i in xrange(1,num_users+1):
if(Bfunc(set_id,num_users-i)):
numbers_in_set.append(i)
#print set_id,numbers_in_set
tmp_lst = []
for num in numbers_in_set:
tmp_lst.append(secrets[num-1])
gcd_nums = gcdn(tmp_lst)
primes.append(gcd_nums)
for num in numbers_in_set:
secrets[num-1] = secrets[num-1]/gcd_nums
return primes
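# --- Illustrative usage sketch (not part of the original script) ---
# Each user's secret is the product of the primes shared with the other
# users; revealPrimes() walks the user subsets from largest to smallest
# and peels each shared prime off via the subset GCD. With three users
# whose pairwise primes are 2, 3 and 5, those primes show up among the
# returned GCDs.
if __name__ == '__main__':
    print(revealPrimes([2 * 3, 2 * 5, 3 * 5]))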
| mit | -7,619,012,290,567,015,000 | 24.304348 | 52 | 0.524914 | false | 2.852941 | false | false | false |
jwodder/headerparser | src/headerparser/scanner.py | 1 | 9222 | import re
from warnings import warn
from .errors import MalformedHeaderError, UnexpectedFoldingError
from .util import ascii_splitlines
def scan_string(s, **kwargs):
"""
Scan a string for RFC 822-style header fields and return a generator of
``(name, value)`` pairs for each header field in the input, plus a ``(None,
body)`` pair representing the body (if any) after the header section.
See `scan()` for more information on the exact behavior of the scanner.
:param s: a string which will be broken into lines on CR, LF, and CR LF
boundaries and passed to `scan()`
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of pairs of strings
:raises ScannerError: if the header section is malformed
"""
return scan(ascii_splitlines(s), **kwargs)
def scan_file(fp, **kwargs):
"""
Scan a file for RFC 822-style header fields and return a generator of
``(name, value)`` pairs for each header field in the input, plus a ``(None,
body)`` pair representing the body (if any) after the header section.
See `scan()` for more information on the exact behavior of the scanner.
.. deprecated:: 0.4.0
Use `scan()` instead.
:param fp: A file-like object than can be iterated over to produce lines to
pass to `scan()`. Opening the file in universal newlines mode is
recommended.
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of pairs of strings
:raises ScannerError: if the header section is malformed
"""
warn("scan_file() is deprecated. Use scan() instead.", DeprecationWarning)
return scan(fp, **kwargs)
def scan_lines(fp, **kwargs):
"""
Scan an iterable of lines for RFC 822-style header fields and return a
generator of ``(name, value)`` pairs for each header field in the input,
plus a ``(None, body)`` pair representing the body (if any) after the
header section.
See `scan()` for more information on the exact behavior of the scanner.
.. deprecated:: 0.4.0
Use `scan()` instead.
:param iterable: an iterable of strings representing lines of input
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of pairs of strings
:raises ScannerError: if the header section is malformed
"""
warn("scan_lines() is deprecated. Use scan() instead.", DeprecationWarning)
return scan(fp, **kwargs)
def scan(iterable, **kwargs):
"""
.. versionadded:: 0.4.0
Scan a text-file-like object or iterable of lines for RFC 822-style header
fields and return a generator of ``(name, value)`` pairs for each header
field in the input, plus a ``(None, body)`` pair representing the body (if
any) after the header section.
All lines after the first blank line are concatenated & yielded as-is in a
``(None, body)`` pair. (Note that body lines which do not end with a line
terminator will not have one appended.) If there is no empty line in
``iterable``, then no body pair is yielded. If the empty line is the last
line in ``iterable``, the body will be the empty string. If the empty line
is the *first* line in ``iterable`` and the ``skip_leading_newlines``
option is false (the default), then all other lines will be treated as part
of the body and will not be scanned for header fields.
:param iterable: a text-file-like object or iterable of strings
representing lines of input
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of pairs of strings
:raises ScannerError: if the header section is malformed
"""
lineiter = iter(iterable)
for name, value in _scan_next_stanza(lineiter, **kwargs):
if name is not None:
yield (name, value)
elif value:
yield (None, "".join(lineiter))
def scan_next_stanza(iterator, **kwargs):
"""
.. versionadded:: 0.4.0
Scan a text-file-like object or iterator of lines for RFC 822-style header
fields and return a generator of ``(name, value)`` pairs for each header
field in the input. Input processing stops as soon as a blank line is
encountered, leaving the rest of the iterator unconsumed (If
``skip_leading_newlines`` is true, the function only stops on a blank line
after a non-blank line).
:param iterator: a text-file-like object or iterator of strings
representing lines of input
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of pairs of strings
:raises ScannerError: if the header section is malformed
"""
for name, value in _scan_next_stanza(iterator, **kwargs):
if name is not None:
yield (name, value)
def _scan_next_stanza(
iterator,
separator_regex=re.compile(r"[ \t]*:[ \t]*"), # noqa: B008
skip_leading_newlines=False,
):
"""
.. versionadded:: 0.4.0
Like `scan_next_stanza()`, except it additionally yields as its last item a
``(None, flag)`` pair where ``flag`` is `True` iff the stanza was
terminated by a blank line (thereby suggesting there is more input left to
process), `False` iff the stanza was terminated by EOF.
This is the core function that all other scanners ultimately call.
"""
name = None
value = ""
begun = False
more_left = False
if not hasattr(separator_regex, "match"):
separator_regex = re.compile(separator_regex)
for line in iterator:
line = line.rstrip("\r\n")
if line.startswith((" ", "\t")):
begun = True
if name is not None:
value += "\n" + line
else:
raise UnexpectedFoldingError(line)
else:
m = separator_regex.search(line)
if m:
begun = True
if name is not None:
yield (name, value)
name = line[: m.start()]
value = line[m.end() :]
elif line == "":
if skip_leading_newlines and not begun:
continue
else:
more_left = True
break
else:
raise MalformedHeaderError(line)
if name is not None:
yield (name, value)
yield (None, more_left)
def scan_next_stanza_string(s, **kwargs):
"""
.. versionadded:: 0.4.0
Scan a string for RFC 822-style header fields and return a pair ``(fields,
extra)`` where ``fields`` is a list of ``(name, value)`` pairs for each
header field in the input up to the first blank line and ``extra`` is
everything after the first blank line (If ``skip_leading_newlines`` is
true, the dividing point is instead the first blank line after a non-blank
line); if there is no appropriate blank line in the input, ``extra`` is the
empty string.
:param s: a string to scan
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: pair of a list of pairs of strings and a string
:raises ScannerError: if the header section is malformed
"""
lineiter = iter(ascii_splitlines(s))
fields = list(scan_next_stanza(lineiter, **kwargs))
body = "".join(lineiter)
return (fields, body)
def scan_stanzas(iterable, **kwargs):
"""
.. versionadded:: 0.4.0
Scan a text-file-like object or iterable of lines for zero or more stanzas
of RFC 822-style header fields and return a generator of lists of ``(name,
value)`` pairs, where each list represents a stanza of header fields in the
input.
The stanzas are terminated by blank lines. Consecutive blank lines between
stanzas are treated as a single blank line. Blank lines at the end of the
input are discarded without creating a new stanza.
:param iterable: a text-file-like object or iterable of strings
representing lines of input
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of lists of pairs of strings
:raises ScannerError: if the header section is malformed
"""
lineiter = iter(iterable)
while True:
fields = list(_scan_next_stanza(lineiter, **kwargs))
more_left = fields.pop()[1]
if fields or more_left:
yield fields
else:
break
kwargs["skip_leading_newlines"] = True
def scan_stanzas_string(s, **kwargs):
"""
.. versionadded:: 0.4.0
Scan a string for zero or more stanzas of RFC 822-style header fields and
return a generator of lists of ``(name, value)`` pairs, where each list
represents a stanza of header fields in the input.
The stanzas are terminated by blank lines. Consecutive blank lines between
stanzas are treated as a single blank line. Blank lines at the end of the
input are discarded without creating a new stanza.
:param s: a string which will be broken into lines on CR, LF, and CR LF
boundaries and passed to `scan_stanzas()`
:param kwargs: :ref:`scanner options <scan_opts>`
:rtype: generator of lists of pairs of strings
:raises ScannerError: if the header section is malformed
"""
return scan_stanzas(ascii_splitlines(s), **kwargs)
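# --- Illustrative usage sketch (not part of the original module) ---
# Header fields come back as (name, value) pairs; everything after the
# first blank line is returned untouched as a (None, body) pair.
if __name__ == "__main__":
    msg = "From: alice\nSubject: hi\n\nHello,\nworld!\n"
    for name, value in scan_string(msg):
        print((name, value))
    # -> ('From', 'alice'), ('Subject', 'hi'), (None, 'Hello,\nworld!\n')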
| mit | 2,834,413,941,573,353,000 | 37.107438 | 80 | 0.650727 | false | 4.161552 | false | false | false |
rasmuse/graph-prov-test | gpc/fsdb.py | 1 | 2890 | import os
import json
import sqlite3
import functools
import itertools
import shutil
from os.path import abspath
import logging
from gpc import hexdigest
logger = logging.getLogger(__name__)
class DatabaseError(Exception): pass
class Database(object):
"""Class to interact with the plain text-backed sqlite database."""
def __init__(self, path):
"""Open a database"""
super(Database, self).__init__()
path = abspath(path)
schema_script = Database._get_schema_script(path)
self._data_dir = Database._get_data_dir(path)
def data_statements():
for file in os.listdir(self._data_dir):
stmt_path = os.path.join(self._data_dir, file)
with open(stmt_path, 'r') as f:
sql = f.read()
yield sql
self._conn = sqlite3.connect(':memory:')
with self._conn as conn:
conn.executescript(schema_script)
for stmt in data_statements():
conn.execute(stmt)
@staticmethod
def _get_data_dir(path):
return abspath(os.path.join(path, 'data'))
@staticmethod
def _get_schema_script(path):
with open(Database._get_schema_path(path), 'r') as f:
return f.read()
@staticmethod
def _get_schema_path(path):
return abspath(os.path.join(path, 'schema.sql'))
def write(self):
statements = self._conn.iterdump()
def should_be_saved(stmt):
return stmt.startswith('INSERT')
for stmt in filter(should_be_saved, statements):
digest = hexdigest(stmt)
path = os.path.join(self._data_dir, digest)
if not os.path.exists(path):
with open(path, 'w') as file:
file.write(stmt)
file.write('\n')
def __enter__(self):
return self._conn.__enter__()
def __exit__(self, *args, **kwargs):
self._conn.__exit__(*args, **kwargs)
def execute(self, *args, **kwargs):
return self._conn.execute(*args, **kwargs)
def executemany(self, *args, **kwargs):
return self._conn.executemany(*args, **kwargs)
def executescript(self, *args, **kwargs):
return self._conn.executescript(*args, **kwargs)
@classmethod
def create(cls, path, schema):
"""
Create a new database
Raises:
DatabaseError: If path exists.
"""
path = abspath(path)
if os.path.exists(path):
raise DatabaseError('Path must not exist when creating database!')
os.makedirs(Database._get_data_dir(path))
with open(Database._get_schema_path(path), 'w') as f:
f.write(schema)
# Test it
try:
db = Database(path)
except Exception as e:
shutil.rmtree(path)
raise e
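# --- Illustrative usage sketch (not part of the original module) ---
# Creates a database under a scratch directory, inserts a row, and
# persists the INSERT statements as content-addressed files in data/.
# The scratch path and the one-table schema are assumptions.
if __name__ == '__main__':
    import tempfile
    db_path = os.path.join(tempfile.mkdtemp(), 'db')
    Database.create(db_path, 'CREATE TABLE runs (id TEXT PRIMARY KEY);')
    db = Database(db_path)
    with db as conn:
        conn.execute("INSERT INTO runs VALUES ('abc123')")
    db.write()
    print(os.listdir(Database._get_data_dir(db_path)))  # one file per INSERT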
| lgpl-3.0 | -3,387,949,105,836,521,000 | 26.009346 | 78 | 0.567128 | false | 4.081921 | false | false | false |
ankanch/tieba-zhuaqu | tieba-zhuaqu/linux-dist/old_dist/tieba-title_old.py | 1 | 5182 | #coding=utf-8
import urllib.request
import re
import os
import sys
import threading
import datetime
import pickle
import time
import MailService
begURL = 'http://tieba.baidu.com/f?'
PATH_DOWNLOAD_CACHE = sys.path[0]+'\\dlcache\\'
GV_DOWNLOAD_ALL = []
GV_THEAD_COUNT = 4
page = 0
x=0
max_page = 0
sum = 0
pocessList=[]
def setupfiles():
if os.path.exists('result.txt') == False:
f = open('result.txt','w')
if os.path.exists('result_add') == False:
f = open('result_add','w')
def getHtml(url):
page = urllib.request.urlopen(url)
html = page.read()
return html
def getTitle(html):
# <a href="/p/4745088342" title="DDD" target="_blank" class="j_th_tit ">DDDD</a>
reg = r"<a href=\"/p/.*?class=\"j_th_tit \">.*?</a>"
imgre = re.compile(reg)
titlelist = re.findall(imgre,html)
t=1
dstr = '\r\n\t\t'
for dta in titlelist:
k = re.sub("<a href=\"/p/.*?class=\"j_th_tit \">","",dta)
k = re.sub("</a>","",k)
#print('\t',k.encode('utf-8'))
dstr = dstr + '\r\n\t\t' + k
t+=1
return t,dstr
def savetofile(data,path):
f = open(path,'wb')
f.write(data.encode('gb18030'))
f.close()
def downloadPage(psum,count,beg=0):
x=beg
page = x*50
GV_DOWNLOAD_ALL.append(False)
while x < psum:
#os.system('cls')
        print('>>>>>thread '+str(count)+': now downloading page[',str(x + 1)+'/'+str(psum),']')
html = getHtml(begURL + str(page))
pocessList.append(html)
x += 1
page +=50
    print('[thread'+str(count)+']<<<<<All pages downloaded!')
GV_DOWNLOAD_ALL[count-1] = True
def pocessDataList(GV_COUNT):
titlesum = 0
titlelist = ''
count = 0
dstr = '0x0'
m = 0
NO_OUT = True
while NO_OUT:
if( len(pocessList) > 0 ):
count += 1
            print('>>>>>now processing page[',count,'],------[',titlesum,']pieces of data in all')
m , dstr= getTitle(pocessList[0].decode('utf-8','ignore'))
del pocessList[0]
titlelist += dstr
titlesum += m
x = 0
for item in GV_DOWNLOAD_ALL:
if item == True:
x += 1
if x == GV_COUNT:
NO_OUT = False
break
return titlesum,titlelist
setupfiles()
os.system('clear')
print('>>>>> This script is used to download data from Tieba\n>>>>>by Kanch [email protected]')
isize = os.path.getsize('result.txt')
if isize > 10:
f = open('result_add','rb')
xs = pickle.load(f)
f.close()
    print('>>>>>data detected\n\t>>>size:'+str(isize)+' bytes,with '+str(xs['sum'])+' pieces of data,created on:'+str(xs['time']) +'\n')
opt = input('\r\n>>>>>Would you like to set the Tieba the script is going to collect? (if not, the script will collect the CUIT ba) (Y/N):____\b\b')
if opt == 'Y':
tieba_name = input('>>>>>please enter the name you wish to collect:______________________\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
    print('>>>>>script will collect ['+tieba_name+']!')
else:
tieba_name = '成都信息工程大学'
    print('>>>>>no Tieba set, collecting CUIT by default')
KWD = urllib.parse.urlencode({'kw':tieba_name})
begURL = begURL + KWD + '&ie=utf-8&pn='
max_page = input('>>>>>how many pages do you wish to collect?:______\b\b\b\b\b')
TC = input('how many threads you\'d like to run?____\b\b\b')
GV_THEAD_COUNT = int(TC)
mstr = "============================================================\r\nRESULT\r\n============================================================="
createdtime = datetime.datetime.now()
createdtime.strftime('%Y-%m-%d %H:%M:%S')
time1 = time.time()
# The multithreaded download scheme follows
MAX_PAGE = int(max_page)
# Create worker threads
t = []
x = 0
deltaX = MAX_PAGE / GV_THEAD_COUNT
BEG = 0
END = deltaX
while x < GV_THEAD_COUNT:
tn = threading.Thread(target=downloadPage,args=(int(END),x+1,int(BEG),))
t.append(tn)
x += 1
BEG += deltaX
END += deltaX
for item in t:
item.setDaemon(True)
item.start()
# Process the downloaded data in a loop
sum,mstr = pocessDataList(GV_THEAD_COUNT)
#=================================== All data processed, save to file ======================================
now = datetime.datetime.now()
now.strftime('%Y-%m-%d %H:%M:%S')
last_data_source = {'sum':sum,'time':now}
savetofile(mstr,'result.txt')
f = open('result_add','wb')
pickle.dump(last_data_source, f,2)
f.close()
time2 = time.time()
tc = time2 - time1
print('>>>>>Collection succeeded, total time cost:',str(tc),'sec\n>>>>>total data collected[',sum,']\n>>>>>result saved to ','result.txt')
Title = "Download Success! Finished on " + str(now) + '.'
line1 = "Tieba job created on " + str(createdtime) + " now has been finished!\r\n=========================\r\nSummary\r\n\r\n"
line2 = "\r\nJob Created on: \t"+str(createdtime)+'\r\nJob finished on: \t'+str(now) +"\r\nPieces of data retrieved: " + str(sum) +"\r\nTotal time cost: \t" + str(tc) + " seconds"
line3 = "\r\n\r\n\r\n This mail is send by Kanch's PythonBot @ 216.45.55.153\r\n=========================\r\n"
Content = line1 + line2 + line3
#print(Title,'\r\n',Content)
MailService.SendMail('[email protected]',Title,Content) | gpl-3.0 | 6,584,390,364,780,618,000 | 30.91875 | 180 | 0.555229 | false | 2.794745 | false | false | false |
kidaa/pySDC | pySDC/sweeper_classes/generic_LU.py | 1 | 4411 | import scipy.linalg as LA
import numpy as np
from pySDC.Sweeper import sweeper
class generic_LU(sweeper):
"""
Custom sweeper class, implements Sweeper.py
LU sweeper using LU decomposition of the Q matrix for the base integrator
Attributes:
Qd: U^T of Q^T = L*U
"""
def __init__(self,params):
"""
Initialization routine for the custom sweeper
Args:
coll: collocation object
"""
# call parent's initialization routine
super(generic_LU,self).__init__(params)
# LU integration matrix
self.Qd = self.__get_Qd()
pass
def __get_Qd(self):
"""
Compute LU decomposition of Q^T
Returns:
Qd: U^T of Q^T = L*U
"""
# strip Qmat by initial value u0
QT = self.coll.Qmat[1:,1:].T
# do LU decomposition of QT
[P,L,U] = LA.lu(QT,overwrite_a=True)
        # re-insert the initial-value row/column u0
Qd = np.zeros(np.shape(self.coll.Qmat))
Qd[1:,1:] = U.T
return Qd
def integrate(self):
"""
Integrates the right-hand side
Returns:
list of dtype_u: containing the integral as values
"""
# get current level and problem description
L = self.level
P = L.prob
me = []
# integrate RHS over all collocation nodes
for m in range(1,self.coll.num_nodes+1):
# new instance of dtype_u, initialize values with 0
me.append(P.dtype_u(P.init,val=0))
for j in range(1,self.coll.num_nodes+1):
me[-1] += L.dt*self.coll.Qmat[m,j]*L.f[j]
return me
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# gather all terms which are known already (e.g. from the previous iteration)
# this corresponds to u0 + QF(u^k) - QdF(u^k) + tau
# get QF(u^k)
integral = self.integrate()
for m in range(M):
# get -QdF(u^k)_m
for j in range(M+1):
integral[m] -= L.dt*self.Qd[m+1,j]*L.f[j]
# add initial value
integral[m] += L.u[0]
# add tau if associated
if L.tau is not None:
integral[m] += L.tau[m]
# do the sweep
for m in range(0,M):
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
rhs = P.dtype_u(integral[m])
for j in range(m+1):
rhs += L.dt*self.Qd[m+1,j]*L.f[j]
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[m+1] = P.solve_system(rhs,L.dt*self.Qd[m+1,m+1],L.u[m+1],L.time+L.dt*self.coll.nodes[m])
# update function values
L.f[m+1] = P.eval_f(L.u[m+1],L.time+L.dt*self.coll.nodes[m])
# indicate presence of new values at this level
L.status.updated = True
return None
def compute_end_point(self):
"""
Compute u at the right point of the interval
The value uend computed here might be a simple copy from u[M] (if right point is a collocation node) or
a full evaluation of the Picard formulation (if right point is not a collocation node)
"""
# get current level and problem description
L = self.level
P = L.prob
# check if Mth node is equal to right point (flag is set in collocation class)
if self.coll.right_is_node:
# a copy is sufficient
L.uend = P.dtype_u(L.u[-1])
else:
# start with u0 and add integral over the full interval (using coll.weights)
L.uend = P.dtype_u(L.u[0])
for m in range(self.coll.num_nodes):
L.uend += L.dt*self.coll.weights[m]*L.f[m+1]
# add up tau correction of the full interval (last entry)
if L.tau is not None:
L.uend += L.tau[-1]
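        return None

# --- Illustrative sketch (not part of the original class) ---
# Reproduces the __get_Qd() trick on a standalone matrix: LU-decompose
# the transposed collocation matrix and keep U^T as the lower-triangular
# integration matrix, so each sweep needs only one implicit solve per
# node. Q below is made-up example data, not a real collocation matrix.
if __name__ == "__main__":
    Q = np.array([[0.25, -0.05],
                  [0.50, 0.25]])
    _, _, U = LA.lu(Q.T)
    Qd = U.T
    print(np.allclose(np.tril(Qd), Qd))  # -> True: lower triangular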
| bsd-2-clause | 5,257,306,446,090,939,000 | 28.026316 | 111 | 0.547495 | false | 3.577453 | false | false | false |
marcusbuffett/command-line-chess | src/main.py | 1 | 5340 | import random
import sys
from src.AI import AI
from src.Board import Board
from src.InputParser import InputParser
WHITE = True
BLACK = False
def askForPlayerSide():
playerChoiceInput = input(
"What side would you like to play as [wB]? ").lower()
if 'w' in playerChoiceInput:
print("You will play as white")
return WHITE
else:
print("You will play as black")
return BLACK
def askForDepthOfAI():
depthInput = 2
try:
depthInput = int(input("How deep should the AI look for moves?\n"
"Warning : values above 3 will be very slow."
" [2]? "))
except KeyboardInterrupt:
sys.exit()
    except Exception:
print("Invalid input, defaulting to 2")
return depthInput
def printCommandOptions():
undoOption = 'u : undo last move'
printLegalMovesOption = 'l : show all legal moves'
randomMoveOption = 'r : make a random move'
quitOption = 'quit : resign'
moveOption = 'a3, Nc3, Qxa2, etc : make the move'
options = [undoOption, printLegalMovesOption, randomMoveOption,
quitOption, moveOption, '', ]
print('\n'.join(options))
def printAllLegalMoves(board, parser):
for move in parser.getLegalMovesWithNotation(board.currentSide, short=True):
print(move.notation)
def getRandomMove(board, parser):
legalMoves = board.getAllMovesLegal(board.currentSide)
randomMove = random.choice(legalMoves)
randomMove.notation = parser.notationForMove(randomMove)
return randomMove
def makeMove(move, board):
print("Making move : " + move.notation)
board.makeMove(move)
def printPointAdvantage(board):
print("Currently, the point difference is : " +
str(board.getPointAdvantageOfSide(board.currentSide)))
def undoLastTwoMoves(board):
if len(board.history) >= 2:
board.undoLastMove()
board.undoLastMove()
def startGame(board, playerSide, ai):
parser = InputParser(board, playerSide)
while True:
print()
print(board)
print()
if board.isCheckmate():
if board.currentSide == playerSide:
print("Checkmate, you lost")
else:
print("Checkmate! You won!")
return
if board.isStalemate():
if board.currentSide == playerSide:
print("Stalemate")
else:
print("Stalemate")
return
if board.currentSide == playerSide:
# printPointAdvantage(board)
move = None
command = input("It's your move."
" Type '?' for options. ? ")
if command.lower() == 'u':
undoLastTwoMoves(board)
continue
elif command.lower() == '?':
printCommandOptions()
continue
elif command.lower() == 'l':
printAllLegalMoves(board, parser)
continue
elif command.lower() == 'r':
move = getRandomMove(board, parser)
elif command.lower() == 'exit' or command.lower() == 'quit':
return
try:
move = parser.parse(command)
except ValueError as error:
print("%s" % error)
continue
makeMove(move, board)
else:
print("AI thinking...")
move = ai.getBestMove()
move.notation = parser.notationForMove(move)
makeMove(move, board)
def twoPlayerGame(board):
parserWhite = InputParser(board, WHITE)
parserBlack = InputParser(board, BLACK)
while True:
print()
print(board)
print()
if board.isCheckmate():
print("Checkmate")
return
if board.isStalemate():
print("Stalemate")
return
# printPointAdvantage(board)
if board.currentSide == WHITE:
parser = parserWhite
else:
parser = parserBlack
move = None
command = input("It's your move, {}.".format(board.currentSideRep()) + \
" Type '?' for options. ? ")
if command.lower() == 'u':
undoLastTwoMoves(board)
continue
elif command.lower() == '?':
printCommandOptions()
continue
elif command.lower() == 'l':
printAllLegalMoves(board, parser)
continue
elif command.lower() == 'r':
move = getRandomMove(board, parser)
elif command.lower() == 'exit' or command.lower() == 'quit':
return
try:
move = parser.parse(command)
except ValueError as error:
print("%s" % error)
continue
makeMove(move, board)
board = Board()
def main():
try:
if len(sys.argv) >= 2 and sys.argv[1] == "--two":
twoPlayerGame(board)
else:
playerSide = askForPlayerSide()
print()
aiDepth = askForDepthOfAI()
opponentAI = AI(board, not playerSide, aiDepth)
startGame(board, playerSide, opponentAI)
except KeyboardInterrupt:
sys.exit()
if __name__ == "__main__":
main()
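# Example invocation (the module filename is an assumption, not from this file):
#   python main.py          # play against the AI
#   python main.py --two    # local two-player game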
| mit | -6,009,297,712,481,852,000 | 27.709677 | 80 | 0.551124 | false | 4.21468 | false | false | false |
openstack/monasca-thresh | docker/kafka_wait_for_topics.py | 2 | 5029 | #!/usr/bin/env python
# coding=utf-8
# (C) Copyright 2017 Hewlett Packard Enterprise Development LP
# (C) Copyright 2018 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wait for specific Kafka topics.
For using this script you need to set two environment variables:
* `KAFKA_URI` for connection string to Kafka together with port.
Example: `kafka:9092`, `192.168.10.6:9092`.
* `KAFKA_WAIT_FOR_TOPICS` that contain topics that should exist in Kafka
to consider it's working. Many topics should be separated with comma.
Example: `retry-notifications,alarm-state-transitions`.
After making sure that this environment variables are set you can simply
execute this script in the following way:
`python3 kafka_wait_for_topics.py && ./start_service.sh`
`python3 kafka_wait_for_topics.py || exit 1`
Additional environment variables available are:
* `LOG_LEVEL` - default to `INFO`
* `KAFKA_WAIT_RETRIES` - number of retries, default to `24`
* `KAFKA_WAIT_INTERVAL` - in seconds, default to `5`
"""
import logging
import os
import sys
import time
from pykafka import KafkaClient
from pykafka.exceptions import NoBrokersAvailableError
# Run this script only with Python 3
if sys.version_info.major != 3:
sys.stdout.write("Sorry, requires Python 3.x\n")
sys.exit(1)
LOG_LEVEL = logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
logging.basicConfig(level=LOG_LEVEL)
logger = logging.getLogger(__name__)
KAFKA_HOSTS = os.environ.get('KAFKA_URI', 'kafka:9092')
REQUIRED_TOPICS = os.environ.get('KAFKA_WAIT_FOR_TOPICS', '') \
.encode('utf-8').split(b',')
KAFKA_WAIT_RETRIES = int(os.environ.get('KAFKA_WAIT_RETRIES', '24'))
KAFKA_WAIT_INTERVAL = int(os.environ.get('KAFKA_WAIT_INTERVAL', '5'))
class TopicNoPartition(Exception):
"""Raise when topic has no partitions."""
class TopicNotFound(Exception):
"""Raise when topic was not found."""
def retry(retries=KAFKA_WAIT_RETRIES, delay=KAFKA_WAIT_INTERVAL,
check_exceptions=()):
"""Retry decorator."""
def decorator(func):
"""Decorator."""
def f_retry(*args, **kwargs):
"""Retry running function on exception after delay."""
for i in range(1, retries + 1):
try:
return func(*args, **kwargs)
# pylint: disable=W0703
# We want to catch all exceptions here to retry.
except check_exceptions + (Exception,) as exc:
if i < retries:
logger.info('Connection attempt %d of %d failed',
i, retries)
if isinstance(exc, check_exceptions):
logger.debug('Caught known exception, retrying...',
exc_info=True)
else:
                            logger.warning(
'Caught unknown exception, retrying...',
exc_info=True)
else:
logger.exception('Failed after %d attempts', retries)
raise
# No exception so wait before retrying
time.sleep(delay)
return f_retry
return decorator
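# Minimal sketch of applying the decorator above (the function name is
# illustrative, not part of this module):
#
#     @retry(retries=3, delay=1, check_exceptions=(TopicNotFound,))
#     def probe(client):
#         client.update_cluster()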
@retry(check_exceptions=(TopicNoPartition, TopicNotFound))
def check_topics(client, req_topics):
"""Check for existence of provided topics in Kafka."""
client.update_cluster()
logger.debug('Found topics: %r', client.topics.keys())
for req_topic in req_topics:
if req_topic not in client.topics.keys():
err_topic_not_found = 'Topic not found: {}'.format(req_topic)
logger.warning(err_topic_not_found)
raise TopicNotFound(err_topic_not_found)
topic = client.topics[req_topic]
if not topic.partitions:
err_topic_no_part = 'Topic has no partitions: {}'.format(req_topic)
logger.warning(err_topic_no_part)
raise TopicNoPartition(err_topic_no_part)
logger.info('Topic is ready: %s', req_topic)
@retry(check_exceptions=(NoBrokersAvailableError,))
def connect_kafka(hosts):
"""Connect to Kafka with retries."""
return KafkaClient(hosts=hosts)
def main():
"""Start main part of the wait script."""
logger.info('Checking for available topics: %r', repr(REQUIRED_TOPICS))
client = connect_kafka(hosts=KAFKA_HOSTS)
check_topics(client, REQUIRED_TOPICS)
if __name__ == '__main__':
main()
| apache-2.0 | -7,059,508,135,713,383,000 | 33.682759 | 79 | 0.634719 | false | 3.938136 | false | false | false |
TamiaLab/carnetdumaker | apps/blog/feeds.py | 1 | 14096 | """
RSS/Atom feeds for the blog app.
"""
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import ugettext_lazy as _
from apps.licenses.models import License
from .models import (Article,
ArticleTag,
ArticleCategory)
from .settings import NB_ARTICLES_PER_FEED
class BaseBlogArticleFeed(Feed):
"""
Base feed for articles.
"""
def items(self):
"""
Require implementation.
"""
raise NotImplementedError()
def item_title(self, item):
"""
Return the title of the article.
:param item: The current feed item.
"""
return item.title
def item_description(self, item):
"""
Return the description of the article.
:param item: The current feed item.
"""
content_html = item.content_html # TODO handle membership restriction
# FIXME Handle footnotes
return '<p><strong>%s</strong></p>\n%s' % (item.description_html, content_html) if item.description_html else content_html
def item_author_name(self, item):
"""
Return the author name for the article.
:param item: The current feed item.
"""
return item.author.username if item.author.is_active else _('Anonymous')
def item_pubdate(self, item):
"""
Return the published date of the article.
:param item: The current feed item.
"""
return item.pub_date
def item_updateddate(self, item):
"""
Return the last modification date of the article.
:param item: The current feed item.
"""
return item.last_content_modification_date or item.pub_date
def item_categories(self, item):
"""
Return the list of categories of the article.
:param item: The current feed item.
"""
cat_names = [c.name for c in item.categories.all()]
tag_names = [t.name for t in item.tags.all()]
return cat_names + tag_names
class LatestArticlesFeed(BaseBlogArticleFeed):
"""
Feed of latest articles.
"""
title = _('Latest articles')
link = reverse_lazy('blog:index')
feed_url = reverse_lazy('blog:latest_articles_rss')
description = _('Latest articles, all categories together')
def items(self):
"""
Return a list of the N most recent articles.
"""
return Article.objects.published().select_related('author') \
.prefetch_related('categories', 'tags')[:NB_ARTICLES_PER_FEED]
class LatestArticlesAtomFeed(LatestArticlesFeed):
"""
Feed of latest articles (ATOM version).
"""
feed_type = Atom1Feed
feed_url = reverse_lazy('blog:latest_articles_atom')
subtitle = LatestArticlesFeed.description
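# Sketch of the matching URLconf entries; the route names come from the
# reverse_lazy() calls above, but the patterns and 'blog' namespace wiring
# are assumptions, not taken from the project's urls.py:
#
#     urlpatterns = [
#         url(r'^rss/$', LatestArticlesFeed(), name='latest_articles_rss'),
#         url(r'^atom/$', LatestArticlesAtomFeed(), name='latest_articles_atom'),
#     ]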
class LatestArticlesForCategoryFeed(BaseBlogArticleFeed):
"""
Feed of latest articles for a specific category.
"""
def get_object(self, request, *args, **kwargs):
"""
Return the desired ArticleCategory object by his slug hierarchy.
:param request: The current request.
:param args: Extra arguments.
:param kwargs: Extra keywords arguments.
:return: ArticleCategory
"""
# Get desired category hierarchy
hierarchy = kwargs.pop('hierarchy')
assert hierarchy is not None
# Get the category object by slug hierarchy
return ArticleCategory.objects.get(slug_hierarchy=hierarchy)
def title(self, obj):
"""
Return the title of the category.
:param obj: The feed object.
"""
return _('Latest articles in category "%s"') % obj.name
def link(self, obj):
"""
Return the permalink to the category.
:param obj: The feed object.
"""
return obj.get_absolute_url()
def feed_url(self, obj):
"""
Return the permalink to the latest articles RSS feed for this category.
:param obj: The feed object.
"""
return obj.get_latest_articles_rss_feed_url()
def description(self, obj):
"""
Return the description of the category.
:param obj: The feed object.
"""
return obj.description_html or _('Latest articles in category "%s"') % obj.name
def items(self, obj):
"""
Return all article for this category.
:param obj: The feed object.
"""
return obj.articles.published().select_related('author') \
.prefetch_related('categories', 'tags')[:NB_ARTICLES_PER_FEED]
class LatestArticlesForCategoryAtomFeed(LatestArticlesForCategoryFeed):
"""
Feed of latest articles for a specific category (ATOM version).
"""
feed_type = Atom1Feed
subtitle = LatestArticlesForCategoryFeed.description
def feed_url(self, obj):
"""
Return the permalink to the latest articles ATOM feed for this category.
:param obj: The feed object.
"""
return obj.get_latest_articles_atom_feed_url()
class LatestArticlesForLicenseFeed(BaseBlogArticleFeed):
"""
Feed of latest articles for a specific license.
"""
def get_object(self, request, *args, **kwargs):
"""
Return the desired License object by his slug.
:param request: The current request.
:param args: Extra arguments.
:param kwargs: Extra keywords arguments.
:return: ArticleLicense
"""
# Get desired license slug
slug = kwargs.pop('slug')
assert slug is not None
# Retrieve the license object
return License.objects.get(slug=slug)
def title(self, obj):
"""
Return the title of the license.
:param obj: The feed object.
"""
return _('Latest articles with license "%s"') % obj.name
def link(self, obj):
"""
Return the permalink to the license.
:param obj: The feed object.
"""
return reverse('bloglicense:license_articles_detail', kwargs={'slug': obj.slug})
def description(self, obj):
"""
Return the description of the license.
:param obj: The feed object.
"""
return obj.description_html or _('Latest articles with license "%s"') % obj.name
def feed_url(self, obj):
"""
Return the permalink to the latest articles RSS feed with this license.
:param obj: The feed object.
"""
return reverse('bloglicense:latest_license_articles_rss', kwargs={'slug': obj.slug})
def items(self, obj):
"""
Return all article for this license.
:param obj: The feed object.
"""
return obj.articles.published().select_related('author') \
.prefetch_related('categories', 'tags')[:NB_ARTICLES_PER_FEED]
class LatestArticlesForLicenseAtomFeed(LatestArticlesForLicenseFeed):
"""
Feed of latest articles for a specific license (ATOM version).
"""
feed_type = Atom1Feed
subtitle = LatestArticlesForLicenseFeed.description
def feed_url(self, obj):
"""
Return the permalink to the latest articles Atom feed with this license.
:param obj: The feed object.
"""
return reverse('bloglicense:latest_license_articles_atom', kwargs={'slug': obj.slug})
class LatestArticlesForTagFeed(BaseBlogArticleFeed):
"""
Feed of latest articles for a specific tag.
"""
def get_object(self, request, *args, **kwargs):
"""
Return the desired ArticleTag object by his slug.
:param request: The current request.
:param args: Extra arguments.
:param kwargs: Extra keywords arguments.
:return: ArticleTag
"""
# Get desired tag slug
slug = kwargs.pop('slug')
assert slug is not None
# Retrieve the tag object
return ArticleTag.objects.get(slug=slug)
def title(self, obj):
"""
Return the title of the tag.
:param obj: The feed object.
"""
return _('Latest articles with tag "%s"') % obj.name
def link(self, obj):
"""
Return the permalink to the tag.
:param obj: The feed object.
"""
return obj.get_absolute_url()
def description(self, obj):
"""
Return the description of the tag.
:param obj: The feed object.
"""
return _('Latest articles with tag "%s"') % obj.name
def feed_url(self, obj):
"""
Return the permalink to the latest articles RSS feed for this tag.
:param obj: The feed object.
"""
return obj.get_latest_articles_rss_feed_url()
def items(self, obj):
"""
Return all article for this tag.
:param obj: The feed object.
"""
return obj.articles.published().select_related('author') \
.prefetch_related('categories', 'tags')[:NB_ARTICLES_PER_FEED]
class LatestArticlesForTagAtomFeed(LatestArticlesForTagFeed):
"""
Feed of latest articles for a specific tag (ATOM version).
"""
feed_type = Atom1Feed
subtitle = LatestArticlesForTagFeed.description
def feed_url(self, obj):
"""
Return the permalink to the latest articles Atom feed for this tag.
:param obj: The feed object.
"""
return obj.get_latest_articles_atom_feed_url()
class ArticlesForYearFeed(BaseBlogArticleFeed):
"""
Feed of articles for a specific year.
"""
def get_object(self, request, *args, **kwargs):
"""
Return the desired year as a dict.
:param request: The current request.
:param args: Extra arguments.
:param kwargs: Extra keywords arguments.
:return: dict with year key.
"""
# Get desired archive year
year = kwargs.pop('year')
assert year is not None
# Return the year
return {'year': year}
def title(self, obj):
"""
Return the title of the archive.
:param obj: The feed object.
"""
return _('Latest articles for year %(year)s') % obj
def link(self, obj):
"""
Return the permalink to the archive.
:param obj: The feed object.
"""
return reverse('blog:archive_year', kwargs=obj)
def description(self, obj):
"""
Return the description of the archive.
:param obj: The feed object.
"""
return _('Latest articles for year %(year)s') % obj
def feed_url(self, obj):
"""
Return the permalink to the articles archive RSS feed for this year.
:param obj: The feed object.
"""
return reverse('blog:articles_archive_year_rss', kwargs=obj)
def items(self, obj):
"""
Return all article for this archive.
:param obj: The feed object.
"""
return Article.objects.published().filter(pub_date__year=int(obj['year'])) \
.select_related('author').prefetch_related('categories', 'tags')
class ArticlesForYearAtomFeed(ArticlesForYearFeed):
"""
Feed of articles for a specific year (ATOM version).
"""
feed_type = Atom1Feed
subtitle = ArticlesForYearFeed.description
def feed_url(self, obj):
"""
Return the permalink to the articles archive Atom feed for this year.
:param obj: The feed object.
"""
return reverse('blog:articles_archive_year_atom', kwargs=obj)
class ArticlesForYearAndMonthFeed(BaseBlogArticleFeed):
"""
Feed of articles for a specific year and month.
"""
def get_object(self, request, *args, **kwargs):
"""
Return the desired year and month as a dict.
:param request: The current request.
:param args: Extra arguments.
:param kwargs: Extra keywords arguments.
:return: dict with year and month keys.
"""
# Get desired archive year and month
year = kwargs.pop('year')
month = kwargs.pop('month')
assert year is not None
assert month is not None
# Return the year and month
return {'year': year, 'month': month}
def title(self, obj):
"""
Return the title of the archive.
:param obj: The feed object.
"""
return _('Latest articles for month %(year)s/%(month)s') % obj
def link(self, obj):
"""
Return the permalink to the archive.
:param obj: The feed object.
"""
return reverse('blog:archive_month', kwargs=obj)
def description(self, obj):
"""
Return the description of the archive.
:param obj: The feed object.
"""
return _('Latest articles for month %(year)s/%(month)s') % obj
def feed_url(self, obj):
"""
Return the permalink to the articles archive RSS feed for this year.
:param obj: The feed object.
"""
return reverse('blog:articles_archive_month_rss', kwargs=obj)
def items(self, obj):
"""
Return all article for this archive.
:param obj: The feed object.
"""
return Article.objects.published().filter(pub_date__year=int(obj['year']),
pub_date__month=int(obj['month'])) \
.select_related('author').prefetch_related('categories', 'tags')
class ArticlesForYearAndMonthAtomFeed(ArticlesForYearAndMonthFeed):
"""
Feed of articles for a specific year and month (ATOM version).
"""
feed_type = Atom1Feed
subtitle = ArticlesForYearAndMonthFeed.description
def feed_url(self, obj):
"""
Return the permalink to the articles archive Atom feed for this year.
:param obj: The feed object.
"""
return reverse('blog:articles_archive_month_atom', kwargs=obj)
| agpl-3.0 | -2,670,948,389,950,702,000 | 28.738397 | 130 | 0.600809 | false | 4.343914 | true | false | false |
CivicKnowledge/ambry | ambry/identity.py | 1 | 51456 | """Identity objects for constructing names for bundles and partitions, and
Object Numbers for datasets, columns, partitions and tables.
Copyright (c) 2013 Clarinova. This file is licensed under the terms of
the Revised BSD License, included in this distribution as LICENSE.txt
"""
from collections import OrderedDict
from copy import copy
import json
import os
import random
import time
from six import iteritems, itervalues, string_types
import requests
import semantic_version as sv
from .util import md5_for_file, Constant
class NotObjectNumberError(ValueError):
pass
class Base62DecodeError(ValueError):
pass
class Name(object):
"""The Name part of an identity."""
NAME_PART_SEP = '-'
DEFAULT_FORMAT = 'db'
# Name, Default Value, Is Optional
_name_parts = [('source', None, False),
('dataset', None, False),
('subset', None, True),
('type', None, True),
('part', None, True),
('bspace', None, True),
('btime', None, True),
('variation', None, True),
# Semantic Version, different from Object Number revision,
# which is an int. "Version" is the preferred name,
# but 'revision' is in the databases schema.
('version', None, True)
]
# Names that are generated from the name parts.
_generated_names = [
('name', None, True),
('vname', None, True),
('fqname', None, True)]
source = None
dataset = None
subset = None
type = None
part = None
variation = None
btime = None
bspace = None
version = None
def __init__(self, *args, **kwargs):
"""
:param args:
:param kwargs:
"""
for k, default, optional in self.name_parts:
if optional:
setattr(self, k, kwargs.get(k, default))
else:
setattr(self, k, kwargs.get(k))
self.version = self._parse_version(self.version)
self.clean()
self.is_valid()
def clean(self):
import re
for k, default, optional in self.name_parts:
# Skip the names in name query.
v = getattr(self, k)
if not v or not isinstance(v, string_types):
# Can only clean strings.
continue
# The < and > chars are only there to for <any> and <none> and version specs.
# . is needs for source, and + is needed for version specs
nv = re.sub(r'[^a-zA-Z0-9\.\<\>=]', '_', v).lower()
if v != nv:
setattr(self, k, nv)
def is_valid(self):
"""
:raise ValueError:
"""
for k, _, optional in self.name_parts:
if not optional and not bool(getattr(self, k)):
raise ValueError(
"Name requires field '{}' to have a value. Got: {}" .format(
k,
self.name_parts))
def _parse_version(self, version):
if version is not None and isinstance(version, string_types):
if version == NameQuery.ANY:
pass
elif version == NameQuery.NONE:
pass
else:
try:
version = str(sv.Version(version))
except ValueError:
try:
version = str(sv.Spec(version))
except ValueError:
raise ValueError("Could not parse '{}' as a semantic version".format(version))
if not version:
version = str(sv.Version('0.0.0'))
return version
@property
def name_parts(self):
return self._name_parts
def clear_dict(self, d):
return {k: v for k, v in list(d.items()) if v}
@property
def dict(self):
"""Returns the identity as a dict.
values that are empty are removed
"""
return self._dict(with_name=True)
def _dict(self, with_name=True):
"""Returns the identity as a dict.
values that are empty are removed
"""
d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])
if with_name:
d['name'] = self.name
try:
d['vname'] = self.vname
except ValueError:
pass
return self.clear_dict(d)
@property
def name(self):
"""String version of the name, excluding the version, and excluding the
format, if the format is 'db'."""
d = self._dict(with_name=False)
return self.NAME_PART_SEP.join([str(d[k]) for (k, _, _) in self.name_parts if k and d.get(
k, False) and k != 'version' and not (k == 'format' and d[k] == Name.DEFAULT_FORMAT)])
@property
def vname(self):
if not self.version:
raise ValueError('No version set')
if isinstance(self.version, sv.Spec):
return self.name + str(self.version)
else:
return self.name + self.NAME_PART_SEP + str(self.version)
def _path_join(self, names=None, excludes=None, sep=os.sep):
d = self._dict(with_name=False)
if isinstance(excludes, string_types):
excludes = {excludes}
if not isinstance(excludes, set):
excludes = set(excludes)
if not names:
if not excludes:
excludes = set([])
names = set(k for k, _, _ in self.name_parts) - set(excludes)
else:
names = set(names)
final_parts = [str(d[k]) for (k, _, _) in self.name_parts
if k and d.get(k, False) and k in (names - excludes)]
return sep.join(final_parts)
@property
def path(self):
"""The path of the bundle source.
Includes the revision.
"""
# Need to do this to ensure the function produces the
# bundle path when called from subclasses
names = [k for k, _, _ in Name._name_parts]
return os.path.join(
self.source, self._path_join(names=names, excludes='source', sep=self.NAME_PART_SEP))
@property
def source_path(self):
"""The name in a form suitable for use in a filesystem.
Excludes the revision
"""
# Need to do this to ensure the function produces the
# bundle path when called from subclasses
names = [k for k, _, _ in self._name_parts]
parts = [self.source]
if self.bspace:
parts.append(self.bspace)
parts.append(
self._path_join(names=names, excludes=['source', 'version', 'bspace'], sep=self.NAME_PART_SEP))
return os.path.join(*parts)
@property
def cache_key(self):
"""The name in a form suitable for use as a cache-key"""
try:
return self.path
except TypeError:
raise TypeError("self.path is invalild: '{}', '{}'".format(str(self.path), type(self.path)))
def clone(self):
return self.__class__(**self.dict)
def ver(self, revision):
"""Clone and change the version."""
c = self.clone()
        c.version = self._parse_version(revision)
return c
def type_is_compatible(self, o):
if not isinstance(o, DatasetNumber):
return False
else:
return True
# The name always stores the version number as a string, so these
# convenience functions make it easier to update specific parts
@property
def version_minor(self):
return sv.Version(self.version).minor
@version_minor.setter
def version_minor(self, value):
v = sv.Version(self.version)
v.minor = int(value)
self.version = str(v)
@property
def version_major(self):
        return sv.Version(self.version).major
@version_major.setter
def version_major(self, value):
v = sv.Version(self.version)
v.major = int(value)
self.version = str(v)
@property
def version_patch(self):
return sv.Version(self.version).patch
@version_patch.setter
def version_patch(self, value):
v = sv.Version(self.version)
v.patch = int(value)
self.version = str(v)
@property
def version_build(self):
return sv.Version(self.version).build
@version_build.setter
def version_build(self, value):
v = sv.Version(self.version)
v.build = value
self.version = str(v)
def as_partition(self, **kwargs):
"""Return a PartitionName based on this name."""
return PartitionName(**dict(list(self.dict.items()) + list(kwargs.items())))
def as_namequery(self):
return NameQuery(**self._dict(with_name=False))
def __str__(self):
return self.name
class PartialPartitionName(Name):
"""For specifying a PartitionName within the context of a bundle."""
FORMAT = 'default'
time = None
space = None
table = None
grain = None
format = None
variant = None
segment = None
_name_parts = [
('table', None, True),
('time', None, True),
('space', None, True),
('grain', None, True),
('format', None, True),
('variant', None, True),
('segment', None, True)]
def promote(self, name):
"""Promote to a PartitionName by combining with a bundle Name."""
return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items())))
def is_valid(self):
pass
def __eq__(self, o):
return (self.time == o.time and self.space == o.space and self.table == o.table and
self.grain == o.grain and self.format == o.format and self.segment == o.segment
and self.variant == o.variant
)
def __cmp__(self, o):
return cmp(str(self), str(o))
def __hash__(self):
return (hash(self.time) ^ hash(self.space) ^ hash(self.table) ^
hash(self.grain) ^ hash(self.format) ^ hash(self.segment) ^ hash(self.variant))
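# Illustrative use of promote() (the field values and the bundle_name variable
# are made up): merging a bundle Name into a partial partition name yields a
# full PartitionName.
#
#     pname = PartialPartitionName(table='geo', time='2010').promote(bundle_name)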
class PartitionName(PartialPartitionName, Name):
"""A Partition Name."""
_name_parts = (Name._name_parts[0:-1] +
PartialPartitionName._name_parts +
Name._name_parts[-1:])
def _local_parts(self):
parts = []
if self.format and self.format != Name.DEFAULT_FORMAT:
parts.append(str(self.format))
if self.table:
parts.append(self.table)
l = []
if self.time:
l.append(str(self.time))
if self.space:
l.append(str(self.space))
if l:
parts.append(self.NAME_PART_SEP.join(l))
l = []
if self.grain:
l.append(str(self.grain))
if self.variant:
l.append(str(self.variant))
if self.segment:
l.append(str(self.segment))
if l:
parts.append(self.NAME_PART_SEP.join([str(x) for x in l]))
# the format value is part of the file extension
return parts
@property
def name(self):
d = self._dict(with_name=False)
return self.NAME_PART_SEP.join(
[str(d[k]) for (k, _, _) in self.name_parts
if k and d.get(k, False) and k != 'version' and (k != 'format' or str(d[k]) != Name.DEFAULT_FORMAT)]
)
@property
def path(self):
"""The path of the bundle source.
Includes the revision.
"""
# Need to do this to ensure the function produces the
# bundle path when called from subclasses
names = [k for k, _, _ in Name._name_parts]
return os.path.join(self.source,
self._path_join(names=names, excludes=['source', 'format'], sep=self.NAME_PART_SEP),
*self._local_parts()
)
@property
def source_path(self):
        raise NotImplementedError("PartitionNames don't have source paths")
@property
def sub_path(self):
"""The path of the partition source, excluding the bundle path parts.
Includes the revision.
"""
try:
return os.path.join(*(self._local_parts()))
except TypeError as e:
raise TypeError(
"Path failed for partition {} : {}".format(
self.name,
                    e))
def type_is_compatible(self, o):
if not isinstance(o, PartitionNumber):
return False
else:
return True
@classmethod
def format_name(cls):
return cls.FORMAT
@classmethod
def extension(cls):
return cls.PATH_EXTENSION
def as_namequery(self):
return PartitionNameQuery(**self._dict(with_name=False))
def as_partialname(self):
return PartialPartitionName(** self.dict)
@property
    def partital_dict(self):
"""Returns the name as a dict, but with only the items that are
particular to a PartitionName."""
d = self._dict(with_name=False)
d = {k: d.get(k) for k, _, _ in PartialPartitionName._name_parts if d.get(k, False)}
if 'format' in d and d['format'] == Name.DEFAULT_FORMAT:
del d['format']
d['name'] = self.name
return d
class PartialMixin(object):
NONE = '<none>'
ANY = '<any>'
use_clear_dict = True
def clear_dict(self, d):
if self.use_clear_dict:
return {k: v if v is not None else self.NONE for k, v in list(d.items())}
else:
return d
def _dict(self, with_name=True):
"""Returns the identity as a dict.
values that are empty are removed
"""
d = dict([(k, getattr(self, k)) for k, _, _ in self.name_parts])
return self.clear_dict(d)
def with_none(self):
"""Convert the NameQuery.NONE to None. This is needed because on the
kwargs list, a None value means the field is not specified, which
equates to ANY. The _find_orm() routine, however, is easier to write if
the NONE value is actually None.
Returns a clone of the origin, with NONE converted to None
"""
n = self.clone()
for k, _, _ in n.name_parts:
if getattr(n, k) == n.NONE:
delattr(n, k)
n.use_clear_dict = False
return n
def is_valid(self):
return True
@property
def path(self):
raise NotImplementedError("Can't get a path from a partial name")
@property
def cache_key(self):
raise NotImplementedError("Can't get a cache_key from a partial name")
class NameQuery(PartialMixin, Name):
"""A partition name used for finding and searching. does not have an
expectation of having all parts completely defined, and can't be used to
generate a string.
When a partial name is returned as a dict, parts that were not
specified in the constructor have a value of '<any.', and parts that
were specified as None have a value of '<none>'
"""
NONE = PartialMixin.NONE
ANY = PartialMixin.ANY
# These are valid values for a name query, so we need to remove the
# properties
name = None
vname = None
fqname = None
def clean(self):
"""Null operation, since NameQueries should not be cleaned.
:return:
"""
pass
@property
def name_parts(self):
"""Works with PartialNameMixin.clear_dict to set NONE and ANY
values."""
default = PartialMixin.ANY
np = ([(k, default, True)
for k, _, _ in super(NameQuery, self).name_parts]
+
[(k, default, True)
for k, _, _ in Name._generated_names]
)
return np
class PartitionNameQuery(PartialMixin, PartitionName):
"""A partition name used for finding and searching.
does not have an expectation of having all parts completely defined,
and can't be used to generate a string
"""
# These are valid values for a name query
name = None
vname = None
fqname = None
def clean(self):
"""Null operation, since NameQueries should not be cleaned.
:return:
"""
pass
@property
def name_parts(self):
"""Works with PartialNameMixin.clear_dict to set NONE and ANY
values."""
default = PartialMixin.ANY
return ([(k, default, True)
for k, _, _ in PartitionName._name_parts]
+
[(k, default, True)
for k, _, _ in Name._generated_names]
)
class ObjectNumber(object):
"""Static class for holding constants and static methods related to object
numbers."""
# When a name is resolved to an ObjectNumber, orig can
# be set to the input value, which can be important, for instance,
# if the value's use depends on whether the user specified a version
# number, since all values are resolved to versioned ONs
orig = None
assignment_class = 'self'
TYPE = Constant()
TYPE.DATASET = 'd'
TYPE.PARTITION = 'p'
TYPE.TABLE = 't'
TYPE.COLUMN = 'c'
TYPE.CONFIG = 'F'
TYPE.OTHER1 = 'other1'
TYPE.OTHER2 = 'other2'
VERSION_SEP = ''
DLEN = Constant()
# Number of digits in each assignment class
# TODO: Add a 22 digit version for UUIDs ( 2^128 ~= 62^22 )
DLEN.DATASET = (3, 5, 7, 9)
DLEN.DATASET_CLASSES = dict(
authoritative=DLEN.DATASET[0], # Datasets registered by number authority .
registered=DLEN.DATASET[1], # For registered users of a numbering authority
unregistered=DLEN.DATASET[2], # For unregistered users of a numebring authority
self=DLEN.DATASET[3]) # Self registered
DLEN.PARTITION = 3
DLEN.TABLE = 2
DLEN.COLUMN = 3
DLEN.REVISION = (0, 3)
DLEN.OTHER1 = 4
DLEN.OTHER2 = 4
    # Because the dataset number can be 3, 5, 7 or 9 characters, and the
    # revision is optional, datasets (and thus all other objects) can have
    # several different lengths. We use these different lengths to determine
    # what kinds of fields to parse.
    # 's' -> short dataset, 'l' -> long dataset, 'r' -> has revision
#
# generate with:
# {
# ds_len+rl:(ds_len, (rl if rl != 0 else None), cls)
    #    for cls, ds_len in self.DLEN.DATASET_CLASSES.items()
# for rl in self.DLEN.REVISION
# }
#
DATASET_LENGTHS = {
3: (3, None, 'authoritative'),
5: (5, None, 'registered'),
6: (3, 3, 'authoritative'),
7: (7, None, 'unregistered'),
8: (5, 3, 'registered'),
9: (9, None, 'self'),
10: (7, 3, 'unregistered'),
12: (9, 3, 'self')}
    # Number of characters that aren't the dataset and revision
NDS_LENGTH = {'d': 0,
'p': DLEN.PARTITION,
't': DLEN.TABLE,
'c': DLEN.TABLE + DLEN.COLUMN,
'other1': DLEN.OTHER1,
'other2': DLEN.OTHER1 + DLEN.OTHER2,
'F': DLEN.OTHER1 # Configs
}
TCMAXVAL = 62 ** DLEN.TABLE - 1 # maximum for table values.
CCMAXVAL = 62 ** DLEN.COLUMN - 1 # maximum for column values.
    # maximum for partition values.
PARTMAXVAL = 62 ** DLEN.PARTITION - 1
EPOCH = 1389210331 # About Jan 8, 2014
@classmethod
def parse(cls, on_str, force_type=None): # @ReservedAssignment
"""Parse a string into one of the object number classes."""
on_str_orig = on_str
if on_str is None:
return None
if not on_str:
raise NotObjectNumberError("Got null input")
if not isinstance(on_str, string_types):
raise NotObjectNumberError("Must be a string. Got a {} ".format(type(on_str)))
# if isinstance(on_str, unicode):
# dataset = on_str.encode('ascii')
if force_type:
type_ = force_type
else:
type_ = on_str[0]
on_str = on_str[1:]
if type_ not in list(cls.NDS_LENGTH.keys()):
raise NotObjectNumberError("Unknown type character '{}' for '{}'".format(type_, on_str_orig))
ds_length = len(on_str) - cls.NDS_LENGTH[type_]
if ds_length not in cls.DATASET_LENGTHS:
raise NotObjectNumberError(
"Dataset string '{}' has an unfamiliar length: {}".format(on_str_orig, ds_length))
ds_lengths = cls.DATASET_LENGTHS[ds_length]
assignment_class = ds_lengths[2]
try:
dataset = int(ObjectNumber.base62_decode(on_str[0:ds_lengths[0]]))
if ds_lengths[1]:
i = len(on_str) - ds_lengths[1]
revision = int(ObjectNumber.base62_decode(on_str[i:]))
on_str = on_str[0:i] # remove the revision
else:
revision = None
on_str = on_str[ds_lengths[0]:]
if type_ == cls.TYPE.DATASET:
return DatasetNumber(dataset, revision=revision, assignment_class=assignment_class)
elif type_ == cls.TYPE.TABLE:
table = int(ObjectNumber.base62_decode(on_str))
return TableNumber(
DatasetNumber(dataset, assignment_class=assignment_class), table, revision=revision)
elif type_ == cls.TYPE.PARTITION:
partition = int(ObjectNumber.base62_decode(on_str))
return PartitionNumber(
DatasetNumber(dataset, assignment_class=assignment_class), partition, revision=revision)
elif type_ == cls.TYPE.COLUMN:
table = int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.TABLE]))
column = int(ObjectNumber.base62_decode(on_str[cls.DLEN.TABLE:]))
return ColumnNumber(
TableNumber(DatasetNumber(dataset, assignment_class=assignment_class), table),
column, revision=revision)
elif type_ == cls.TYPE.OTHER1 or type_ == cls.TYPE.CONFIG:
return GeneralNumber1(on_str_orig[0],
DatasetNumber(dataset, assignment_class=assignment_class),
int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
revision=revision)
elif type_ == cls.TYPE.OTHER2:
return GeneralNumber2(on_str_orig[0],
DatasetNumber(dataset, assignment_class=assignment_class),
int(ObjectNumber.base62_decode(on_str[0:cls.DLEN.OTHER1])),
int(ObjectNumber.base62_decode(
on_str[cls.DLEN.OTHER1:cls.DLEN.OTHER1+cls.DLEN.OTHER2])),
revision=revision)
else:
raise NotObjectNumberError('Unknown type character: ' + type_ + ' in ' + str(on_str_orig))
except Base62DecodeError as e:
raise NotObjectNumberError('Unknown character: ' + str(e))
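    # Worked example (the number string is hypothetical): parse('d00000000a')
    # sees 9 digits after 'd', matches DATASET_LENGTHS[9] == (9, None, 'self'),
    # and returns a self-assigned DatasetNumber with dataset == 10 and no
    # revision.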
@classmethod
def base62_encode(cls, num):
"""Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
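    # For example, base62_encode(61) == 'Z' and base62_encode(62) == '10';
    # base62_decode below inverts this, so base62_decode(base62_encode(n)) == n.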
@classmethod
def base62_decode(cls, string):
"""Decode a Base X encoded string into the number.
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
base = len(alphabet)
strlen = len(string)
num = 0
idx = 0
for char in string:
power = (strlen - (idx + 1))
try:
num += alphabet.index(char) * (base ** power)
except ValueError:
raise Base62DecodeError(
"Failed to decode char: '{}'".format(char))
idx += 1
return num
@classmethod
def increment(cls, v):
"""Increment the version number of an object number of object number string"""
if not isinstance(v, ObjectNumber):
v = ObjectNumber.parse(v)
return v.rev(v.revision+1)
def rev(self, i):
"""Return a clone with a different revision."""
on = copy(self)
on.revision = i
return on
def __eq__(self, other):
return str(self) == str(other)
@classmethod
def _rev_str(cls, revision):
if not revision:
return ''
revision = int(revision)
return (
ObjectNumber.base62_encode(revision).rjust(
cls.DLEN.REVISION[1],
'0') if bool(revision) else '')
class TopNumber(ObjectNumber):
"""A general top level number, with a given number space.
    Just like a DatasetNumber, but without the 'd'.
"""
def __init__(self, space, dataset=None, revision=None, assignment_class='self'):
"""Constructor."""
if len(space) > 1:
raise ValueError("Number space must be a single letter")
self.space = space
self.assignment_class = assignment_class
if dataset is None:
digit_length = self.DLEN.DATASET_CLASSES[self.assignment_class]
# On 64 bit machine, max is about 10^17, 2^53
# That should be random enough to prevent
# collisions for a small number of self assigned numbers
            max_number = 62 ** digit_length - 1
            dataset = random.randint(0, max_number)
self.dataset = dataset
self.revision = revision
@classmethod
def from_hex(cls, h, space, assignment_class='self'):
"""Produce a TopNumber, with a length to match the given assignment
class, based on an input hex string.
This can be used to create TopNumbers from a hash of a string.
"""
from math import log
        # Use the ln(N)/ln(base) trick to find the right number of hex
        # digits to use
hex_digits = int(
round(log(62 ** TopNumber.DLEN.DATASET_CLASSES[assignment_class]) / log(16), 0))
i = int(h[:hex_digits], 16)
return TopNumber(space, i, assignment_class=assignment_class)
@classmethod
def from_string(cls, s, space):
"""Produce a TopNumber by hashing a string."""
import hashlib
hs = hashlib.sha1(s).hexdigest()
return cls.from_hex(hs, space)
def _ds_str(self):
ds_len = self.DLEN.DATASET_CLASSES[self.assignment_class]
return ObjectNumber.base62_encode(self.dataset).rjust(ds_len, '0')
def __str__(self):
return (self.space + self._ds_str() + ObjectNumber._rev_str(self.revision))
class DatasetNumber(ObjectNumber):
"""An identifier for a dataset."""
def __init__(self, dataset=None, revision=None, assignment_class='self'):
"""Constructor."""
self.assignment_class = assignment_class
if dataset is None:
digit_length = self.DLEN.DATASET_CLASSES[self.assignment_class]
# On 64 bit machine, max is about 10^17, 2^53
# That should be random enough to prevent
# collisions for a small number of self assigned numbers
            max_number = 62 ** digit_length - 1
            dataset = random.randint(0, max_number)
self.dataset = dataset
self.revision = revision
def _ds_str(self):
ds_len = self.DLEN.DATASET_CLASSES[self.assignment_class]
return ObjectNumber.base62_encode(self.dataset).rjust(ds_len, '0')
@property
def as_dataset(self):
return copy(self)
def as_partition(self, partition_number=0):
"""Return a new PartitionNumber based on this DatasetNumber."""
return PartitionNumber(self, partition_number)
def __str__(self):
return (ObjectNumber.TYPE.DATASET + self._ds_str() + ObjectNumber._rev_str(self.revision))
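# For example (values illustrative), str(DatasetNumber(10)) is 'd00000000a'
# (a 9-digit 'self'-class number), and DatasetNumber(10).as_partition(1)
# renders as 'p00000000a001'.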
class TableNumber(ObjectNumber):
"""An identifier for a table."""
def __init__(self, dataset, table, revision=None):
if not isinstance(dataset, DatasetNumber):
raise ValueError("Constructor requires a DatasetNumber")
if table > ObjectNumber.TCMAXVAL:
raise ValueError("Table value '{}' is too large".format(table))
self.dataset = dataset
self.table = table
self.revision = revision
if not self.revision and dataset.revision:
self.revision = dataset.revision
@property
def as_table(self):
"""Returns self, so TableNumber and Column number can be used
interchangably."""
return self
@property
def as_dataset(self):
"""Unlike the .dataset property, this will include the revision."""
return self.dataset.rev(self.revision)
def __str__(self):
return (
ObjectNumber.TYPE.TABLE +
self.dataset._ds_str() +
ObjectNumber.base62_encode(self.table).rjust(self.DLEN.TABLE, '0') +
ObjectNumber._rev_str(self.revision))
class ColumnNumber(ObjectNumber):
"""An identifier for a column."""
def __init__(self, table, column, revision=None):
if not isinstance(table, TableNumber):
raise ValueError("Constructor requires a TableNumber. got: " + str(type(table)))
column = int(column)
if column > ObjectNumber.CCMAXVAL:
            raise ValueError(
                "Value {} is too large ( max is {} ) ".format(
                    column,
                    ObjectNumber.CCMAXVAL))
self.table = table
self.column = column
self.revision = revision
if not self.revision and table.revision:
self.revision = table.revision
@property
def dataset(self):
"""Return the dataset number for ths partition."""
return self.table.dataset
@property
def as_dataset(self):
"""Unlike the .dataset property, this will include the revision."""
return self.table.dataset.rev(self.revision)
@property
def as_table(self):
"""Unlike the .dataset property, this will include the revision."""
return self.table.rev(self.revision)
def __str__(self):
return (
ObjectNumber.TYPE.COLUMN +
self.dataset._ds_str() +
ObjectNumber.base62_encode(
self.table.table).rjust(
self.DLEN.TABLE,
'0') +
ObjectNumber.base62_encode(
self.column).rjust(
self.DLEN.COLUMN,
'0') +
ObjectNumber._rev_str(
self.revision))
class PartitionNumber(ObjectNumber):
"""An identifier for a partition."""
def __init__(self, dataset, partition, revision=None):
"""
Arguments:
dataset -- Must be a DatasetNumber
partition -- an integer, from 0 to 62^3
"""
partition = int(partition)
if not isinstance(dataset, DatasetNumber):
raise ValueError("Constructor requires a DatasetNumber. Got '{}' ".format(dataset))
if partition > ObjectNumber.PARTMAXVAL:
raise ValueError("Value is too large. Max is: {}".format(ObjectNumber.PARTMAXVAL))
self.dataset = dataset
self.partition = partition
self.revision = revision
if not self.revision and dataset.revision:
self.revision = dataset.revision
@property
def as_dataset(self):
"""Unlike the .dataset property, this will include the revision."""
return self.dataset.rev(self.revision)
def __str__(self):
return (
ObjectNumber.TYPE.PARTITION +
self.dataset._ds_str() +
ObjectNumber.base62_encode(self.partition).rjust(self.DLEN.PARTITION, '0') +
ObjectNumber._rev_str(self.revision))
class GeneralNumber1(ObjectNumber):
"""Other types of number. Can have any type code, and 4 digits of number, directly
descended from the dataset"""
def __init__(self, type_code, dataset, num, revision=None):
if isinstance(dataset, string_types):
dataset = ObjectNumber.parse(dataset).as_dataset
try:
dataset = dataset.as_dataset
except AttributeError:
raise ValueError(
'Constructor requires a DatasetNumber or ObjectNumber that converts to a DatasetNumber')
self.type_code = type_code
self.dataset = dataset
self.number = num
self.revision = revision
if not self.revision and dataset.revision:
self.revision = dataset.revision
@property
def as_dataset(self):
"""Unlike the .dataset property, this will include the revision."""
return self.dataset.rev(self.revision)
def __str__(self):
return (
self.type_code +
self.dataset._ds_str() +
ObjectNumber.base62_encode(self.number).rjust(self.DLEN.OTHER1, '0') +
ObjectNumber._rev_str(self.revision))
class GeneralNumber2(ObjectNumber):
"""Like General Number 2, but has a second level"""
def __init__(self, type_code, dataset, num1, num2, revision=None):
if isinstance(dataset, string_types):
dataset = ObjectNumber.parse(dataset).as_dataset
try:
dataset = dataset.as_dataset
except AttributeError:
raise ValueError(
'Constructor requires a DatasetNumber or ObjectNumber that converts to a DatasetNumber')
self.type_code = type_code
self.dataset = dataset
self.num1 = num1
self.num2 = num2
self.revision = revision
if not self.revision and dataset.revision:
self.revision = dataset.revision
@property
def as_dataset(self):
"""Unlike the .dataset property, this will include the revision."""
return self.dataset.rev(self.revision)
def __str__(self):
return (
self.type_code +
self.dataset._ds_str() +
ObjectNumber.base62_encode(self.num1).rjust(self.DLEN.OTHER1, '0') +
ObjectNumber.base62_encode(self.num2).rjust(self.DLEN.OTHER2, '0') +
ObjectNumber._rev_str(self.revision))
class Identity(object):
"""Identities represent the defining set of information about a bundle or a
partition.
Only the vid is actually required to uniquely identify a bundle or
partition, but the identity is also used for generating unique names
and for finding bundles and partitions.
"""
is_bundle = True
is_partition = False
OBJECT_NUMBER_SEP = '~'
_name_class = Name
_on = None
_name = None
# Extra data for the library and remotes
locations = None
partitions = None
files = None
urls = None # Url dict, from a remote library.
url = None # Url of remote where object should be retrieved
# A bundle if it is created during the identity listing process.
bundle = None
# Path to bundle in file system. Set in SourceTreeLibrary.list()
bundle_path = None
# Build state of the bundle. Set in SourceTreeLibrary.list()
bundle_state = None
# State of the git repository. Set in SourceTreeLibrary.list()
git_state = None
md5 = None
data = None # Catch-all for other information
def __init__(self, name, object_number):
assert isinstance(name, self._name_class), 'Wrong type: {}. Expected {}'\
.format(type(name), self._name_class)
self._on = object_number
self._name = name
if not self._name.type_is_compatible(self._on):
raise TypeError('The name and the object number must be ' +
'of compatible types: got {} and {}'
.format(type(name), type(object_number)))
# Update the patch number to always be the revision
nv = sv.Version(self._name.version)
nv.patch = int(self._on.revision)
self._name.version = str(nv)
self.data = {}
self.is_valid()
@classmethod
def from_dict(cls, d):
assert isinstance(d, dict)
if 'id' in d and d['id'] and 'revision' in d:
# The vid should be constructed from the id and the revision
if not d['id']:
raise ValueError(" 'id' key doesn't have a value in {} ".format(d))
ono = ObjectNumber.parse(d['id'])
if not ono:
raise ValueError("Failed to parse '{}' as an ObjectNumber ".format(d['id']))
on = ono.rev(d['revision'])
elif 'vid' in d and d['vid']:
on = ObjectNumber.parse(d['vid'])
if not on:
raise ValueError("Failed to parse '{}' as an ObjectNumber ".format(d['vid']))
else:
raise ValueError("Must have id and revision, or vid. Got neither from {}".format(d))
if isinstance(on, DatasetNumber):
try:
name = cls._name_class(**d)
ident = cls(name, on)
except TypeError as e:
raise TypeError("Failed to make identity from \n{}\n: {}".format(d, e.message))
elif isinstance(on, PartitionNumber):
ident = PartitionIdentity.from_dict(d)
else:
raise TypeError(
"Can't make identity from {}; object number is wrong type: {}".format(d, type(on)))
if 'md5' in d:
ident.md5 = d['md5']
return ident
@classmethod
def classify(cls, o):
"""Break an Identity name into parts, or describe the type of other
forms.
        Break a name or object number into parts and classify them. Returns an object
        that indicates which parts of the input string are name components, object number and
version number. Does not completely parse the name components.
Also can handle Name, Identity and ObjectNumbers
:param o: Input object to split
"""
# from collections import namedtuple
s = str(o)
if o is None:
raise ValueError("Input cannot be None")
class IdentityParts(object):
on = None
name = None
isa = None
vname = None
sname = None
name_parts = None
version = None
cache_key = None
# namedtuple('IdentityParts', ['isa', 'name', 'name_parts','on','version', 'vspec'])
ip = IdentityParts()
if isinstance(o, (DatasetNumber, PartitionNumber)):
ip.on = o
ip.name = None
ip.isa = type(ip.on)
ip.name_parts = None
elif isinstance(o, Name):
ip.on = None
ip.isa = type(o)
ip.name = str(o)
ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
elif '/' in s:
# A cache key
ip.cache_key = s.strip()
ip.isa = str
elif cls.OBJECT_NUMBER_SEP in s:
# Must be a fqname
ip.name, on_s = s.strip().split(cls.OBJECT_NUMBER_SEP)
ip.on = ObjectNumber.parse(on_s)
ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
ip.isa = type(ip.on)
elif Name.NAME_PART_SEP in s:
# Must be an sname or vname
ip.name = s
ip.on = None
ip.name_parts = ip.name.split(Name.NAME_PART_SEP)
ip.isa = Name
else:
# Probably an Object Number in string form
ip.name = None
ip.name_parts = None
ip.on = ObjectNumber.parse(s.strip())
ip.isa = type(ip.on)
if ip.name_parts:
last = ip.name_parts[-1]
try:
ip.version = sv.Version(last)
ip.vname = ip.name
except ValueError:
try:
ip.version = sv.Spec(last)
ip.vname = None # Specs aren't vnames you can query
except ValueError:
pass
if ip.version:
ip.name_parts.pop()
ip.sname = Name.NAME_PART_SEP.join(ip.name_parts)
else:
ip.sname = ip.name
return ip
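    # Illustrative inputs for classify() (the names are made up):
    # classify('src-ds-0.1.2~d00000000a003') fills .sname, .vname, .version
    # and .on from the fqname, while classify('d00000000a') sets only .on.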
def to_meta(self, md5=None, file=None):
"""Return a dictionary of metadata, for use in the Remote api."""
# from collections import OrderedDict
if not md5:
if not file:
raise ValueError('Must specify either file or md5')
md5 = md5_for_file(file)
size = os.stat(file).st_size
else:
size = None
return {
'id': self.id_,
'identity': json.dumps(self.dict),
'name': self.sname,
'fqname': self.fqname,
'md5': md5,
# This causes errors with calculating the AWS signature
'size': size
}
def add_md5(self, md5=None, file=None):
# import json
if not md5:
if not file:
raise ValueError("Must specify either file or md5")
md5 = md5_for_file(file)
self.md5 = md5
return self
#
# Naming, paths and cache_keys
#
def is_valid(self):
self._name.is_valid()
@property
def on(self):
"""Return the object number obect."""
return self._on
@property
def id_(self):
"""String version of the object number, without a revision."""
return str(self._on.rev(None))
@property
def vid(self):
"""String version of the object number."""
return str(self._on)
@property
def name(self):
"""The name object."""
return self._name
@property
def sname(self):
"""The name of the bundle, as a string, excluding the revision."""
return str(self._name)
@property
def vname(self):
""""""
return self._name.vname # Obsoleted by __getattr__??
@property
def fqname(self):
"""The fully qualified name, the versioned name and the vid.
This is the same as str(self)
"""
return str(self)
@property
def path(self):
"""The path of the bundle source.
Includes the revision.
"""
self.is_valid()
return self._name.path
@property
def source_path(self):
"""The path of the bundle source.
        Excludes the revision.
"""
self.is_valid()
return self._name.source_path
# Call other values on the name
def __getattr__(self, name):
if hasattr(self._name, name):
return getattr(self._name, name)
else:
            raise AttributeError('Identity does not have attribute {}'.format(name))
@property
def cache_key(self):
"""The name in a form suitable for use as a cache-key"""
self.is_valid()
return self._name.cache_key
@property
def dict(self):
d = self._name.dict
d['vid'] = str(self._on)
d['id'] = str(self._on.rev(None))
d['revision'] = int(self._on.revision)
d['cache_key'] = self.cache_key
if self.md5:
d['md5'] = self.md5
return d
@property
def names_dict(self):
"""A dictionary with only the generated names, name, vname and fqname."""
INCLUDE_KEYS = ['name', 'vname', 'vid']
d = {k: v for k, v in iteritems(self.dict) if k in INCLUDE_KEYS}
d['fqname'] = self.fqname
return d
@property
def ident_dict(self):
"""A dictionary with only the items required to specify the identy,
excluding the generated names, name, vname and fqname."""
        SKIP_KEYS = ['name', 'vname', 'fqname', 'vid', 'cache_key']
return {k: v for k, v in iteritems(self.dict) if k not in SKIP_KEYS}
@staticmethod
def _compose_fqname(vname, vid):
assert vid is not None
assert vname is not None
return vname + Identity.OBJECT_NUMBER_SEP + vid
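    # e.g. _compose_fqname('example.com-ds-0.0.1', 'd00000000a001') returns
    # 'example.com-ds-0.0.1~d00000000a001' (both arguments are illustrative).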
def as_partition(self, partition=0, **kwargs):
"""Return a new PartitionIdentity based on this Identity.
:param partition: Integer partition number for PartitionObjectNumber
:param kwargs:
"""
assert isinstance(self._name, Name), "Wrong type: {}".format(type(self._name))
assert isinstance(self._on, DatasetNumber), "Wrong type: {}".format(type(self._on))
name = self._name.as_partition(**kwargs)
on = self._on.as_partition(partition)
return PartitionIdentity(name, on)
def add_partition(self, p):
"""Add a partition identity as a child of a dataset identity."""
if not self.partitions:
self.partitions = {}
self.partitions[p.vid] = p
def add_file(self, f):
"""Add a partition identity as a child of a dataset identity."""
if not self.files:
self.files = set()
self.files.add(f)
self.locations.set(f.type_)
@property
def partition(self):
"""Convenience function for accessing the first partition in the
partitions list, when there is only one."""
if not self.partitions:
return None
if len(self.partitions) > 1:
raise ValueError(
"Can't use this method when there is more than one partition")
return list(self.partitions.values())[0]
def rev(self, rev):
"""Return a new identity with the given revision"""
d = self.dict
d['revision'] = rev
return self.from_dict(d)
def __str__(self):
return self._compose_fqname(self._name.vname, self.vid)
def _info(self):
"""Returns an OrderedDict of information, for human display."""
d = OrderedDict()
d['vid'] = self.vid
d['sname'] = self.sname
d['vname'] = self.vname
return d
def __hash__(self):
return hash(str(self))
class PartitionIdentity(Identity):
"""Subclass of Identity for partitions."""
is_bundle = False
is_partition = True
_name_class = PartitionName
def is_valid(self):
self._name.is_valid()
if self._name.format:
assert self.format_name() == self._name.format_name(), "Got format '{}', expected '{}'".format(
                self._name.format_name(), self.format_name())
@classmethod
def from_dict(cls, d):
"""Like Identity.from_dict, but will cast the class type based on the
format. i.e. if the format is hdf, return an HdfPartitionIdentity.
:param d:
:return:
"""
name = PartitionIdentity._name_class(**d)
if 'id' in d and 'revision' in d:
# The vid should be constructed from the id and the revision
on = (ObjectNumber.parse(d['id']).rev(d['revision']))
elif 'vid' in d:
on = ObjectNumber.parse(d['vid'])
else:
raise ValueError("Must have id and revision, or vid")
try:
return PartitionIdentity(name, on)
except TypeError as e:
raise TypeError(
"Failed to make identity from \n{}\n: {}".format(
d,
                    e))
@property
def table(self):
return self._name.table
def as_dataset(self):
"""Convert this identity to the identity of the corresponding
dataset."""
        # Use rev() so the partition's own DatasetNumber isn't mutated in place.
        on = self.on.dataset.rev(self.on.revision)
name = Name(**self.name.dict)
return Identity(name, on)
def as_partition(self, partition=0, **kwargs):
raise NotImplementedError(
"Can't generated a PartitionIdentity from a PartitionIdentity")
@property
def sub_path(self):
"""The portion of the path excluding the bundle path."""
self.is_valid()
return self._name.sub_path
@classmethod
def format_name(cls):
return cls._name_class.FORMAT
@classmethod
def extension(cls):
return cls._name_class.PATH_EXTENSION
class NumberServer(object):
def __init__(self, host='numbers.ambry.io', port='80', key=None, **kwargs):
"""
:param host:
:param port:
        :param key: Key to set the assignment class. The number server's redis server must have the
key value set to the assignment class, such as:
set assignment_class:<key> authoritative
Two values are supported, "authoritative" and "registered". If neither value is set, the
assignment class is "unregistered"
        :param kwargs: Not used; absorbs other parameters that may be in the configuration when the
object is constructed with the config, as in NumberServer(**get_runconfig().group('numbers'))
"""
self.host = host
self.port = port
self.key = key
self.port_str = ':' + str(port) if port else ''
self.last_response = None
self.next_time = None
def __next__(self):
if self.key:
params = dict(access_key=self.key)
else:
params = dict()
url = 'http://{}{}/next'.format(self.host, self.port_str)
r = requests.get(url, params=params)
r.raise_for_status()
d = r.json()
self.last_response = d
self.next_time = time.time() + self.last_response.get('wait', 0)
return ObjectNumber.parse(d['number'])
def find(self, name):
if self.key:
params = dict(access_key=self.key)
else:
params = dict()
r = requests.get(
'http://{}{}/find/{}'.format(self.host, self.port_str, name), params=params)
r.raise_for_status()
d = r.json()
self.last_response = d
try:
self.next_time = time.time() + self.last_response['wait']
except TypeError:
            pass  # wait time is None and can't be added to a float.
return ObjectNumber.parse(d['number'])
def sleep(self):
"""Wait for the sleep time of the last response, to avoid being rate
limited."""
if self.next_time and time.time() < self.next_time:
time.sleep(self.next_time - time.time())
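    # Minimal client sketch (the key value is a placeholder, not a real
    # credential):
    #
    #     ns = NumberServer(host='numbers.ambry.io', key='<access-key>')
    #     dn = next(ns)   # a new ObjectNumber allocated by the service
    #     ns.sleep()      # honor the advertised rate limit before asking again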
# port to python2
NumberServer.next = NumberServer.__next__
| bsd-2-clause | -24,262,653,475,965,656 | 27.491694 | 112 | 0.562675 | false | 4.142662 | false | false | false |
rc1405/Minerva | bin/Minerva/server/dns.py | 1 | 5634 | '''
Copyright (C) 2015 Ryan M Cote.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Author: Ryan M Cote <[email protected]>
'''
import bson
import datetime
import pymongo
class dns(object):
'''Initialize Class'''
def __init__(self, minerva_core):
db = minerva_core.get_db()
self.alerts = db.alerts
self.dns = db.dns
self.sizeLimit = minerva_core.conf['Webserver']['events']['maxResults']
    '''Function to get the DNS records for a given alert'''
def get_dns(self, IDs):
results_found = []
for ID in IDs:
orig_alert = self.alerts.find_one({ "_id": bson.objectid.ObjectId(ID) })
src_ip = orig_alert['src_ip']
src_port = orig_alert['src_port']
            dest_ip = orig_alert['dest_ip']
dest_port = orig_alert['dest_port']
proto = orig_alert['proto']
timestamp = orig_alert['timestamp']
start_time = timestamp - datetime.timedelta(seconds=300)
stop_time = timestamp + datetime.timedelta(seconds=300)
dns_results = self.dns.find( {
"$and": [
{ "src_ip": src_ip, "src_port": src_port, "dest_ip": dest_ip, "dest_port": dest_port, "proto": proto },
{ "$and": [
{ "timestamp": { "$gt": start_time }},
{ "timestamp": { "$lt": stop_time }},
] },
]}).sort([("_id", pymongo.ASCENDING)]).limit(self.sizeLimit)
numFound = dns_results.count()
dns_results = map(self.map_dns, dns_results)
results_found.append({ 'id': ID, 'sessions': dns_results, 'origin': orig_alert, 'numFound': numFound })
return results_found
def map_dns(self, item):
ret_dict = {}
ret_dict['ID'] = item.pop('_id')
ret_dict['document'] = item
return ret_dict
    '''Function to search DNS records'''
def search_dns(self, request, orig_search=False):
if not orig_search:
event_search = {}
if len(request['src_ip']) > 0:
event_search['src_ip'] = str(request['src_ip'])
if len(request['src_port']) > 0:
try:
event_search['src_port'] = int(request['src_port'])
except ValueError:
pass
if len(request['dest_ip']) > 0:
event_search['dest_ip'] = str(request['dest_ip'])
if len(request['dest_port']) > 0:
try:
event_search['dest_port'] = int(request['dest_port'])
except ValueError:
pass
if len(request['sensor']) > 0:
event_search['sensor'] = str(request['sensor'])
event_search['proto'] = str(request['proto'])
if len(request['query_type']) > 0:
event_search['dns.type'] = str(request['query_type'])
if len(request['rrtype']) > 0:
event_search['dns.rrtype'] = str(request['rrtype'])
if len(request['rcode']) > 0:
event_search['dns.rcode'] = str(request['rcode'])
if len(request['rrname']) > 0:
event_search['dns.rrname'] = str(request['rrname'])
if len(request['rdata']) > 0:
event_search['dns.rdata'] = str(request['rdata'])
if len(request['start']) > 0:
start_time = datetime.datetime.strptime(request['start'], '%m-%d-%Y %H:%M:%S')
else:
start_time = datetime.datetime.utcnow() - datetime.timedelta(seconds=600)
if len(request['stop']) > 0:
stop_time = datetime.datetime.strptime(request['stop'], '%m-%d-%Y %H:%M:%S')
else:
stop_time = datetime.datetime.utcnow() + datetime.timedelta(seconds=600)
else:
event_search = request
stop_time = event_search.pop('stop_time')
start_time = event_search.pop('start_time')
results = self.dns.find(
{ "$and": [
event_search,
{ "$and": [
{ "timestamp": { "$gt": start_time }},
{ "timestamp": { "$lt": stop_time }}
]},
]}).sort([("_id", pymongo.ASCENDING)]).limit(self.sizeLimit)
numFound = results.count()
results_found = map(self.map_dns, results)
event_search['start_time'] = start_time
event_search['stop_time'] = stop_time
return numFound, results_found, event_search
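# Usage sketch (illustrative; ``minerva_core``, ``request_form`` and the alert
# ID below are hypothetical stand-ins for real objects/values):
#
#     searcher = dns(minerva_core)
#     num_found, results, criteria = searcher.search_dns(request_form)
#     correlated = searcher.get_dns(['54e1f2ab...'])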
| gpl-2.0 | 4,670,705,531,512,347,000 | 36.065789 | 123 | 0.518992 | false | 4.064935 | false | false | false |
yarhajile/sven-daemon | Sven/Daemon.py | 1 | 4165 | #
# Daemon.py
#
#
# Documentation
#
"""
Disk And Execution MONitor (Daemon)
Default daemon behaviors (they can be modified):
1.) Ignore SIGHUP signals.
2.) Default current working directory to the "/" directory.
3.) Set the current file creation mode mask to 0.
4.) Close all open files (0 to [SC_OPEN_MAX or 256]).
5.) Redirect standard I/O streams to "/dev/null".
Failed fork() calls will return a tuple: (errno, strerror). This behavior
can be modified to meet your program's needs.
Resources:
Advanced Programming in the Unix Environment: W. Richard Stevens
Unix Network Programming (Volume 1): W. Richard Stevens
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
-----
Changes
08/16/04
* Changed os.umask(0) to os.umask(022) for Postfix purposes.
-----
"""
#
# Imports
#
import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions.
import signal # Set handlers for asynchronous events.
#
# Constants.
#
__author__ = "Chad J. Schroeder"
__version__ = "$Id$"
#
# Methods.
#
def background():
"""
Detach a process from the controlling terminal and run it in the
background as a daemon.
"""
try:
# Fork a child process so the parent can exit. This will return control
# to the command line or shell. This is required so that the new process
# is guaranteed not to be a process group leader. We have this guarantee
# because the process GID of the parent is inherited by the child, but
# the child gets a new PID, making it impossible for its PID to equal its
# PGID.
pid = os.fork()
except OSError, e:
return((e.errno, e.strerror)) # ERROR (return a tuple)
if (pid == 0): # The first child.
# Next we call os.setsid() to become the session leader of this new
# session. The process also becomes the process group leader of the
# new process group. Since a controlling terminal is associated with a
# session, and this new session has not yet acquired a controlling
# terminal our process now has no controlling terminal. This shouldn't
# fail, since we're guaranteed that the child is not a process group
# leader.
os.setsid()
        # When the first child (the session leader) terminates, the processes
        # in its session may be sent a SIGHUP, so it's ignored here.
signal.signal(signal.SIGHUP, signal.SIG_IGN)
try:
# Fork a second child to prevent zombies. Since the first child is
# a session leader without a controlling terminal, it's possible for
# it to acquire one by opening a terminal in the future. This second
# fork guarantees that the child is no longer a session leader, thus
# preventing the daemon from ever acquiring a controlling terminal.
pid = os.fork() # Fork a second child.
except OSError, e:
return((e.errno, e.strerror)) # ERROR (return a tuple)
if (pid == 0): # The second child.
# Ensure that the daemon doesn't keep any directory in use. Failure
# to do this could make a filesystem unmountable.
os.chdir("/")
# Give the child complete control over permissions.
os.umask(022)
else:
os._exit(0) # Exit parent (the first child) of the second child.
else:
os._exit(0) # Exit parent of the first child.
# Close all open files. Try the system configuration variable, SC_OPEN_MAX,
# for the maximum number of open files to close. If it doesn't exist, use
# the default value (configurable).
try:
maxfd = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError):
maxfd = 256 # default maximum
for fd in range(0, maxfd):
try:
os.close(fd)
except OSError: # ERROR (ignore)
pass
# Redirect the standard file descriptors to /dev/null.
os.open("/dev/null", os.O_RDONLY) # standard input (0)
os.open("/dev/null", os.O_RDWR) # standard output (1)
os.open("/dev/null", os.O_RDWR) # standard error (2)
return(0) | gpl-2.0 | 5,869,123,554,980,802,000 | 31.046154 | 79 | 0.643697 | false | 3.863636 | false | false | false |
nicememory/pie | pyglet/tests/integration/resource/test_resource_loading.py | 1 | 3664 | #!/usr/bin/env python
'''
Layout:
. (script home)
file.txt F1
dir1/
file.txt F2
dir1/
file.txt F3
res.zip/
file.txt F7
dir1/
file.txt F8
dir1/
file.txt F9
dir2/
file.txt F6
'''
import os
import sys
import unittest
from pyglet import resource
from pyglet.compat import asbytes
class ResourceLoadingTestCase(unittest.TestCase):
def setUp(self):
self.script_home = os.path.dirname(__file__)
def check(self, path, result):
self.check_file(path, 'file.txt', result)
def check_file(self, path, file, result):
loader = resource.Loader(path, script_home=self.script_home)
self.assertTrue(loader.file(file).read() == asbytes('%s\n' % result))
def checkFail(self, path):
loader = resource.Loader(path, script_home=self.script_home)
self.assertRaises(resource.ResourceNotFoundException,
loader.file, 'file.txt')
def test1(self):
self.check(None, 'F1')
def test2(self):
self.check('', 'F1')
def test2a(self):
self.check('.', 'F1')
def test2b(self):
self.checkFail(())
def test2c(self):
self.checkFail('foo')
def test2d(self):
self.checkFail(['foo'])
def test2e(self):
self.check(['foo', '.'], 'F1')
def test3(self):
self.check(['.', 'dir1'], 'F1')
def test4(self):
self.check(['dir1'], 'F2')
def test5(self):
self.check(['dir1', '.'], 'F2')
def test6(self):
self.check(['dir1/dir1'], 'F3')
def test7(self):
self.check(['dir1', 'dir1/dir1'], 'F2')
def test8(self):
self.check(['dir1/dir1', 'dir1'], 'F3')
def test9(self):
self.check('dir1/res.zip', 'F7')
def test9a(self):
self.check('dir1/res.zip/', 'F7')
def test10(self):
self.check('dir1/res.zip/dir1', 'F8')
def test10a(self):
self.check('dir1/res.zip/dir1/', 'F8')
def test11(self):
self.check(['dir1/res.zip/dir1', 'dir1/res.zip'], 'F8')
def test12(self):
self.check(['dir1/res.zip', 'dir1/res.zip/dir1'], 'F7')
def test12a(self):
self.check(['dir1/res.zip', 'dir1/res.zip/dir1/dir1'], 'F7')
def test12b(self):
self.check(['dir1/res.zip/dir1/dir1/', 'dir1/res.zip/dir1'], 'F9')
def test12c(self):
self.check(['dir1/res.zip/dir1/dir1', 'dir1/res.zip/dir1'], 'F9')
def test13(self):
self.check(['dir1', 'dir2'], 'F2')
def test14(self):
self.check(['dir2', 'dir1'], 'F6')
# path tests
def test15(self):
self.check_file([''], 'dir1/file.txt', 'F2')
def test15a(self):
self.check_file([''], 'dir1/dir1/file.txt', 'F3')
def test15b(self):
self.check_file(['dir1'], 'dir1/file.txt', 'F3')
def test15c(self):
self.check_file([''], 'dir2/file.txt', 'F6')
def test15d(self):
self.check_file(['.'], 'dir2/file.txt', 'F6')
# zip path tests
def test16(self):
self.check_file(['dir1/res.zip'], 'dir1/file.txt', 'F8')
def test16a(self):
self.check_file(['dir1/res.zip/'], 'dir1/file.txt', 'F8')
    def test16a2(self):
self.check_file(['dir1/res.zip/'], 'dir1/dir1/file.txt', 'F9')
def test16b(self):
self.check_file(['dir1/res.zip/dir1'], 'dir1/file.txt', 'F9')
def test16c(self):
self.check_file(['dir1/res.zip/dir1/'], 'dir1/file.txt', 'F9')
| apache-2.0 | 7,559,877,571,650,317,000 | 23.264901 | 77 | 0.526474 | false | 3.038143 | true | false | false |
Eureka22/ASM_xf | PythonD/site_python/twisted/test/test_ssl.py | 2 | 7095 | # Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from __future__ import nested_scopes
from twisted.trial import unittest
from twisted.internet import protocol, reactor
from twisted.protocols import basic
from twisted.python import util
from OpenSSL import SSL
from twisted.internet import ssl
import os
import test_tcp
certPath = util.sibpath(__file__, "server.pem")
class StolenTCPTestCase(test_tcp.ProperlyCloseFilesTestCase, test_tcp.WriteDataTestCase):
def setUp(self):
f = protocol.ServerFactory()
f.protocol = protocol.Protocol
self.listener = reactor.listenSSL(
0, f, ssl.DefaultOpenSSLContextFactory(certPath, certPath), interface="127.0.0.1",
)
f = protocol.ClientFactory()
f.protocol = test_tcp.ConnectionLosingProtocol
f.protocol.master = self
L = []
def connector():
p = self.listener.getHost()[2]
ctx = ssl.ClientContextFactory()
return reactor.connectSSL('127.0.0.1', p, f, ctx)
self.connector = connector
self.totalConnections = 0
class ClientTLSContext(ssl.ClientContextFactory):
isClient = 1
def getContext(self):
return SSL.Context(ssl.SSL.TLSv1_METHOD)
class UnintelligentProtocol(basic.LineReceiver):
pretext = [
"first line",
"last thing before tls starts",
"STARTTLS",
]
posttext = [
"first thing after tls started",
"last thing ever",
]
def connectionMade(self):
for l in self.pretext:
self.sendLine(l)
def lineReceived(self, line):
if line == "READY":
self.transport.startTLS(ClientTLSContext(), self.factory.client)
for l in self.posttext:
self.sendLine(l)
self.transport.loseConnection()
class ServerTLSContext(ssl.DefaultOpenSSLContextFactory):
isClient = 0
def __init__(self, *args, **kw):
kw['sslmethod'] = SSL.TLSv1_METHOD
ssl.DefaultOpenSSLContextFactory.__init__(self, *args, **kw)
class LineCollector(basic.LineReceiver):
def __init__(self, doTLS):
self.doTLS = doTLS
def connectionMade(self):
self.factory.rawdata = ''
self.factory.lines = []
def lineReceived(self, line):
self.factory.lines.append(line)
if line == 'STARTTLS':
self.sendLine('READY')
if self.doTLS:
ctx = ServerTLSContext(
privateKeyFileName=certPath,
certificateFileName=certPath,
)
self.transport.startTLS(ctx, self.factory.server)
else:
self.setRawMode()
def rawDataReceived(self, data):
self.factory.rawdata += data
self.factory.done = 1
def connectionLost(self, reason):
self.factory.done = 1
class TLSTestCase(unittest.TestCase):
def testTLS(self):
cf = protocol.ClientFactory()
cf.protocol = UnintelligentProtocol
cf.client = 1
sf = protocol.ServerFactory()
sf.protocol = lambda: LineCollector(1)
sf.done = 0
sf.server = 1
port = reactor.listenTCP(0, sf)
portNo = port.getHost()[2]
reactor.connectTCP('0.0.0.0', portNo, cf)
i = 0
while i < 5000 and not sf.done:
reactor.iterate(0.01)
i += 1
self.failUnless(sf.done, "Never finished reading all lines")
self.assertEquals(
sf.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
def testUnTLS(self):
cf = protocol.ClientFactory()
cf.protocol = UnintelligentProtocol
cf.client = 1
sf = protocol.ServerFactory()
sf.protocol = lambda: LineCollector(0)
sf.done = 0
sf.server = 1
port = reactor.listenTCP(0, sf)
portNo = port.getHost()[2]
reactor.connectTCP('0.0.0.0', portNo, cf)
i = 0
while i < 5000 and not sf.done:
reactor.iterate(0.01)
i += 1
self.failUnless(sf.done, "Never finished reading all lines")
self.assertEquals(
sf.lines,
UnintelligentProtocol.pretext
)
self.failUnless(sf.rawdata, "No encrypted bytes received")
def testBackwardsTLS(self):
cf = protocol.ClientFactory()
cf.protocol = lambda: LineCollector(1)
cf.server = 0
cf.done = 0
sf = protocol.ServerFactory()
sf.protocol = UnintelligentProtocol
sf.client = 0
port = reactor.listenTCP(0, sf)
portNo = port.getHost()[2]
reactor.connectTCP('0.0.0.0', portNo, cf)
i = 0
while i < 2000 and not cf.done:
reactor.iterate(0.01)
i += 1
self.failUnless(cf.done, "Never finished reading all lines")
self.assertEquals(
cf.lines,
UnintelligentProtocol.pretext + UnintelligentProtocol.posttext
)
class SingleLineServerProtocol(protocol.Protocol):
def connectionMade(self):
self.transport.write("+OK <some crap>\r\n")
class RecordingClientProtocol(protocol.Protocol):
def connectionMade(self):
self.buffer = []
def dataReceived(self, data):
self.factory.buffer.append(data)
class BufferingTestCase(unittest.TestCase):
def testOpenSSLBuffering(self):
server = protocol.ServerFactory()
client = protocol.ClientFactory()
server.protocol = SingleLineServerProtocol
client.protocol = RecordingClientProtocol
client.buffer = []
from twisted.internet.ssl import DefaultOpenSSLContextFactory
from twisted.internet.ssl import ClientContextFactory
sCTX = DefaultOpenSSLContextFactory(certPath, certPath)
cCTX = ClientContextFactory()
port = reactor.listenSSL(0, server, sCTX, interface='127.0.0.1')
reactor.connectSSL('127.0.0.1', port.getHost()[2], client, cCTX)
for i in range(100):
reactor.iterate()
self.assertEquals(client.buffer, ["+OK <some crap>\r\n"])
testOpenSSLBuffering.todo = "This wasn't working before anyway."
| gpl-2.0 | -3,102,412,264,390,012,400 | 29.581897 | 94 | 0.613249 | false | 4.153981 | true | false | false |
sim1234/Versioning | setup.py | 1 | 3811 | from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='py_versioning',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='1.0.0',
description='Set of tools helping in versioning projects',
long_description=long_description,
# The project's main homepage.
url='https://github.com/sim1234/Versioning',
# Author details
author='Szymon Zmilczak & Jaroslaw Szymla',
author_email='[email protected]',
# Choose your license
license='GPLv2',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Topic :: Software Development :: Version Control',
'Topic :: System :: Filesystems',
'Topic :: Database',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.7',
],
# What does your project relate to?
keywords='version control versioning py_versioning pyversioning',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/technical.html#install-requires-vs-requirements-files
install_requires=['sqlalchemy'],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
    extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
'django': ['django'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
#'sample': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[
#('my_data', ['data/data_file']),
],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'py_versioning=py_versioning:main',
],
},
)
| gpl-2.0 | 8,158,990,582,437,618 | 36.732673 | 98 | 0.660194 | false | 4.128927 | false | false | false |
ryaninhust/sampling | egraphs.py | 1 | 1417 | from igraph import Graph
from seed import generate_seed_graph
from query import query_seed
class EGraph(object):
def __init__(self, path):
self.path = path
self._seed_graph = None
self._g = None
def query_node(self, name):
raise NotImplementedError
@property
def origin_graph(self):
if not self._g:
g = Graph.Read_Ncol(self.path, directed=False)
self._g = g.simplify()
return self._g
@property
def seed_graph(self):
raise NotImplementedError
class FBEgoGraph(EGraph):
name = 'public'
def query_node(self, node_name, n_attribute):
node = self.origin_graph.vs.find(name=node_name)
result = [{'name': n['name'], 'degree': n.degree()} for n in node.neighbors()]
return result
@property
def seed_graph(self):
if not self._seed_graph:
self._seed_graph = generate_seed_graph(self.origin_graph, 100)
return self._seed_graph
class RemoteGraph(EGraph):
name = 'public'
def query_node(self, node_name, n_attribute):
node = self.origin_graph.vs.find(name=node_name)
result = [{'name': n['name'], 'degree': n.degree()} for n in node.neighbors()]
return result
@property
def seed_graph(self):
if not self._seed_graph:
self._seed_graph = query_seed()[0]
return self._seed_graph
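# Usage sketch (illustrative; the file path and node name are placeholders):
#
#     graph = FBEgoGraph('edges.ncol')          # NCOL edge-list file
#     seed = graph.seed_graph                   # cached 100-node seed sample
#     neighbors = graph.query_node('0', n_attribute=None)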
| mit | 3,165,686,679,552,840,000 | 24.763636 | 86 | 0.599859 | false | 3.71916 | false | false | false |
ClusterHQ/eliot | examples/dask_eliot.py | 2 | 1095 | from os import getpid
from dask.bag import from_sequence
import dask.config
from dask.distributed import Client
from eliot import log_call, to_file
from eliot.dask import compute_with_trace
@log_call
def multiply(x, y=7):
return x * y
@log_call
def add(x, y):
return x + y
@log_call
def main_computation():
bag = from_sequence([1, 2, 3])
bag = bag.map(multiply).fold(add)
return compute_with_trace(bag)[0] # instead of dask.compute(bag)
def _start_logging():
# Name log file based on PID, so different processes so stomp on each
# others' logfiles:
to_file(open("{}.log".format(getpid()), "a"))
def main():
# Setup logging on the main process:
_start_logging()
# Start three worker processes on the local machine:
client = Client(n_workers=3, threads_per_worker=1)
# Setup Eliot logging on each worker process:
client.run(_start_logging)
# Run the Dask computation in the worker processes:
result = main_computation()
print("Result:", result)
if __name__ == '__main__':
import dask_eliot
dask_eliot.main()
| apache-2.0 | -5,911,900,486,905,028,000 | 22.804348 | 73 | 0.671233 | false | 3.298193 | false | false | false |
randerzander/wellbook | etl/hive/model.py | 1 | 1481 | #!./pyenv/bin/python
import sys, json, numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
file_nos, x, y_oil, y_gas, y_water = [], [], [], [], []
files = {}
idx = 0
f = open('predict_input.txt')
lines = f.read().split('\n')
#for line in sys.stdin:
for line in lines:
tokens = line.strip().lower().split('\t')
if tokens[0] in files: continue
else: files[tokens[0]] = True
if len(tokens) == 1: continue
#print 'Parsing well ' + tokens[0]
y_oil.append(float(tokens[1])) #bbls_oil
y_gas.append(float(tokens[2])) #mcf_gas
y_water.append(float(tokens[3])) #bbls_water
#footages, fieldname, producedpools, wellbore, welltype, ctb, perfs, spacing
x.append((tokens[4], tokens[5], tokens[6], tokens[7], tokens[8], tokens[9], tokens[10], tokens[11]))
file_nos.append(tokens[0])
#for token in tokens[12:]:
#if token != '': x[idx].append(float(token))
idx += 1
X = np.asarray(x)
#LabelEncode footages, fieldname, producedpools, wellbore, welltype, ctb, perfs, spacing
for idx in xrange(0, 8): X[:,idx] = LabelEncoder().fit_transform(X[:,idx])
m_oil, m_gas, m_water = RandomForestRegressor(), RandomForestRegressor(), RandomForestRegressor()
m_oil.fit(X,y_oil)
m_gas.fit(X,y_gas)
m_water.fit(X,y_water)
for idx,x in enumerate(X):
print '\t'.join([file_nos[idx], str(x[0]), str(m_oil.predict(x)[0]), str(x[1]), str(m_gas.predict(x)[0]), str(x[1]), str(m_water.predict(x)[0])])
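# Scoring sketch (illustrative): a new well must be label-encoded with the same
# encoders fitted above; here we simply reuse an already-encoded row from X.
#
#     new_row = X[0]
#     print m_oil.predict(new_row)[0], m_gas.predict(new_row)[0], m_water.predict(new_row)[0]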
| apache-2.0 | 4,250,318,843,458,691,600 | 37.973684 | 147 | 0.656313 | false | 2.789077 | false | false | false |
liumeixia/xiaworkspace | pythonProject/automate/idaiyanpre/system.py | 9 | 2321 | #-*- coding: utf-8 -*-
########################################################################
import logging
import datetime
from datetime import datetime
import time
import os
import sys
lis=0
class system(object):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
def log(self):
        # filepath = raw_input(u'input file path')
if not os.path.isdir('c:\\TEST_log\\test\\'):
os.makedirs('c:\\TEST_log\\test\\')
logFileName = 'c:\\TEST_log\\test\\%s.log' %time.strftime("%m-%d-%H-%M-%S",time.localtime(time.time()))
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', filename=logFileName, level=logging.DEBUG)
#################################################################################################
        # Define a StreamHandler that prints log records at INFO level or higher to stderr, and attach it to the root logger #
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
#################################################################################################
logging.info('####################')
logging.info('# Begin Test #')
logging.info('####################')
def FileName(self):
global lis
lis=lis+1
filesave='c:\\screenshot\\'
picname= '%s.png'%(datetime.utcnow().strftime('%m-%d-') + time.strftime('%H',time.localtime(time.time()))+datetime.utcnow().strftime('.%M.%S.')+datetime.utcnow().strftime('%f')[:3])
# print filesave
# picname= '%s.png'%(datetime.utcnow().strftime('%m-%d-%H:%M:%S:') + datetime.utcnow().strftime('%f')[:3])
# print picname
filename=filesave+picname
# print filename
return filename
def strB2Q(self,ustring):
"""半角转全角"""
rstring = ""
for uchar in ustring:
inside_code=ord(uchar)
            if inside_code == 32:  # a half-width space converts directly to the ideographic space
inside_code = 12288
            elif inside_code >= 32 and inside_code <= 126:  # other half-width characters (except space) convert by a fixed offset
inside_code += 65248
rstring += unichr(inside_code)
return rstring | gpl-2.0 | 8,262,490,510,182,584,000 | 35.333333 | 182 | 0.518587 | false | 3.362654 | true | false | false |
azer/jsbuild | jsbuild/index.py | 1 | 3930 | from functools import partial
from glob import glob
from itertools import chain
from jsbuild.dependency import Dependency
from jsbuild.logging import logger
from jsbuild.manifest import Manifest
from jsbuild.maps import get_class_by_format
from jsbuild import templates
import os.path
import re
clean_backdir = lambda path: re.sub('^(\.\.\/?)+','',path)
count_backdir = lambda path: get_backdir(path).count('../')
has_backdir = lambda path: re.match('^\.\.',path) and True or False
join_path = lambda *args: os.path.normpath(os.path.join(*args))
def get_backdir(path):
search = re.search('((?:\.\.\/)+)',path)
return os.path.normpath(search.groups()[0]) if search else ''
class Index(Dependency):
def __init__(self,*args,**kwargs):
super(Index,self).__init__(*args,**kwargs)
self._buffer_ = None
self._manifest_ = None
self._dependencies_ = None
self.to_call = []
@property
def buffer(self):
if not self._buffer_:
self._buffer_ = self.read()
return self._buffer_
@property
def content(self):
root = self
while root.index: root = root.index
name = root.manifest.name
content = '\n'.join(map(lambda dep: dep.content if not isinstance(dep,Index) or not dep.get_config('filename',False) else dep.put() or '', self.dependencies))
if not self.index:
content = templates.package%{ "name":name, "content":content }
for flname in self.to_call:
content = '%s\n%s'%(content,templates.maincall%{ "index_name":root.manifest.name, "filename":flname})
for rpl in self.get_config('replacements',[]):
content = re.sub(rpl['pattern'],rpl['replacement']%self.get_config('dict',{}),content,flags=re.DOTALL)
return content
@property
def dependencies(self):
if self._dependencies_ == None:
self.import_manifest()
return self._dependencies_
@property
def manifest(self):
if self._manifest_ == None:
self._manifest_ = Manifest(self.parse())
return self._manifest_
def get_config(self,key,default=None):
return self.manifest.build.__contains__(key) and self.manifest['build'][key] or default
@property
def source_dir(self):
return os.path.normpath(os.path.join(self.working_dir,self.get_config('dir','')))
@property
def path(self):
logger.debug('Trying to find client-side path of "%s" (:working_dir %s :source_dir %s)'%(self.src,self.working_dir,self.source_dir))
if not self.index: return ''
parent = self.index
parent_ref = get_backdir(self.src)
while parent and has_backdir(parent_ref):
parent_dir = join_path(os.path.dirname(parent.src) if parent.index else '',parent.get_config('dir',''))
parent_dir_merged = join_path(clean_backdir(parent_dir),parent_ref)
if len(parent_dir_merged)>0 and not parent_dir_merged=='.' and (not has_backdir(parent_dir_merged)):
break
parent_ref = join_path(parent_dir if parent.index and parent.index.index else clean_backdir(parent_dir),parent_ref)
parent = parent.index
path = join_path(parent.path if parent else '',clean_backdir(os.path.dirname(self.src)))
return path if path!='.' else ''
def import_manifest(self):
logger.debug('Importing manifest document')
self._dependencies_ = []
sdir = self.source_dir
files = [ el for el in map(partial(lambda path: os.path.join(sdir,path)),self.get_config('files',[])) ]
for depinfo in chain(*map(glob,files)):
src = depinfo if not self.source_dir else depinfo[len(self.source_dir)+1:]
dp = get_class_by_format(src)(index=self)
dp.src = src
self.dependencies.append(dp)
def parse(self,content):
        raise NotImplementedError()
def put(self):
filename = os.path.normpath(os.path.join(self.working_dir, self.get_config('filename')))
        with open(filename, 'w', encoding='utf-8') as fl:
fl.write(self.content)
logger.info('Writing %s OK'%filename)
| mit | 6,466,859,445,148,958,000 | 32.305085 | 162 | 0.669975 | false | 3.43832 | true | false | false |
ISMiller101/vpsolver | setup.py | 1 | 2141 | #!/usr/bin/env python
"""
Basic Setup Script
This code is part of the Arc-flow Vector Packing Solver (VPSolver).
Copyright (C) 2013-2015, Filipe Brandao
Faculdade de Ciencias, Universidade do Porto
Porto, Portugal. All rights reserved. E-mail: <[email protected]>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from os import system
from setuptools import setup
from setuptools.command.install import install
class CustomInstallCommand(install):
""" Custom Install Command """
def run(self):
try:
system('/bin/bash ./compile.sh')
system('/bin/cp bin/* ' + self.install_scripts)
except IOError:
pass
install.run(self)
setup(
name='VPSolver',
version='1.1',
description='Cutting and Packing Exact Solver Based on an Arc-Flow Formulation',
author='',
author_email='',
packages=['pyvpsolver'],
include_package_data=True,
scripts=[
'scripts/vpsolver_gurobi.sh',
'scripts/vpsolver_cplex.sh',
'scripts/vpsolver_coinor.sh',
'scripts/vpsolver_glpk.sh',
'scripts/vpsolver_lpsolve.sh',
'scripts/vpsolver_scip.sh',
],
url='',
license='LICENSE',
long_description=open('README').read(),
keywords='',
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Topic :: Scientific/Engineering'
],
    cmdclass={'install': CustomInstallCommand},
    use_2to3=True
)
| gpl-3.0 | 7,635,097,545,252,229,000 | 30.028986 | 84 | 0.684727 | false | 3.717014 | false | false | false |
anksp21/Community-Zenpacks | ZenPacks.community.HPBladeChassis/ZenPacks/community/HPBladeChassis/BladeChassisInterconnect.py | 5 | 2484 | from Globals import InitializeClass
# from AccessControl import ClassSecurityInfo
from Products.ZenRelations.RelSchema import *
from Products.ZenModel.DeviceComponent import DeviceComponent
from Products.ZenModel.ManagedEntity import ManagedEntity
from Products.ZenUtils.Utils import convToUnits
from Products.ZenModel.ZenossSecurity import ZEN_VIEW, ZEN_CHANGE_SETTINGS
_kw = dict(mode='w')
class BladeChassisInterconnect(DeviceComponent, ManagedEntity):
"Blade Chassis Interconnect Information"
portal_type = meta_type = 'BladeChassisInterconnect'
bciNumber = -1
bciType = ""
bciProductName = ""
bciStatus = ""
bciMgmtIp = ""
bciSerialNum = ""
bciPartNumber = ""
bciSparePartNumber = ""
_properties = (
dict(id='bciNumber', type='int', **_kw),
dict(id='bciType', type='string', **_kw),
dict(id='bciProductName', type='string', **_kw),
dict(id='bciStatus', type='string', **_kw),
dict(id='bciMgmtIp', type='string', **_kw),
dict(id='bciSerialNum', type='string', **_kw),
dict(id='bciPartNumber', type='string', **_kw),
dict(id='bciSparePartNumber', type='string', **_kw)
)
_relations = (
('bladechassis', ToOne(ToManyCont, 'ZenPacks.community.HPBladeChassis.BladeChassis', 'bladechassisinterconnects')),
)
# Screen action bindings (and tab definitions)
factory_type_information = (
{
'id' : 'BladeChassisInterconnect',
'meta_type' : 'Blade Chassis Interconnect',
'description' : 'Blade Chassis Interconnect Description',
'icon' : 'Device_icon.gif',
'product' : 'BladeServers',
'factory' : 'manage_addBladeServer',
'immediate_view' : 'bladeserverPerformance',
'actions' :
(
{ 'id' : 'perf'
, 'name' : 'perf'
, 'action' : 'bladeserverPerformance'
, 'permissions' : (ZEN_VIEW, )
},
{ 'id' : 'templates'
, 'name' : 'Templates'
, 'action' : 'objTemplates'
, 'permissions' : (ZEN_CHANGE_SETTINGS, )
},
)
},
)
def device(self):
return self.bladechassis()
def managedDeviceLink(self):
from Products.ZenModel.ZenModelRM import ZenModelRM
d = self.getDmdRoot("Devices").findDevice(self.bsProductName)
if d:
return ZenModelRM.urlLink(d, 'link')
return None
def snmpIgnore(self):
return ManagedEntity.snmpIgnore(self) or self.snmpindex < 0
InitializeClass(BladeChassisInterconnect)
| gpl-2.0 | 3,578,364,691,106,884,000 | 29.292683 | 116 | 0.646538 | false | 3.192802 | false | false | false |
swegener/libsigrokdecode | decoders/max7219/pd.py | 3 | 3720 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Paul Evans <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import re
import sigrokdecode as srd
def _decode_intensity(val):
intensity = val & 0x0f
if intensity == 0:
return 'min'
elif intensity == 15:
return 'max'
else:
return intensity
registers = {
0x00: ['No-op', lambda _: ''],
0x09: ['Decode', lambda v: '0b{:08b}'.format(v)],
0x0A: ['Intensity', _decode_intensity],
0x0B: ['Scan limit', lambda v: 1 + v],
0x0C: ['Shutdown', lambda v: 'off' if v else 'on'],
0x0F: ['Display test', lambda v: 'on' if v else 'off']
}
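# Worked examples (illustrative): a write of 0x00 to register 0x0A decodes to
# intensity 'min', 0x0F to 'max', and e.g. 0x87 to the raw level 7, since
# _decode_intensity() masks off the upper nibble of the data byte.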
ann_reg, ann_digit, ann_warning = range(3)
class Decoder(srd.Decoder):
api_version = 3
id = 'max7219'
name = 'MAX7219'
longname = 'Maxim MAX7219/MAX7221'
desc = 'Maxim MAX72xx series 8-digit LED display driver.'
license = 'gplv2+'
inputs = ['spi']
outputs = []
tags = ['Display']
annotations = (
('register', 'Register write'),
('digit', 'Digit displayed'),
('warning', 'Warning'),
)
annotation_rows = (
('commands', 'Commands', (ann_reg, ann_digit)),
('warnings', 'Warnings', (ann_warning,)),
)
def __init__(self):
self.reset()
def reset(self):
pass
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.pos = 0
self.cs_start = 0
def putreg(self, ss, es, reg, value):
self.put(ss, es, self.out_ann, [ann_reg, ['%s: %s' % (reg, value)]])
def putdigit(self, ss, es, digit, value):
self.put(ss, es, self.out_ann, [ann_digit, ['Digit %d: %02X' % (digit, value)]])
def putwarn(self, ss, es, message):
self.put(ss, es, self.out_ann, [ann_warning, [message]])
def decode(self, ss, es, data):
ptype, mosi, _ = data
if ptype == 'DATA':
if not self.cs_asserted:
return
if self.pos == 0:
self.addr = mosi
self.addr_start = ss
elif self.pos == 1:
if self.addr >= 1 and self.addr <= 8:
self.putdigit(self.addr_start, es, self.addr, mosi)
elif self.addr in registers:
name, decoder = registers[self.addr]
self.putreg(self.addr_start, es, name, decoder(mosi))
else:
self.putwarn(self.addr_start, es,
'Unknown register %02X' % (self.addr))
self.pos += 1
elif ptype == 'CS-CHANGE':
self.cs_asserted = mosi
if self.cs_asserted:
self.pos = 0
self.cs_start = ss
else:
if self.pos == 1:
# Don't warn if pos=0 so that CS# glitches don't appear
# as spurious warnings.
self.putwarn(self.cs_start, es, 'Short write')
elif self.pos > 2:
self.putwarn(self.cs_start, es, 'Overlong write')
| gpl-3.0 | -5,946,079,890,034,845,000 | 31.347826 | 88 | 0.55457 | false | 3.546235 | false | false | false |
kawamon/hue | desktop/core/ext-py/pytest-4.6.11/testing/test_unittest.py | 3 | 30188 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
def test_simple_unittest(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
def testpassing(self):
self.assertEqual('foo', 'foo')
def test_failing(self):
self.assertEqual('foo', 'bar')
"""
)
reprec = testdir.inline_run(testpath)
assert reprec.matchreport("testpassing").passed
assert reprec.matchreport("test_failing").failed
def test_runTest_method(testdir):
testdir.makepyfile(
"""
import unittest
class MyTestCaseWithRunTest(unittest.TestCase):
def runTest(self):
self.assertEqual('foo', 'foo')
class MyTestCaseWithoutRunTest(unittest.TestCase):
def runTest(self):
self.assertEqual('foo', 'foo')
def test_something(self):
pass
"""
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
"""
*MyTestCaseWithRunTest::runTest*
*MyTestCaseWithoutRunTest::test_something*
*2 passed*
"""
)
def test_isclasscheck_issue53(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class _E(object):
def __getattr__(self, tag):
pass
E = _E()
"""
)
result = testdir.runpytest(testpath)
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_setup(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
def setUp(self):
self.foo = 1
def setup_method(self, method):
self.foo2 = 1
def test_both(self):
self.assertEqual(1, self.foo)
assert self.foo2 == 1
def teardown_method(self, method):
assert 0, "42"
"""
)
reprec = testdir.inline_run("-s", testpath)
assert reprec.matchreport("test_both", when="call").passed
rep = reprec.matchreport("test_both", when="teardown")
assert rep.failed and "42" in str(rep.longrepr)
def test_setUpModule(testdir):
testpath = testdir.makepyfile(
"""
values = []
def setUpModule():
values.append(1)
def tearDownModule():
del values[0]
def test_hello():
assert values == [1]
def test_world():
assert values == [1]
"""
)
result = testdir.runpytest(testpath)
result.stdout.fnmatch_lines(["*2 passed*"])
def test_setUpModule_failing_no_teardown(testdir):
testpath = testdir.makepyfile(
"""
values = []
def setUpModule():
0/0
def tearDownModule():
values.append(1)
def test_hello():
pass
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=0, failed=1)
call = reprec.getcalls("pytest_runtest_setup")[0]
assert not call.item.module.values
def test_new_instances(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
def test_func1(self):
self.x = 2
def test_func2(self):
assert not hasattr(self, 'x')
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_function_item_obj_is_instance(testdir):
"""item.obj should be a bound method on unittest.TestCase function items (#5390)."""
testdir.makeconftest(
"""
def pytest_runtest_makereport(item, call):
if call.when == 'call':
class_ = item.parent.obj
assert isinstance(item.obj.__self__, class_)
"""
)
testdir.makepyfile(
"""
import unittest
class Test(unittest.TestCase):
def test_foo(self):
pass
"""
)
result = testdir.runpytest_inprocess()
result.stdout.fnmatch_lines(["* 1 passed in*"])
def test_teardown(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
values = []
def test_one(self):
pass
def tearDown(self):
self.values.append(None)
class Second(unittest.TestCase):
def test_check(self):
self.assertEqual(MyTestCase.values, [None])
"""
)
reprec = testdir.inline_run(testpath)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0, failed
assert passed == 2
assert passed + skipped + failed == 2
def test_teardown_issue1649(testdir):
"""
Are TestCase objects cleaned up? Often unittest TestCase objects set
attributes that are large and expensive during setUp.
The TestCase will not be cleaned up if the test fails, because it
would then exist in the stackframe.
"""
testpath = testdir.makepyfile(
"""
import unittest
class TestCaseObjectsShouldBeCleanedUp(unittest.TestCase):
def setUp(self):
self.an_expensive_object = 1
def test_demo(self):
pass
"""
)
testdir.inline_run("-s", testpath)
gc.collect()
for obj in gc.get_objects():
assert type(obj).__name__ != "TestCaseObjectsShouldBeCleanedUp"
def test_unittest_skip_issue148(testdir):
testpath = testdir.makepyfile(
"""
import unittest
@unittest.skip("hello")
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
xxx
def test_one(self):
pass
@classmethod
def tearDownClass(self):
xxx
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(skipped=1)
def test_method_and_teardown_failing_reporting(testdir):
testdir.makepyfile(
"""
import unittest, pytest
class TC(unittest.TestCase):
def tearDown(self):
assert 0, "down1"
def test_method(self):
assert False, "down2"
"""
)
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines(
[
"*tearDown*",
"*assert 0*",
"*test_method*",
"*assert False*",
"*1 failed*1 error*",
]
)
def test_setup_failure_is_shown(testdir):
testdir.makepyfile(
"""
import unittest
import pytest
class TC(unittest.TestCase):
def setUp(self):
assert 0, "down1"
def test_method(self):
print("never42")
xyz
"""
)
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines(["*setUp*", "*assert 0*down1*", "*1 failed*"])
assert "never42" not in result.stdout.str()
def test_setup_setUpClass(testdir):
testpath = testdir.makepyfile(
"""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
@classmethod
def tearDownClass(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
def test_setup_class(testdir):
testpath = testdir.makepyfile(
"""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
x = 0
def setup_class(cls):
cls.x += 1
def test_func1(self):
assert self.x == 1
def test_func2(self):
assert self.x == 1
def teardown_class(cls):
cls.x -= 1
def test_teareddown():
assert MyTestCase.x == 0
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=3)
@pytest.mark.parametrize("type", ["Error", "Failure"])
def test_testcase_adderrorandfailure_defers(testdir, type):
testdir.makepyfile(
"""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
try:
result.add%s(self, excinfo._excinfo)
except KeyboardInterrupt:
raise
except:
pytest.fail("add%s should not raise")
def test_hello(self):
pass
"""
% (type, type)
)
result = testdir.runpytest()
assert "should not raise" not in result.stdout.str()
@pytest.mark.parametrize("type", ["Error", "Failure"])
def test_testcase_custom_exception_info(testdir, type):
testdir.makepyfile(
"""
from unittest import TestCase
import py, pytest
import _pytest._code
class MyTestCase(TestCase):
def run(self, result):
excinfo = pytest.raises(ZeroDivisionError, lambda: 0/0)
# we fake an incompatible exception info
from _pytest.monkeypatch import MonkeyPatch
mp = MonkeyPatch()
def t(*args):
mp.undo()
raise TypeError()
mp.setattr(_pytest._code, 'ExceptionInfo', t)
try:
excinfo = excinfo._excinfo
result.add%(type)s(self, excinfo)
finally:
mp.undo()
def test_hello(self):
pass
"""
% locals()
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"NOTE: Incompatible Exception Representation*",
"*ZeroDivisionError*",
"*1 failed*",
]
)
def test_testcase_totally_incompatible_exception_info(testdir):
(item,) = testdir.getitems(
"""
from unittest import TestCase
class MyTestCase(TestCase):
def test_hello(self):
pass
"""
)
item.addError(None, 42)
excinfo = item._excinfo.pop(0)
assert "ERROR: Unknown Incompatible" in str(excinfo.getrepr())
def test_module_level_pytestmark(testdir):
testpath = testdir.makepyfile(
"""
import unittest
import pytest
pytestmark = pytest.mark.xfail
class MyTestCase(unittest.TestCase):
def test_func1(self):
assert 0
"""
)
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
class TestTrialUnittest(object):
def setup_class(cls):
cls.ut = pytest.importorskip("twisted.trial.unittest")
# on windows trial uses a socket for a reactor and apparently doesn't close it properly
# https://twistedmatrix.com/trac/ticket/9227
cls.ignore_unclosed_socket_warning = ("-W", "always")
def test_trial_testcase_runtest_not_collected(self, testdir):
testdir.makepyfile(
"""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def test_hello(self):
pass
"""
)
reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)
reprec.assertoutcome(passed=1)
testdir.makepyfile(
"""
from twisted.trial.unittest import TestCase
class TC(TestCase):
def runTest(self):
pass
"""
)
reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning)
reprec.assertoutcome(passed=1)
def test_trial_exceptions_with_skips(self, testdir):
testdir.makepyfile(
"""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
pytest.skip("skip_in_method")
@pytest.mark.skipif("sys.version_info != 1")
def test_hello2(self):
pass
@pytest.mark.xfail(reason="iwanto")
def test_hello3(self):
assert 0
def test_hello4(self):
pytest.xfail("i2wanto")
def test_trial_skip(self):
pass
test_trial_skip.skip = "trialselfskip"
def test_trial_todo(self):
assert 0
test_trial_todo.todo = "mytodo"
def test_trial_todo_success(self):
pass
test_trial_todo_success.todo = "mytodo"
class TC2(unittest.TestCase):
def setup_class(cls):
pytest.skip("skip_in_setup_class")
def test_method(self):
pass
"""
)
from _pytest.compat import _is_unittest_unexpected_success_a_failure
should_fail = _is_unittest_unexpected_success_a_failure()
result = testdir.runpytest("-rxs", *self.ignore_unclosed_socket_warning)
result.stdout.fnmatch_lines_random(
[
"*XFAIL*test_trial_todo*",
"*trialselfskip*",
"*skip_in_setup_class*",
"*iwanto*",
"*i2wanto*",
"*sys.version_info*",
"*skip_in_method*",
"*1 failed*4 skipped*3 xfailed*"
if should_fail
else "*4 skipped*3 xfail*1 xpass*",
]
)
assert result.ret == (1 if should_fail else 0)
def test_trial_error(self, testdir):
testdir.makepyfile(
"""
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet import reactor
class TC(TestCase):
def test_one(self):
crash
def test_two(self):
def f(_):
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
def test_three(self):
def f():
pass # will never get called
reactor.callLater(0.3, f)
# will crash at teardown
def test_four(self):
def f(_):
reactor.callLater(0.3, f)
crash
d = Deferred()
d.addCallback(f)
reactor.callLater(0.3, d.callback, None)
return d
# will crash both at test time and at teardown
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*ERRORS*",
"*DelayedCalls*",
"*test_four*",
"*NameError*crash*",
"*test_one*",
"*NameError*crash*",
"*test_three*",
"*DelayedCalls*",
"*test_two*",
"*crash*",
]
)
def test_trial_pdb(self, testdir):
p = testdir.makepyfile(
"""
from twisted.trial import unittest
import pytest
class TC(unittest.TestCase):
def test_hello(self):
assert 0, "hellopdb"
"""
)
child = testdir.spawn_pytest(p)
child.expect("hellopdb")
child.sendeof()
def test_trial_testcase_skip_property(self, testdir):
testpath = testdir.makepyfile(
"""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
skip = 'dont run'
def test_func(self):
pass
"""
)
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_skip_property(self, testdir):
testpath = testdir.makepyfile(
"""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
pass
test_func.skip = 'dont run'
"""
)
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testcase_todo_property(self, testdir):
testpath = testdir.makepyfile(
"""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
todo = 'dont run'
def test_func(self):
assert 0
"""
)
reprec = testdir.inline_run(testpath, "-s")
reprec.assertoutcome(skipped=1)
def test_trial_testfunction_todo_property(self, testdir):
testpath = testdir.makepyfile(
"""
from twisted.trial import unittest
class MyTestCase(unittest.TestCase):
def test_func(self):
assert 0
test_func.todo = 'dont run'
"""
)
reprec = testdir.inline_run(
testpath, "-s", *self.ignore_unclosed_socket_warning
)
reprec.assertoutcome(skipped=1)
def test_djangolike_testcase(testdir):
# contributed from Morten Breekevold
testdir.makepyfile(
"""
from unittest import TestCase, main
class DjangoLikeTestCase(TestCase):
def setUp(self):
print("setUp()")
def test_presetup_has_been_run(self):
print("test_thing()")
self.assertTrue(hasattr(self, 'was_presetup'))
def tearDown(self):
print("tearDown()")
def __call__(self, result=None):
try:
self._pre_setup()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
super(DjangoLikeTestCase, self).__call__(result)
try:
self._post_teardown()
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
import sys
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
print("_pre_setup()")
self.was_presetup = True
def _post_teardown(self):
print("_post_teardown()")
"""
)
result = testdir.runpytest("-s")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"*_pre_setup()*",
"*setUp()*",
"*test_thing()*",
"*tearDown()*",
"*_post_teardown()*",
]
)
def test_unittest_not_shown_in_traceback(testdir):
testdir.makepyfile(
"""
import unittest
class t(unittest.TestCase):
def test_hello(self):
x = 3
self.assertEqual(x, 4)
"""
)
res = testdir.runpytest()
assert "failUnlessEqual" not in res.stdout.str()
def test_unorderable_types(testdir):
testdir.makepyfile(
"""
import unittest
class TestJoinEmpty(unittest.TestCase):
pass
def make_test():
class Test(unittest.TestCase):
pass
Test.__name__ = "TestFoo"
return Test
TestFoo = make_test()
"""
)
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_unittest_typerror_traceback(testdir):
testdir.makepyfile(
"""
import unittest
class TestJoinEmpty(unittest.TestCase):
def test_hello(self, arg1):
pass
"""
)
result = testdir.runpytest()
assert "TypeError" in result.stdout.str()
assert result.ret == 1
@pytest.mark.parametrize("runner", ["pytest", "unittest"])
def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
script = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_failing_test_is_xfail(self):
assert False
if __name__ == '__main__':
unittest.main()
"""
)
if runner == "pytest":
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines(
["*XFAIL*MyTestCase*test_failing_test_is_xfail*", "*1 xfailed*"]
)
else:
result = testdir.runpython(script)
result.stderr.fnmatch_lines(["*1 test in*", "*OK*(expected failures=1)*"])
assert result.ret == 0
@pytest.mark.parametrize("runner", ["pytest", "unittest"])
def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
script = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.expectedFailure
def test_passing_test_is_fail(self):
assert True
if __name__ == '__main__':
unittest.main()
"""
)
from _pytest.compat import _is_unittest_unexpected_success_a_failure
should_fail = _is_unittest_unexpected_success_a_failure()
if runner == "pytest":
result = testdir.runpytest("-rxX")
result.stdout.fnmatch_lines(
[
"*MyTestCase*test_passing_test_is_fail*",
"*1 failed*" if should_fail else "*1 xpassed*",
]
)
else:
result = testdir.runpython(script)
result.stderr.fnmatch_lines(["*1 test in*", "*(unexpected successes=1)*"])
assert result.ret == (1 if should_fail else 0)
@pytest.mark.parametrize(
"fix_type, stmt", [("fixture", "return"), ("yield_fixture", "yield")]
)
def test_unittest_setup_interaction(testdir, fix_type, stmt):
testdir.makepyfile(
"""
import unittest
import pytest
class MyTestCase(unittest.TestCase):
@pytest.{fix_type}(scope="class", autouse=True)
def perclass(self, request):
request.cls.hello = "world"
{stmt}
@pytest.{fix_type}(scope="function", autouse=True)
def perfunction(self, request):
request.instance.funcname = request.function.__name__
{stmt}
def test_method1(self):
assert self.funcname == "test_method1"
assert self.hello == "world"
def test_method2(self):
assert self.funcname == "test_method2"
def test_classattr(self):
assert self.__class__.hello == "world"
""".format(
fix_type=fix_type, stmt=stmt
)
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*3 passed*"])
def test_non_unittest_no_setupclass_support(testdir):
testpath = testdir.makepyfile(
"""
class TestFoo(object):
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
def test_method1(self):
assert self.x == 0
@classmethod
def tearDownClass(cls):
cls.x = 1
def test_not_teareddown():
assert TestFoo.x == 0
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=2)
def test_no_teardown_if_setupclass_failed(testdir):
testpath = testdir.makepyfile(
"""
import unittest
class MyTestCase(unittest.TestCase):
x = 0
@classmethod
def setUpClass(cls):
cls.x = 1
assert False
def test_func1(self):
cls.x = 10
@classmethod
def tearDownClass(cls):
cls.x = 100
def test_notTornDown():
assert MyTestCase.x == 1
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1, failed=1)
def test_issue333_result_clearing(testdir):
testdir.makeconftest(
"""
import pytest
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
yield
assert 0
"""
)
testdir.makepyfile(
"""
import unittest
class TestIt(unittest.TestCase):
def test_func(self):
0/0
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(failed=1)
def test_unittest_raise_skip_issue748(testdir):
testdir.makepyfile(
test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_one(self):
raise unittest.SkipTest('skipping due to reasons')
"""
)
result = testdir.runpytest("-v", "-rs")
result.stdout.fnmatch_lines(
"""
*SKIP*[1]*test_foo.py*skipping due to reasons*
*1 skipped*
"""
)
def test_unittest_skip_issue1169(testdir):
testdir.makepyfile(
test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
@unittest.skip("skipping due to reasons")
def test_skip(self):
self.fail()
"""
)
result = testdir.runpytest("-v", "-rs")
result.stdout.fnmatch_lines(
"""
*SKIP*[1]*skipping due to reasons*
*1 skipped*
"""
)
def test_class_method_containing_test_issue1558(testdir):
testdir.makepyfile(
test_foo="""
import unittest
class MyTestCase(unittest.TestCase):
def test_should_run(self):
pass
def test_should_not_run(self):
pass
test_should_not_run.__test__ = False
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.parametrize(
"base", ["six.moves.builtins.object", "unittest.TestCase", "unittest2.TestCase"]
)
def test_usefixtures_marker_on_unittest(base, testdir):
"""#3498"""
module = base.rsplit(".", 1)[0]
pytest.importorskip(module)
testdir.makepyfile(
conftest="""
import pytest
@pytest.fixture(scope='function')
def fixture1(request, monkeypatch):
            monkeypatch.setattr(request.instance, 'fixture1', True)
        @pytest.fixture(scope='function')
        def fixture2(request, monkeypatch):
            monkeypatch.setattr(request.instance, 'fixture2', True)
def node_and_marks(item):
print(item.nodeid)
for mark in item.iter_markers():
print(" ", mark)
@pytest.fixture(autouse=True)
def my_marks(request):
node_and_marks(request.node)
def pytest_collection_modifyitems(items):
for item in items:
node_and_marks(item)
"""
)
testdir.makepyfile(
"""
import pytest
import {module}
class Tests({base}):
fixture1 = False
fixture2 = False
@pytest.mark.usefixtures("fixture1")
def test_one(self):
assert self.fixture1
assert not self.fixture2
@pytest.mark.usefixtures("fixture1", "fixture2")
def test_two(self):
assert self.fixture1
assert self.fixture2
""".format(
module=module, base=base
)
)
result = testdir.runpytest("-s")
result.assert_outcomes(passed=2)
def test_testcase_handles_init_exceptions(testdir):
"""
Regression test to make sure exceptions in the __init__ method are bubbled up correctly.
See https://github.com/pytest-dev/pytest/issues/3788
"""
testdir.makepyfile(
"""
from unittest import TestCase
import pytest
class MyTestCase(TestCase):
def __init__(self, *args, **kwargs):
raise Exception("should raise this exception")
def test_hello(self):
pass
"""
)
result = testdir.runpytest()
assert "should raise this exception" in result.stdout.str()
assert "ERROR at teardown of MyTestCase.test_hello" not in result.stdout.str()
def test_error_message_with_parametrized_fixtures(testdir):
testdir.copy_example("unittest/test_parametrized_fixture_error_message.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_two does not support fixtures*",
"*TestSomethingElse::test_two",
"*Function type: TestCaseFunction",
]
)
@pytest.mark.parametrize(
"test_name, expected_outcome",
[
("test_setup_skip.py", "1 skipped"),
("test_setup_skip_class.py", "1 skipped"),
("test_setup_skip_module.py", "1 error"),
],
)
def test_setup_inheritance_skipping(testdir, test_name, expected_outcome):
"""Issue #4700"""
testdir.copy_example("unittest/{}".format(test_name))
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* {} in *".format(expected_outcome)])
| apache-2.0 | -2,479,681,594,579,308,000 | 27.292409 | 95 | 0.52872 | false | 4.457767 | true | false | false |
bcgov/gwells | app/backend/gwells/models/__init__.py | 1 | 3065 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Don't split all the model classes into separate files (see The Zen of Python: "Flat is better than nested.")
# If you are going to do it, adhere to the standards:
# See: https://docs.djangoproject.com/en/2.0/topics/db/models/#organizing-models-in-a-package
# See: https://www.python.org/dev/peps/pep-0008/#package-and-module-names
import uuid
from django.contrib.gis.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from ..db_comments.model_mixins import DBComments
from .common import *
from .screen import *
from .survey import *
from .bulk import *
# DATALOAD_USER: Use for ETL etc.
DATALOAD_USER = 'DATALOAD_USER'
# DE_DUPLICATE_USER: Use when running scripts that remove duplicates.
DE_DUPLICATE_USER = 'DE_DUPLICATE_USER'
class Profile(models.Model, DBComments):
"""
Extended User Profile
"""
user = models.OneToOneField(
User, on_delete=models.CASCADE, related_name="profile")
profile_guid = models.UUIDField(
primary_key=True, default=uuid.uuid4, editable=False)
username = models.CharField(max_length=100, blank=True, null=True)
name = models.CharField(max_length=100, blank=True, null=True)
db_table_comment = ('Additional user information that cannot be stored on the django auth_user table.')
class Meta:
db_table = 'profile'
class Border(models.Model):
se_a_c_flg = models.CharField(max_length=254)
obejctid = models.FloatField()
shape = models.FloatField()
length_m = models.FloatField()
oic_number = models.CharField(max_length=7)
area_sqm = models.FloatField()
upt_date = models.CharField(max_length=20)
upt_type = models.CharField(max_length=50)
chng_org = models.CharField(max_length=30)
aa_parent = models.CharField(max_length=100)
aa_type = models.CharField(max_length=50)
aa_id = models.BigIntegerField()
aa_name = models.CharField(max_length=100)
abrvn = models.CharField(max_length=40)
bdy_type = models.CharField(max_length=20)
oic_year = models.CharField(max_length=4)
afctd_area = models.CharField(max_length=120)
geom = models.MultiPolygonField(srid=4269)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
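# Illustrative effect of the receivers above (a sketch, not part of the
# original module; the username/password values are placeholders):
#
#   user = User.objects.create_user('jdoe', password='secret')
#   user.profile  # created automatically by create_user_profile via post_save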
| apache-2.0 | 8,003,407,017,705,290,000 | 35.927711 | 110 | 0.722349 | false | 3.522989 | false | false | false |
Felixaverlant/starter_kit_python_notebook_gapi | auth.py | 2 | 1996 | """A simple example of how to access the Google Analytics API."""
import argparse
from apiclient.discovery import build
import httplib2
from oauth2client import client
from oauth2client import file
from oauth2client import tools
def get_service(api_name, api_version, scope, client_secrets_path):
"""Get a service that communicates to a Google API.
Args:
api_name: string The name of the api to connect to.
api_version: string The api version to connect to.
scope: A list of strings representing the auth scopes to authorize for the
connection.
client_secrets_path: string A path to a valid client secrets file.
Returns:
A service that is connected to the specified API.
"""
# Parse command-line arguments.
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[tools.argparser])
flags = parser.parse_args([])
# Set up a Flow object to be used if we need to authenticate.
flow = client.flow_from_clientsecrets(
client_secrets_path, scope=scope,
message=tools.message_if_missing(client_secrets_path))
# Prepare credentials, and authorize HTTP object with them.
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# credentials will get written back to a file.
storage = file.Storage(api_name + '.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run_flow(flow, storage, flags)
http = credentials.authorize(http=httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
def main():
# Define the auth scopes to request.
scope = ['https://www.googleapis.com/auth/analytics.readonly']
# Authenticate and construct service.
service = get_service('analytics', 'v3', scope, 'client_secrets.json')
return service
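# Illustrative query against the returned service (the profile id
# 'ga:XXXXXXXX' is a placeholder, not a real value):
#
#   service = main()
#   result = service.data().ga().get(
#       ids='ga:XXXXXXXX', start_date='7daysAgo', end_date='today',
#       metrics='ga:sessions').execute()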
if __name__ == '__main__':
main()
| mit | 4,952,279,847,229,344,000 | 31.721311 | 79 | 0.728457 | false | 4.0818 | false | false | false |
benjohnston24/partyCoin | partycoin/updateDonations.py | 1 | 9826 | #! /usr/bin/env python
"""!
-----------------------------------------------------------------------------
File Name : updateDonations.py
Purpose: This module is used to read the data downloaded from the Australian
electoral commission website and update the database accordingly
Updated: Thu Mar 5 19:25:41 AEDT 2015
Created: 24-Feb-2015 21:32:45 AEDT
-----------------------------------------------------------------------------
Revision History
Wed Mar 11 15:36:29 AEDT 2015: Version 0.2
*File renamed
*updateDonations class re-configured to inherit partyCoinDbase class
24-Feb-2015 21:32:45 AEDT: Version 0.1
*Configured to work with sql database only
-----------------------------------------------------------------------------
S.D.G
"""
__author__ = 'Ben Johnston'
__revision__ = '0.2'
__date__ = 'Wed Mar 11 15:36:17 AEDT 2015'
__license__ = 'MPL v2.0'
## LICENSE DETAILS############################################################
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
##IMPORTS#####################################################################
from dbConfig import partyCoinDbase
import re
import os
##############################################################################
#Tables used in tracking the donations made by the political parties
MAIN_TABLE = 'funds_tracker_donation'
SECONDARY_TABLE = 'new_funds'
#regular expression search strings
#the first string searches for the acronym of the state without any alphabet
#characters on either side
#STATES is used to to determine in which state a given political party resides
STATES = {'nsw': '([^A-Za-z]nsw([^A-Za-z]|$))|'
'([^A-Za-z]n[\.,\-]s[\.,\-]w[^A-Za-z])|'
'(new south wales)',
'qld': '([^A-Za-z]qld([^A-Za-z]|$))|'
'(^A-Za-z]q[\.,\-]l[\.,\-]d[^A-Za-z])|'
'(queensland)',
'vic': '([^A-Za-z]vic([^A-Za-z]|$))|'
          '([^A-Za-z]v[\.,\-]i[\.,\-]c[^A-Za-z])|'
          '(victoria)',
'sa': '([^A-Za-z]sa([^A-Za-z]|$))|'
'([^A-Za-z]s[\.,\-]a[^A-Za-z])|'
'(south australia)',
'nt': '([^A-Za-z]nt([^A-Za-z]|$))|'
'([^A-Za-z]n[\.,\-]t[^A-Za-z])|'
'(northern territory)',
'wa': '([^A-Za-z]wa([^A-Za-z]|$))|'
'([^A-Za-z]w[\.,\-]a[^A-Za-z])|'
'(western australia)',
'act': '([^A-Za-z]act([^A-Za-z]|$))|'
'([^A-Za-z]a[\.,\-]c[\.,\-]t[^A-Za-z])|'
          '(australian capital territory)',
'tas': '([^A-Za-z]tas([^A-Za-z]|$))|'
'([^A-Za-z]t[\.,\-]a[\.,\-]s[^A-Za-z])|'
'(tasmania)',
}
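#Illustrative matches against the patterns above (party names are made up;
#the caller lower-cases names and strips '.'/',' before searching):
#  "australian labor party (nsw branch)"    -> 'nsw'
#  "liberal party of australia - victoria"  -> 'vic'
#Names with no state marker fall through to the FEDERAL level below.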
#FEDERAL is used to indicate that a political party is a country wide
#organisation
FEDERAL = 'FED'
#CLASSES#######################################################################
class updateDonations(partyCoinDbase):
"""!
This class is used to update the mysql database containing the political
funding information. The class possesses methods that enable reading of the
data supplied by dataGetter.py
"""
def __init__(self, debug_level=0):
"""!
The constructor for the object
@param self The pointer for the object
"""
#Instantiate the parent class
partyCoinDbase.__init__(self, debug_level=debug_level)
self.connect_to_db()
def prepare_for_new_data(self):
#Drop the secondary table if it exists
try:
self.execute_command('DROP TABLE %s' % SECONDARY_TABLE)
except Exception as e:
if e[1].find('Unknown table') >= 0:
pass
else:
raise(e)
#Create a new blank instances of the secondary table
msg = 'CREATE TABLE %s LIKE %s' % (
SECONDARY_TABLE, MAIN_TABLE)
self.execute_command(msg)
def add_funds_to_db(self,
year=None,
party=None,
party_state=None,
donor=None,
address=None,
state=None,
postcode=None,
don_type=None,
amount=None):
"""!
This method adds donation information to the database
"""
if (donor is None) or (address is None) or (state is None) or\
(postcode is None) or (don_type is None) or (amount is None)\
or (party is None) or (year is None) or (party_state is None):
return False
#Check inputs
year = year.replace("'", '')
party = party.replace("'", '')
donor = donor.replace("'", '')
address = address.replace("'", '')
don_type = don_type.replace("'", '')
state = state.replace("'", '')
party_state = party_state.replace("'", '')
msg = 'INSERT INTO %s(year, party,'\
'donor, address, state, postCode, donor_type, amount, '\
'party_state)'\
" VALUES('%s', '%s','%s','%s', '%s', '%s', '%s', %0.2f, '%s')" %\
(SECONDARY_TABLE, year, party, donor, address, state,
postcode, don_type, amount, party_state.upper())
self.execute_command(msg)
def replace_old_data(self):
"""!
        This method replaces the old data in the database with the recently
        collected data
"""
try:
self.execute_command('DROP TABLE %s' % MAIN_TABLE)
except Exception as e:
if e[1].find('Unknown table') >= 0:
pass
else:
raise(e)
self.execute_command('ALTER TABLE %s RENAME %s' % (SECONDARY_TABLE,
MAIN_TABLE))
def import_data_from_dir(self, log_folder=None, file_extension=None):
"""!
This method is used to import data from log files into the database
@param self The pointer for the object
@param log_folder The folder containing the log files to import
@param file_extension The file type to be imported
"""
#Check the inputs are valid
if (log_folder is None) or (file_extension is None):
msg = 'log_folder and/or file_extension not supplied'
self.info_logger.info(msg)
return
#Walk through the collected file list
working_dir = log_folder
file_list = os.listdir(working_dir)
counter = 0
for f in file_list:
#If the file is a csv file
if os.path.splitext(f)[1] == file_extension:
counter += 1
self.info_logger.info("Reading file %s" % f)
row_counter = 0
f_handle = open(os.path.join(working_dir, f), 'r')
data = f_handle.read().split(',\r\n')
#Process the data based on the row
for row in data:
                #Ensure the file actually contains data
if len(data) == 1:
break
#The first row contains the year
if (row_counter == 0):
year = row.split(' ')
year = year[len(year) - 1][:4]
#The second row contains the name
elif (row_counter == 1):
party = row.split('data')[0].replace(',', '')
#party = row.split(',')[0]
party_state = None
#find the state
test_party = party.lower().\
replace('.', '').\
replace(',', '')
for state in STATES.keys():
#Check which state the party is from
if re.search(STATES[state], test_party):
party_state = state
break
#If a state has been allocated, break the loop
if party_state is not None:
break
#If a state has not been allocated default to FEDERAL
#level
if party_state is None:
party_state = FEDERAL
#Ignore the third row
elif(row_counter == 2):
pass
#Handle data rows except for last blank lines
elif (row != ''):
extracted_data = row.split('","')
#Remove existing quotation marks
for i in range(len(extracted_data)):
extracted_data[i] = \
extracted_data[i].replace('"', '').\
replace("'", '')
self.add_funds_to_db(year=year,
party=party,
party_state=party_state,
donor=extracted_data[0],
address=extracted_data[1],
state=extracted_data[3],
postcode=extracted_data[4],
don_type=extracted_data[6],
amount=float(extracted_data[5]))
row_counter += 1
self.replace_old_data()
| mpl-2.0 | -7,321,782,133,038,778,000 | 40.635593 | 79 | 0.448199 | false | 4.261058 | false | false | false |
UITools/saleor | saleor/dashboard/seo/fields.py | 1 | 1793 | from django import forms
from django.utils.translation import pgettext_lazy
from ...seo.models import SeoModel
from ..widgets import CharsLeftWidget
SEO_FIELD_HELP_TEXT = pgettext_lazy(
'Form field help text',
'If empty, the preview shows what will be autogenerated.')
MIN_DESCRIPTION_LENGTH = 120
MIN_TITLE_LENGTH = 25
DESCRIPTION_MAX_LENGTH = SeoModel._meta.get_field('seo_description').max_length
TITLE_MAX_LENGTH = SeoModel._meta.get_field('seo_title').max_length
class SeoTitleField(forms.CharField):
widget = CharsLeftWidget(
attrs={
'data-min-recommended-length': MIN_TITLE_LENGTH,
'maxlength': TITLE_MAX_LENGTH})
def __init__(self, extra_attrs=None, required=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_length = TITLE_MAX_LENGTH
if extra_attrs:
self.widget.attrs.update(extra_attrs)
self.required = required
self.help_text = SEO_FIELD_HELP_TEXT
self.label = pgettext_lazy(
'A SEO friendly title', 'SEO Friendly Title')
class SeoDescriptionField(forms.CharField):
help_text = SEO_FIELD_HELP_TEXT
widget = CharsLeftWidget(
attrs={
'help_text': SEO_FIELD_HELP_TEXT,
'data-min-recommended-length': MIN_DESCRIPTION_LENGTH,
'maxlength': DESCRIPTION_MAX_LENGTH})
def __init__(self, extra_attrs=None, required=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.max_length = DESCRIPTION_MAX_LENGTH
if extra_attrs:
self.widget.attrs.update(extra_attrs)
self.required = required
self.help_text = SEO_FIELD_HELP_TEXT
self.label = pgettext_lazy(
'A SEO friendly description', 'SEO Friendly Description')
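# Illustrative use in a dashboard ModelForm (the form/model names here are
# placeholders, not taken from this module):
#
#   class ProductForm(forms.ModelForm):
#       seo_title = SeoTitleField(extra_attrs={'data-bind': 'name'})
#       seo_description = SeoDescriptionField()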
| bsd-3-clause | -7,445,493,343,679,669,000 | 34.86 | 79 | 0.650864 | false | 3.659184 | false | false | false |
DemocracyClub/UK-Polling-Stations | polling_stations/apps/data_importers/management/commands/import_slough.py | 1 | 1132 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E06000039"
addresses_name = (
"parl.2019-12-12/Version 1/Democracy_Club__12December2019slough.CSV"
)
stations_name = "parl.2019-12-12/Version 1/Democracy_Club__12December2019slough.CSV"
elections = ["parl.2019-12-12"]
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
if record.polling_place_id == "1000": # Claycots School [Town Hall]
record = record._replace(polling_place_easting="0")
record = record._replace(polling_place_northing="0")
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100081042223",
"10022917421",
]:
rec["accept_suggestion"] = False
if uprn == "100080321307":
rec["postcode"] = "SL6 0LG"
return rec
| bsd-3-clause | -1,444,987,101,396,873,500 | 34.375 | 88 | 0.638693 | false | 3.215909 | false | false | false |
numenta-archive/htmresearch | projects/dp1/dp_experiment1.py | 3 | 12622 | # Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This is for running some very preliminary disjoint pooling experiments.
"""
import cPickle
from multiprocessing import Pool
import random
import time
import numpy
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
from htmresearch.frameworks.layers.l2_l4_inference import L4L2Experiment
from htmresearch.frameworks.layers.object_machine_factory import (
createObjectMachine
)
def printColumnPoolerDiagnostics(pooler):
print "sampleSizeProximal: ", pooler.sampleSizeProximal
print "Average number of proximal synapses per cell:",
print float(pooler.numberOfProximalSynapses()) / pooler.cellCount
print "Average number of distal segments per cell:",
print float(pooler.numberOfDistalSegments()) / pooler.cellCount
print "Average number of connected distal synapses per cell:",
print float(pooler.numberOfConnectedDistalSynapses()) / pooler.cellCount
print "Average number of distal synapses per cell:",
print float(pooler.numberOfDistalSynapses()) / pooler.cellCount
def runExperiment(args):
"""
Run experiment. args is a dict representing the parameters. We do it this way
to support multiprocessing.
  The method returns the args dict; in this preliminary experiment it is
  returned unchanged (fuller variants add accuracy-metric keys to it).
"""
numObjects = args.get("numObjects", 10)
numLocations = args.get("numLocations", 10)
numFeatures = args.get("numFeatures", 10)
numColumns = args.get("numColumns", 2)
sensorInputSize = args.get("sensorInputSize", 300)
networkType = args.get("networkType", "MultipleL4L2Columns")
longDistanceConnections = args.get("longDistanceConnections", 0)
locationNoise = args.get("locationNoise", 0.0)
featureNoise = args.get("featureNoise", 0.0)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
plotInferenceStats = args.get("plotInferenceStats", True)
settlingTime = args.get("settlingTime", 3)
includeRandomLocation = args.get("includeRandomLocation", False)
enableFeedback = args.get("enableFeedback", True)
numAmbiguousLocations = args.get("numAmbiguousLocations", 0)
numInferenceRpts = args.get("numInferenceRpts", 1)
numLearningRpts = args.get("numLearningRpts", 3)
l2Params = args.get("l2Params", None)
l4Params = args.get("l4Params", None)
# Create the objects
objects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=sensorInputSize,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
r = objects.objectConfusion()
print "Average common pairs in objects=", r[0],
print ", locations=",r[1],", features=",r[2]
# print "Total number of objects created:",len(objects.getObjects())
# print "Objects are:"
# for o in objects:
# pairs = objects[o]
# pairs.sort()
# print str(o) + ": " + str(pairs)
# This object machine will simulate objects where each object is just one
# unique feature/location pair. We will use this to pretrain L4/L2 with
# individual pairs.
pairObjects = createObjectMachine(
machineType="simple",
numInputBits=20,
sensorInputSize=sensorInputSize,
externalInputSize=2400,
numCorticalColumns=numColumns,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
# Create "pair objects" consisting of all unique F/L pairs from our objects.
# These pairs should have the same SDRs as the original objects.
pairObjects.locations = objects.locations
pairObjects.features = objects.features
distinctPairs = objects.getDistinctPairs()
print "Number of distinct feature/location pairs:",len(distinctPairs)
for pairNumber,pair in enumerate(distinctPairs):
pairObjects.addObject([pair], pairNumber)
#####################################################
#
# Setup experiment and train the network
name = "dp_O%03d_L%03d_F%03d_C%03d_T%03d" % (
numObjects, numLocations, numFeatures, numColumns, trialNum
)
exp = L4L2Experiment(
name,
numCorticalColumns=numColumns,
L2Overrides=l2Params,
L4Overrides=l4Params,
networkType = networkType,
longDistanceConnections=longDistanceConnections,
inputSize=sensorInputSize,
externalInputSize=2400,
numInputBits=20,
seed=trialNum,
enableFeedback=enableFeedback,
numLearningPoints=numLearningRpts,
)
# Learn all FL pairs in each L4 and in each L2
# Learning in L2 involves choosing a small random number of cells, growing
# proximal synapses to L4 cells. Growing distal synapses to active cells in
# each neighboring column. Each column gets its own distal segment.
exp.learnObjects(pairObjects.provideObjectsToLearn())
# Verify that all columns learned the pairs
# numCorrectClassifications = 0
# for pairId in pairObjects:
#
# obj = pairObjects[pairId]
# objectSensations = {}
# for c in range(numColumns):
# objectSensations[c] = [obj[0]]*settlingTime
#
# inferConfig = {
# "object": pairId,
# "numSteps": settlingTime,
# "pairs": objectSensations,
# }
#
# inferenceSDRs = pairObjects.provideObjectToInfer(inferConfig)
#
# exp.infer(inferenceSDRs, objectName=pairId, reset=False)
#
# if exp.isObjectClassified(pairId, minOverlap=30):
# numCorrectClassifications += 1
#
# exp.sendReset()
#
# print "Classification accuracy for pairs=",100.0*numCorrectClassifications/len(distinctPairs)
########################################################################
#
# Create "object representations" in L2 by simultaneously invoking the union
# of all FL pairs in an object and doing some sort of spatial pooling to
# create L2 representation.
exp.resetStatistics()
for objectId in objects:
# Create one sensation per object consisting of the union of all features
# and the union of locations.
ul, uf = objects.getUniqueFeaturesLocationsInObject(objectId)
print "Object",objectId,"Num unique features:",len(uf),"Num unique locations:",len(ul)
objectSensations = {}
for c in range(numColumns):
objectSensations[c] = [(tuple(ul), tuple(uf))]*settlingTime
inferConfig = {
"object": objectId,
"numSteps": settlingTime,
"pairs": objectSensations,
}
inferenceSDRs = objects.provideObjectToInfer(inferConfig)
exp.infer(inferenceSDRs, objectName="Object "+str(objectId))
# Compute confusion matrix between all objects as network settles
for iteration in range(settlingTime):
confusion = numpy.zeros((numObjects, numObjects))
for o1 in objects:
for o2 in objects:
confusion[o1, o2] = len(set(exp.statistics[o1]["Full L2 SDR C0"][iteration]) &
set(exp.statistics[o2]["Full L2 SDR C0"][iteration]) )
plt.figure()
plt.imshow(confusion)
plt.xlabel('Object #')
plt.ylabel('Object #')
plt.title("Object overlaps")
plt.colorbar()
plt.savefig("confusion_random_10L_5F_"+str(iteration)+".pdf")
plt.close()
for col in range(numColumns):
print "Diagnostics for column",col
printColumnPoolerDiagnostics(exp.getAlgorithmInstance(column=col))
print
return args
# Show average overlap as a function of number of shared FL pairs,
# shared locations, shared features
# Compute confusion matrix showing number of shared FL pairs
# Compute confusion matrix using our normal method
def runExperimentPool(numObjects,
numLocations,
numFeatures,
numColumns,
longDistanceConnectionsRange = [0.0],
numWorkers=7,
nTrials=1,
numPoints=10,
locationNoiseRange=[0.0],
featureNoiseRange=[0.0],
enableFeedback=[True],
ambiguousLocationsRange=[0],
numInferenceRpts=1,
settlingTime=3,
l2Params=None,
l4Params=None,
resultsName="convergence_results.pkl"):
"""
Allows you to run a number of experiments using multiple processes.
For each parameter except numWorkers, pass in a list containing valid values
for that parameter. The cross product of everything is run, and each
combination is run nTrials times.
Returns a list of dict containing detailed results from each experiment.
Also pickles and saves the results in resultsName for later analysis.
Example:
results = runExperimentPool(
numObjects=[10],
numLocations=[5],
numFeatures=[5],
numColumns=[2,3,4,5,6],
numWorkers=8,
nTrials=5)
"""
# Create function arguments for every possibility
args = []
for c in reversed(numColumns):
for o in reversed(numObjects):
for l in numLocations:
for f in numFeatures:
for p in longDistanceConnectionsRange:
for t in range(nTrials):
for locationNoise in locationNoiseRange:
for featureNoise in featureNoiseRange:
for ambiguousLocations in ambiguousLocationsRange:
for feedback in enableFeedback:
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"numPoints": numPoints,
"longDistanceConnections" : p,
"plotInferenceStats": False,
"locationNoise": locationNoise,
"featureNoise": featureNoise,
"enableFeedback": feedback,
"numAmbiguousLocations": ambiguousLocations,
"numInferenceRpts": numInferenceRpts,
"l2Params": l2Params,
"l4Params": l4Params,
"settlingTime": settlingTime,
}
)
numExperiments = len(args)
print "{} experiments to run, {} workers".format(numExperiments, numWorkers)
# Run the pool
if numWorkers > 1:
pool = Pool(processes=numWorkers)
rs = pool.map_async(runExperiment, args, chunksize=1)
while not rs.ready():
remaining = rs._number_left
pctDone = 100.0 - (100.0*remaining) / numExperiments
print " =>", remaining, "experiments remaining, percent complete=",pctDone
time.sleep(5)
pool.close() # No more work
pool.join()
result = rs.get()
else:
result = []
for arg in args:
result.append(runExperiment(arg))
# print "Full results:"
# pprint.pprint(result, width=150)
# Pickle results for later use
with open(resultsName,"wb") as f:
cPickle.dump(result,f)
return result
if __name__ == "__main__":
# This is how you run a specific experiment in single process mode. Useful
# for debugging, profiling, etc.
results = runExperiment(
{
"numObjects": 20,
"numPoints": 10,
"numLocations": 10,
"numFeatures": 5,
"numColumns": 1,
"trialNum": 4,
"settlingTime": 3,
"plotInferenceStats": False, # Outputs detailed graphs
}
)
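  # A multi-process parameter sweep can be launched the same way; the values
  # below are illustrative and mirror the runExperimentPool docstring example:
  #
  #   runExperimentPool(numObjects=[20], numLocations=[10], numFeatures=[5],
  #                     numColumns=[1], numWorkers=4, nTrials=3)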
| agpl-3.0 | -7,704,964,464,401,315,000 | 33.77135 | 97 | 0.647283 | false | 4.114081 | false | false | false |
huanghao/mic | plugins/imager/loop_plugin.py | 5 | 9832 | #!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os
import shutil
import tempfile
from mic import chroot, msger, rt_util
from mic.utils import misc, fs_related, errors, cmdln
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.imager.loop import LoopImageCreator, load_mountpoints
from mic.pluginbase import ImagerPlugin
class LoopPlugin(ImagerPlugin):
name = 'loop'
@classmethod
@cmdln.option("--compress-disk-image", dest="compress_image",
type='choice', choices=("gz", "bz2", "lzo"), default=None,
help="Same with --compress-image")
# alias to compress-image for compatibility
@cmdln.option("--compress-image", dest="compress_image",
type='choice', choices=("gz", "bz2", "lzo"), default=None,
help="Compress all loop images with 'gz' or 'bz2' or 'lzo',"
"Note: if you want to use 'lzo', package 'lzop' is needed to"
"be installed manually.")
@cmdln.option("--shrink", action='store_true', default=False,
help="Whether to shrink loop images to minimal size")
def do_create(self, subcmd, opts, *args):
"""${cmd_name}: create loop image
Usage:
${name} ${cmd_name} <ksfile> [OPTS]
${cmd_option_list}
"""
if len(args) != 1:
raise errors.Usage("Extra arguments given")
creatoropts = configmgr.create
ksconf = args[0]
if creatoropts['runtime'] == "bootstrap":
configmgr._ksconf = ksconf
rt_util.bootstrap_mic()
elif not rt_util.inbootstrap():
try:
fs_related.find_binary_path('mic-native')
except errors.CreatorError:
if not msger.ask("Subpackage \"mic-native\" has not been "
"installed in your host system, still "
"continue with \"native\" running mode?",
False):
raise errors.Abort("Abort because subpackage 'mic-native' "
"has not been installed")
recording_pkgs = []
if len(creatoropts['record_pkgs']) > 0:
recording_pkgs = creatoropts['record_pkgs']
if creatoropts['release'] is not None:
if 'name' not in recording_pkgs:
recording_pkgs.append('name')
if 'vcs' not in recording_pkgs:
recording_pkgs.append('vcs')
configmgr._ksconf = ksconf
# try to find the pkgmgr
pkgmgr = None
backends = pluginmgr.get_plugins('backend')
if 'auto' == creatoropts['pkgmgr']:
for key in configmgr.prefer_backends:
if key in backends:
pkgmgr = backends[key]
break
else:
for key in backends.keys():
if key == creatoropts['pkgmgr']:
pkgmgr = backends[key]
break
if not pkgmgr:
raise errors.CreatorError("Can't find backend: %s, "
"available choices: %s" %
(creatoropts['pkgmgr'],
','.join(backends.keys())))
creator = LoopImageCreator(creatoropts,
pkgmgr,
opts.compress_image,
opts.shrink)
if len(recording_pkgs) > 0:
creator._recording_pkgs = recording_pkgs
image_names = [creator.name + ".img"]
image_names.extend(creator.get_image_names())
self.check_image_exists(creator.destdir,
creator.pack_to,
image_names,
creatoropts['release'])
try:
creator.check_depend_tools()
creator.mount(None, creatoropts["cachedir"])
creator.install()
creator.configure(creatoropts["repomd"])
creator.copy_kernel()
creator.unmount()
creator.package(creatoropts["destdir"])
creator.create_manifest()
if creatoropts['release'] is not None:
creator.release_output(ksconf,
creatoropts['destdir'],
creatoropts['release'])
creator.print_outimage_info()
except errors.CreatorError:
raise
finally:
creator.cleanup()
msger.info("Finished.")
return 0
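    # Illustrative command line exercising this sub-command (the ks file name
    # is a placeholder):
    #
    #   mic create loop image.ks --compress-image=gz --shrink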
@classmethod
def _do_chroot_tar(cls, target, cmd=[]):
mountfp_xml = os.path.splitext(target)[0] + '.xml'
if not os.path.exists(mountfp_xml):
raise errors.CreatorError("No mount point file found for this tar "
"image, please check %s" % mountfp_xml)
import tarfile
tar = tarfile.open(target, 'r')
tmpdir = misc.mkdtemp()
tar.extractall(path=tmpdir)
tar.close()
mntdir = misc.mkdtemp()
loops = []
for (mp, label, name, size, fstype) in load_mountpoints(mountfp_xml):
if fstype in ("ext2", "ext3", "ext4"):
myDiskMount = fs_related.ExtDiskMount
elif fstype == "btrfs":
myDiskMount = fs_related.BtrfsDiskMount
elif fstype in ("vfat", "msdos"):
myDiskMount = fs_related.VfatDiskMount
else:
raise errors.CreatorError("Cannot support fstype: %s" % fstype)
name = os.path.join(tmpdir, name)
size = size * 1024L * 1024L
loop = myDiskMount(fs_related.SparseLoopbackDisk(name, size),
os.path.join(mntdir, mp.lstrip('/')),
fstype, size, label)
try:
msger.verbose("Mount %s to %s" % (mp, mntdir + mp))
fs_related.makedirs(os.path.join(mntdir, mp.lstrip('/')))
loop.mount()
except:
loop.cleanup()
for lp in reversed(loops):
chroot.cleanup_after_chroot("img", lp, None, mntdir)
shutil.rmtree(tmpdir, ignore_errors=True)
raise
loops.append(loop)
try:
if len(cmd) != 0:
cmdline = "/usr/bin/env HOME=/root " + ' '.join(cmd)
else:
cmdline = "/usr/bin/env HOME=/root /bin/bash"
chroot.chroot(mntdir, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." % target)
finally:
for loop in reversed(loops):
chroot.cleanup_after_chroot("img", loop, None, mntdir)
shutil.rmtree(tmpdir, ignore_errors=True)
@classmethod
def do_chroot(cls, target, cmd=[]):
if target.endswith('.tar'):
import tarfile
if tarfile.is_tarfile(target):
LoopPlugin._do_chroot_tar(target, cmd)
return
else:
raise errors.CreatorError("damaged tarball for loop images")
img = target
imgsize = misc.get_file_size(img) * 1024L * 1024L
imgtype = misc.get_image_type(img)
if imgtype == "btrfsimg":
fstype = "btrfs"
myDiskMount = fs_related.BtrfsDiskMount
elif imgtype in ("ext3fsimg", "ext4fsimg"):
fstype = imgtype[:4]
myDiskMount = fs_related.ExtDiskMount
else:
raise errors.CreatorError("Unsupported filesystem type: %s" \
% imgtype)
extmnt = misc.mkdtemp()
extloop = myDiskMount(fs_related.SparseLoopbackDisk(img, imgsize),
extmnt,
fstype,
4096,
"%s label" % fstype)
try:
extloop.mount()
except errors.MountError:
extloop.cleanup()
shutil.rmtree(extmnt, ignore_errors=True)
raise
try:
if len(cmd) != 0:
cmdline = ' '.join(cmd)
else:
cmdline = "/bin/bash"
envcmd = fs_related.find_binary_inchroot("env", extmnt)
if envcmd:
cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
chroot.chroot(extmnt, None, cmdline)
except:
raise errors.CreatorError("Failed to chroot to %s." % img)
finally:
chroot.cleanup_after_chroot("img", extloop, None, extmnt)
@classmethod
def do_unpack(cls, srcimg):
image = os.path.join(tempfile.mkdtemp(dir="/var/tmp", prefix="tmp"),
"target.img")
msger.info("Copying file system ...")
shutil.copyfile(srcimg, image)
return image
| gpl-2.0 | -8,321,301,634,943,653,000 | 36.526718 | 79 | 0.518918 | false | 4.438826 | true | false | false |
palmtree5/Red-DiscordBot | redbot/cogs/audio/core/utilities/parsers.py | 4 | 1323 | import logging
import re
import struct
from typing import Final, Optional
import aiohttp
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Utilities.Parsing")
STREAM_TITLE: Final[re.Pattern] = re.compile(br"StreamTitle='([^']*)';")
class ParsingUtilities(MixinMeta, metaclass=CompositeMetaClass):
async def icyparser(self, url: str) -> Optional[str]:
try:
async with self.session.get(url, headers={"Icy-MetaData": "1"}) as resp:
metaint = int(resp.headers["icy-metaint"])
for _ in range(5):
await resp.content.readexactly(metaint)
metadata_length = struct.unpack("B", await resp.content.readexactly(1))[0] * 16
metadata = await resp.content.readexactly(metadata_length)
m = re.search(STREAM_TITLE, metadata.rstrip(b"\0"))
if m:
title = m.group(1)
if title:
title = title.decode("utf-8", errors="replace")
return title
else:
return None
except (KeyError, aiohttp.ClientConnectionError, aiohttp.ClientResponseError):
return None
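    # Illustrative call from another coroutine on this cog (the stream URL is
    # a placeholder):
    #
    #   title = await self.icyparser("http://icecast.example/stream")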
| gpl-3.0 | 702,563,211,058,560,600 | 36.8 | 99 | 0.571429 | false | 4.309446 | false | false | false |
facebook/screenshot-tests-for-android | plugin/src/py/android_screenshot_tests/device_name_calculator.py | 1 | 3786 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
from .adb_executor import AdbExecutor
class DeviceNameCalculator:
def __init__(self, executor=AdbExecutor()):
self.executor = executor
def name(self):
api_version_text = self._api_version_text()
play_services_text = self._play_services_text()
screen_density_text = self._screen_density_text()
screen_size_text = self._screen_size_text()
architecture_text = self._architecture_text()
locale = self._locale()
device_parameters = [
api_version_text,
play_services_text,
screen_density_text,
screen_size_text,
architecture_text,
locale,
]
if None in device_parameters:
            raise RuntimeError(
                "ERROR: you shouldn't see this in normal operation, "
                "file a bug report please.\n\n"
                "One or more device params are None"
            )
return "{0}_{1}_{2}_{3}_{4}_{5}".format(
api_version_text,
play_services_text,
screen_density_text,
screen_size_text,
architecture_text,
locale,
)
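    # The computed name looks like, e.g. (illustrative values only):
    #   "API_28_GP_XHDPI_1080x1920_arm64-v8a_en-US"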
def _screen_density_text(self):
density = int(self._screen_density())
if density in range(0, 121):
return "LDPI"
elif density in range(121, 161):
return "MDPI"
elif density in range(161, 241):
return "HDPI"
elif density in range(241, 321):
return "XHDPI"
elif density in range(321, 481):
return "XXHDPI"
return "XXXHDPI"
def _screen_density(self):
result = self.executor.execute(["shell", "wm", "density"])
density = re.search("[0-9]+", result)
if density:
return density.group(0)
def _screen_size_text(self):
result = self.executor.execute(["shell", "wm", "size"])
density = re.search("[0-9]+x[0-9]+", result)
if density:
return density.group(0)
def _has_play_services(self):
try:
output = self.executor.execute(
["shell", "pm", "path", "com.google.android.gms"]
)
return True if output else False
except subprocess.CalledProcessError:
return False
def _play_services_text(self):
play_services = self._has_play_services()
return "GP" if play_services else "NO_GP"
def _api_version(self):
return self.executor.execute(["shell", "getprop", "ro.build.version.sdk"])
def _api_version_text(self):
return "API_{0}".format(int(self._api_version()))
def _architecture_text(self):
architecture = self.executor.execute(["shell", "getprop", "ro.product.cpu.abi"])
return architecture.rstrip()
def _locale(self):
persist_locale = self.executor.execute(
["shell", "getprop", "persist.sys.locale"]
)
product_locale = self.executor.execute(
["shell", "getprop", "ro.product.locale"]
)
return persist_locale.rstrip() if persist_locale else product_locale.rstrip()
| apache-2.0 | 7,558,238,337,540,248,000 | 31.637931 | 88 | 0.589276 | false | 4.07535 | false | false | false |
skirpichev/omg | diofant/tests/polys/test_polyfuncs.py | 2 | 2972 | """Tests for high-level polynomials manipulation functions."""
import pytest
from diofant import (ComputationFailed, MultivariatePolynomialError, horner,
interpolate, symbols, symmetrize, viete)
from diofant.abc import a, b, c, d, e, x, y, z
__all__ = ()
def test_symmetrize():
assert symmetrize(0, x, y, z) == (0, 0)
assert symmetrize(1, x, y, z) == (1, 0)
s1 = x + y + z
s2 = x*y + x*z + y*z
assert symmetrize(1) == (1, 0)
assert symmetrize(1, formal=True) == (1, 0, [])
assert symmetrize(x) == (x, 0)
assert symmetrize(x + 1) == (x + 1, 0)
assert symmetrize(x, x, y) == (x + y, -y)
assert symmetrize(x + 1, x, y) == (x + y + 1, -y)
assert symmetrize(x, x, y, z) == (s1, -y - z)
assert symmetrize(x + 1, x, y, z) == (s1 + 1, -y - z)
assert symmetrize(x**2, x, y, z) == (s1**2 - 2*s2, -y**2 - z**2)
assert symmetrize(x**2 + y**2) == (-2*x*y + (x + y)**2, 0)
assert symmetrize(x**2 - y**2) == (-2*x*y + (x + y)**2, -2*y**2)
assert symmetrize(x**3 + y**2 + a*x**2 + b*y**3, x, y) == \
(-3*x*y*(x + y) - 2*a*x*y + a*(x + y)**2 + (x + y)**3,
y**2*(1 - a) + y**3*(b - 1))
U = [u0, u1, u2] = symbols('u:3')
assert symmetrize(x + 1, x, y, z, formal=True, symbols=U) == \
(u0 + 1, -y - z, [(u0, x + y + z), (u1, x*y + x*z + y*z), (u2, x*y*z)])
assert symmetrize([1, 2, 3]) == [(1, 0), (2, 0), (3, 0)]
assert symmetrize([1, 2, 3], formal=True) == ([(1, 0), (2, 0), (3, 0)], [])
assert symmetrize([x + y, x - y]) == [(x + y, 0), (x + y, -2*y)]
def test_horner():
assert horner(0) == 0
assert horner(1) == 1
assert horner(x) == x
assert horner(x + 1) == x + 1
assert horner(x**2 + 1) == x**2 + 1
assert horner(x**2 + x) == (x + 1)*x
assert horner(x**2 + x + 1) == (x + 1)*x + 1
assert horner(
9*x**4 + 8*x**3 + 7*x**2 + 6*x + 5) == (((9*x + 8)*x + 7)*x + 6)*x + 5
assert horner(
a*x**4 + b*x**3 + c*x**2 + d*x + e) == (((a*x + b)*x + c)*x + d)*x + e
assert horner(4*x**2*y**2 + 2*x**2*y + 2*x*y**2 + x*y, wrt=x) == ((
4*y + 2)*x*y + (2*y + 1)*y)*x
assert horner(4*x**2*y**2 + 2*x**2*y + 2*x*y**2 + x*y, wrt=y) == ((
4*x + 2)*y*x + (2*x + 1)*x)*y
def test_interpolate():
assert interpolate([1, 4, 9, 16], x) == x**2
assert interpolate([(1, 1), (2, 4), (3, 9)], x) == x**2
assert interpolate([(1, 2), (2, 5), (3, 10)], x) == 1 + x**2
assert interpolate({1: 2, 2: 5, 3: 10}, x) == 1 + x**2
def test_viete():
r1, r2 = symbols('r1, r2')
ans = [(r1 + r2, -b/a), (r1*r2, c/a)]
assert viete(a*x**2 + b*x + c, [r1, r2], x) == ans
assert viete(a*x**2 + b*x + c, None, x) == ans
pytest.raises(ValueError, lambda: viete(1, [], x))
pytest.raises(ValueError, lambda: viete(x**2 + 1, [r1]))
pytest.raises(MultivariatePolynomialError, lambda: viete(x + y, [r1]))
pytest.raises(ComputationFailed, lambda: viete(1))
| bsd-3-clause | -5,562,564,503,117,511,000 | 31.659341 | 79 | 0.475774 | false | 2.253222 | true | false | false |
MostlyOpen/odoo_api | myo_employee.py | 1 | 11410 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import print_function
import sqlite3
def hr_department_create(client, department_name):
hr_department_model = client.model('hr.department')
hr_department_browse = hr_department_model.browse([('name', '=', department_name), ])
if hr_department_browse.id == []:
values = {
'name': department_name,
}
hr_department_model.create(values)
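# Illustrative call (assumes an erppeek-style client; server URL, database and
# credentials are placeholders):
#
#   import erppeek
#   client = erppeek.Client('http://localhost:8069', 'odoo_db', 'admin', 'admin')
#   hr_department_create(client, 'Radiology')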
def hr_department_export_sqlite(client, args, db_path, table_name):
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute(
'''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY,
name,
new_id INTEGER
);
'''
)
department_model = client.model('hr.department')
department_browse = department_model.browse(args)
department_count = 0
for department_reg in department_browse:
department_count += 1
print(department_count, department_reg.id, department_reg.name.encode("utf-8"))
cursor.execute('''
INSERT INTO ''' + table_name + '''(
id,
name
)
VALUES(?,?)
''', (department_reg.id,
department_reg.name,
)
)
conn.commit()
conn.close()
print()
print('--> department_count: ', department_count)
print()
def hr_department_import_sqlite(client, args, db_path, table_name):
hr_department_model = client.model('hr.department')
conn = sqlite3.connect(db_path)
# conn.text_factory = str
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor2 = conn.cursor()
data = cursor.execute('''
SELECT
id,
name,
new_id
FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
hr_department_count = 0
for row in cursor:
hr_department_count += 1
print(
hr_department_count, row['id'], row['name'],
)
hr_department_browse = hr_department_model.browse([('name', '=', row['name']), ])
if hr_department_browse.id == []:
values = {
'name': row['name'],
}
hr_department_id = hr_department_model.create(values).id
cursor2.execute(
'''
UPDATE ''' + table_name + '''
SET new_id = ?
WHERE id = ?;''',
(hr_department_id,
row['id']
)
)
conn.commit()
conn.close()
print()
print('--> hr_department_count: ', hr_department_count)
def employee_create_from_user(client, user_login, job_title, department_name):
print('Configuring employee "' + user_login + '"...')
    user_model = client.model('res.users')
hr_employee_model = client.model('hr.employee')
hr_job_model = client.model('hr.job')
hr_department_model = client.model('hr.department')
    user_browse = user_model.browse([('login', '=', user_login), ])
    user_ids = user_browse.id
    if user_ids == []:
        print('--> User "' + user_login + '" does not exist!')
    else:
        user = user_browse[0]
        hr_employee_browse = hr_employee_model.browse([('name', '=', user.name), ])
        employee_ids = hr_employee_browse.id
        if employee_ids != []:
            print('--> Employee "' + user.name + '" already exists!')
else:
job_id = False
hr_job_browse = hr_job_model.browse([('name', '=', job_title), ])
if hr_job_browse.id != []:
job_id = hr_job_browse[0].id
department_id = False
hr_department_browse = hr_department_model.browse([('name', '=', department_name), ])
if hr_department_browse.id != []:
department_id = hr_department_browse[0].id
values = {
'name': user.name,
'address_id': user.partner_id.id,
'work_email': user.partner_id.email,
'job_id': job_id,
'department_id': department_id,
'user_id': user.id,
}
hr_employee_model.create(values)
print()
print('--> Done')
print()
def hr_employee_export_sqlite(client, args, db_path, table_name):
conn = sqlite3.connect(db_path)
conn.text_factory = str
cursor = conn.cursor()
try:
cursor.execute('''DROP TABLE ''' + table_name + ''';''')
except Exception as e:
print('------->', e)
cursor.execute(
'''
CREATE TABLE ''' + table_name + ''' (
id INTEGER NOT NULL PRIMARY KEY,
resource_id,
name,
code,
work_email,
department_id,
address_id,
job_id,
user_id,
image,
new_id INTEGER
);
'''
)
employee_model = client.model('hr.employee')
employee_browse = employee_model.browse(args)
employee_count = 0
for employee_reg in employee_browse:
employee_count += 1
print(employee_count, employee_reg.id, employee_reg.name.encode("utf-8"))
department_id = None
if employee_reg.department_id:
department_id = employee_reg.department_id.id
job_id = None
if employee_reg.job_id:
job_id = employee_reg.job_id.id
# address_id = None
# if employee_reg.address_id:
# address_id = employee_reg.address_id.id
# user_id = None
# if employee_reg.user_id:
# user_id = employee_reg.user_id.id
image = None
if employee_reg.image:
image = employee_reg.image
cursor.execute('''
INSERT INTO ''' + table_name + '''(
id,
resource_id,
name,
code,
work_email,
department_id,
address_id,
job_id,
user_id,
image
)
VALUES(?,?,?,?,?,?,?,?,?,?)
''', (employee_reg.id,
employee_reg.resource_id.id,
employee_reg.name,
employee_reg.code,
employee_reg.work_email,
department_id,
employee_reg.address_id.id,
job_id,
employee_reg.user_id.id,
image,
)
)
conn.commit()
conn.close()
print()
print('--> employee_count: ', employee_count)
print()
def hr_employee_import_sqlite(
client, args, db_path, table_name, hr_department_table_name, res_partner_table_name, res_users_table_name
):
hr_employee_model = client.model('hr.employee')
conn = sqlite3.connect(db_path)
# conn.text_factory = str
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor2 = conn.cursor()
data = cursor.execute('''
SELECT
id,
resource_id,
name,
code,
work_email,
department_id,
address_id,
job_id,
user_id,
image,
new_id
FROM ''' + table_name + ''';
''')
print(data)
print([field[0] for field in cursor.description])
hr_employee_count = 0
for row in cursor:
hr_employee_count += 1
print(
hr_employee_count, row['id'], row['name'], row['code'],
)
hr_employee_browse = hr_employee_model.browse([('name', '=', row['name']), ])
if hr_employee_browse.id == []:
department_id = row['department_id']
new_department_id = False
if department_id is not None:
cursor2.execute(
'''
SELECT new_id
FROM ''' + hr_department_table_name + '''
WHERE id = ?;''',
(department_id,
)
)
new_department_id = cursor2.fetchone()[0]
address_id = row['address_id']
new_address_id = False
if address_id is not None:
cursor2.execute(
'''
SELECT new_id
FROM ''' + res_partner_table_name + '''
WHERE id = ?;''',
(address_id,
)
)
new_address_id = cursor2.fetchone()[0]
user_id = row['user_id']
new_user_id = False
if user_id is not None:
cursor2.execute(
'''
SELECT new_id
FROM ''' + res_users_table_name + '''
WHERE id = ?;''',
(user_id,
)
)
new_user_id = cursor2.fetchone()[0]
values = {
'name': row['name'],
'code': row['code'],
'address_id': new_address_id,
'work_email': row['work_email'],
'job_id': row['job_id'],
'department_id': new_department_id,
'user_id': new_user_id,
'image': row['image'],
}
hr_employee_id = hr_employee_model.create(values).id
cursor2.execute(
'''
UPDATE ''' + table_name + '''
SET new_id = ?
WHERE id = ?;''',
(hr_employee_id,
row['id']
)
)
conn.commit()
conn.close()
print()
print('--> hr_employee_count: ', hr_employee_count)
| agpl-3.0 | 2,261,187,033,920,331,800 | 26.886076 | 109 | 0.465557 | false | 4.345011 | false | false | false |
SmingHub/Sming | Sming/Components/Storage/Tools/hwconfig/hwconfig.py | 1 | 4450 | #!/usr/bin/env python3
#
# Sming hardware configuration tool
#
import common, argparse, os, partition
from common import *
from config import Config
from config import schema as config_schema
def openOutput(path):
if path == '-':
try:
stdout_binary = sys.stdout.buffer # Python 3
except AttributeError:
stdout_binary = sys.stdout
return stdout_binary
status("Writing to '%s'" % path)
output_dir = os.path.abspath(os.path.dirname(path))
os.makedirs(output_dir, exist_ok=True)
return open(path, 'wb')
def handle_validate(args, config, part):
# Validate resulting hardware configuration against schema
try:
from jsonschema import Draft7Validator
inst = json_loads(config.to_json())
v = Draft7Validator(config_schema)
errors = sorted(v.iter_errors(inst), key=lambda e: e.path)
if errors != []:
for e in errors:
critical("%s @ %s" % (e.message, e.path))
sys.exit(3)
except ImportError as err:
critical("\n** WARNING! %s: Cannot validate '%s', please run `make python-requirements **\n\n" % (str(err), args.input))
def handle_flashcheck(args, config, part):
# Expect list of chunks, such as "0x100000=/out/Esp8266/debug/firmware/spiff_rom.bin 0x200000=custom.bin"
list = args.expr.split()
if len(list) == 0:
raise InputError("No chunks to flash!")
for e in list:
addr, filename = e.split('=')
addr = int(addr, 0)
part = config.partitions.find_by_address(config.devices[0], addr)
if part is None:
raise InputError("No partition contains address 0x%08x" % addr)
if part.address != addr:
raise InputError("Address 0x%08x is within partition '%s', not at start (0x%08x)" % (addr, part.name, part.address))
filesize = os.path.getsize(filename)
if filesize > part.size:
raise InputError("File '%s' is 0x%08x bytes, too big for partition '%s' (0x%08x bytes)" % (os.path.basename(filename), filesize, part.name, part.size))
def handle_partgen(args, config, part):
# Generate partition table binary
if not args.no_verify:
status("Verifying partition table...")
config.verify(args.secure)
return config.partitions.to_binary(config.devices)
def handle_expr(args, config, part):
# Evaluate expression against configuration data
return str(eval(args.expr)).encode()
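# Illustrative invocations (configuration and file names are placeholders):
#
#   hwconfig.py partgen standard out/partitions.bin
#   hwconfig.py expr standard - "config.devices[0].size"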
def main():
parser = argparse.ArgumentParser(description='Sming hardware configuration utility')
parser.add_argument('--no-verify', help="Don't verify partition table fields", action='store_true')
parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
parser.add_argument('--secure', help="Require app partitions to be suitable for secure boot", action='store_true')
parser.add_argument('--part', help="Name of partition to operate on")
parser.add_argument('command', help='Action to perform', choices=['partgen', 'expr', 'validate', 'flashcheck'])
parser.add_argument('input', help='Name of hardware configuration or path to binary partition table')
parser.add_argument('output', help='Path to output file. Will use stdout if omitted.', nargs='?', default='-')
parser.add_argument('expr', help='Expression to evaluate', nargs='?', default=None)
args = parser.parse_args()
common.quiet = args.quiet
output = None
input_is_binary = False
if os.path.exists(args.input):
inputData = open(args.input, "rb").read()
input_is_binary = inputData[0:2] == partition.Entry.MAGIC_BYTES
if input_is_binary:
config = Config.from_binary(inputData)
else:
raise InputError("File '%s' not recognised as partition table" % args.input)
else:
config = Config.from_name(args.input)
partitions = config.partitions
# Locate any supplied partition by name
part = None
if args.part is not None:
part = partitions.find_by_name(args.part)
if part is None:
return
output = globals()['handle_' + args.command](args, config, part)
if output is not None:
openOutput(args.output).write(output)
if __name__ == '__main__':
try:
main()
except InputError as e:
print("** ERROR! %s" % e, file=sys.stderr)
sys.exit(2)
| lgpl-3.0 | 8,049,036,951,163,652,000 | 37.034188 | 163 | 0.643596 | false | 3.819742 | true | false | false |
sublee/etc | etc/adapter.py | 1 | 2507 | # -*- coding: utf-8 -*-
"""
etc.adapter
~~~~~~~~~~~
   The interface for etcd adapters. A subclass of :class:`Adapter` will have
   verification code injected automatically.
"""
from __future__ import absolute_import
import functools
import six
__all__ = ['Adapter']
def with_verifier(verify, func):
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
verify(*args, **kwargs)
return func(self, *args, **kwargs)
return wrapped
class AdapterMeta(type):
def __new__(meta, name, bases, attrs):
for attr, verify in [('set', meta.verify_set),
('append', meta.verify_append)]:
try:
func = attrs[attr]
except KeyError:
continue
attrs[attr] = with_verifier(verify, func)
return super(AdapterMeta, meta).__new__(meta, name, bases, attrs)
@staticmethod
def verify_set(key, value=None, dir=False, ttl=None, refresh=False,
prev_value=None, prev_index=None, prev_exist=None,
timeout=None):
if not refresh and (value is None) == (not dir):
raise ValueError('Set value or make as directory')
if value is not None and not isinstance(value, six.text_type):
raise TypeError('Set %s value' % six.text_type.__name__)
@staticmethod
def verify_append(key, value=None, dir=False, ttl=None, timeout=None):
if (value is None) == (not dir):
raise ValueError('Set value or make as directory')
if value is not None and not isinstance(value, six.text_type):
raise TypeError('Set %s value' % six.text_type.__name__)
class Adapter(six.with_metaclass(AdapterMeta)):
"""An interface to implement several essential raw methods of etcd."""
def __init__(self, url):
self.url = url
def clear(self):
pass
def get(self, key, recursive=False, sorted=False, quorum=False,
wait=False, wait_index=None, timeout=None):
raise NotImplementedError
def set(self, key, value=None, dir=False, ttl=None, refresh=False,
prev_value=None, prev_index=None, prev_exist=None, timeout=None):
raise NotImplementedError
def append(self, key, value=None, dir=False, ttl=None, timeout=None):
raise NotImplementedError
def delete(self, key, dir=False, recursive=False,
prev_value=None, prev_index=None, timeout=None):
raise NotImplementedError
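# A minimal sketch (not part of the original module): AdapterMeta wraps
# ``set``/``append`` with the verifiers above, so invalid argument
# combinations are rejected before a subclass body ever runs. The subclass
# and URL below are illustrative only.
if __name__ == '__main__':
    class DummyAdapter(Adapter):
        def set(self, key, value=None, dir=False, ttl=None, refresh=False,
                prev_value=None, prev_index=None, prev_exist=None,
                timeout=None):
            return (key, value, dir)

    adapter = DummyAdapter('http://127.0.0.1:2379')
    assert adapter.set('/key', u'value') == ('/key', u'value', False)
    try:
        adapter.set('/key', u'value', dir=True)  # value *and* dir given
    except ValueError:
        pass  # vetoed by AdapterMeta.verify_set, as expected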
| bsd-3-clause | -6,410,024,683,960,013,000 | 30.734177 | 77 | 0.609095 | false | 3.992038 | false | false | false |
specify/specify7 | specifyweb/specify/encryption.py | 1 | 1452 | # See edu.ku.brc.helpers.Encryption and rfc2898
from itertools import islice
from hashlib import md5
from Crypto.Cipher import DES
from Crypto.Random.random import randint
ITERATION_COUNT = 1000
def decrypt(text: str, password: str) -> str:
key = password.encode('utf-8')
fromhex = bytes.fromhex(text)
salt, ciphertext = fromhex[:8], fromhex[8:]
derivedkey = generate_derivedkey(key, salt)
deskey, iv = derivedkey[:8], derivedkey[8:]
des = DES.new(deskey, DES.MODE_CBC, iv)
padded = des.decrypt(ciphertext)
paddinglen = padded[-1]
return padded[:-paddinglen].decode('utf-8')
def encrypt(text: str, password: str) -> str:
text_encoded = text.encode('utf-8')
paddinglen = 8 - len(text_encoded) % 8
padded = text_encoded + bytes([paddinglen]) * paddinglen
key = password.encode('utf-8')
salt = make_salt()
derivedkey = generate_derivedkey(key, salt)
deskey, iv = derivedkey[:8], derivedkey[8:]
des = DES.new(deskey, DES.MODE_CBC, iv)
ciphertext = des.encrypt(padded)
return (salt + ciphertext).hex().upper()
def rand_byte() -> int:
return randint(0, 0xff)
def make_salt() -> bytes:
return bytes(islice(iter(rand_byte, None), 8))
def generate_derivedkey(key: bytes, salt: bytes, iterations: int = ITERATION_COUNT) -> bytes:
out = key + salt
for i in range(iterations):
md = md5()
md.update(out)
out = md.digest()
return out
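# A minimal round-trip sketch (not part of the original module): encrypt()
# returns an uppercase hex string of the 8-byte salt followed by the DES-CBC
# ciphertext, and decrypt() recovers the plaintext with the same password.
if __name__ == '__main__':
    token = encrypt('secret value', 'master password')
    assert decrypt(token, 'master password') == 'secret value'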
| gpl-2.0 | 3,260,853,576,327,188,500 | 28.04 | 93 | 0.657713 | false | 3.3 | false | false | false |
getsentry/rigidsearch | rigidsearch/utils.py | 1 | 2243 | import re
from cStringIO import StringIO as BytesIO
from datetime import timedelta
from functools import update_wrapper
from flask import make_response, current_app, request
_ws_re = re.compile(r'(\s+)')
def chop_tail(base, tail):
if not base.endswith(tail):
return base, False
return base[:-len(tail)], True
def normalize_text(text):
def _handle_match(match):
ws = match.group()
nl = ws.count('\n')
if nl >= 2:
return u'\n\n'
elif nl == 1:
return u'\n'
return u' '
return _ws_re.sub(_handle_match, text).strip('\n')
def cors(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin or ('*',))
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def release_file(request, name):
f = request.files[name]
rv = f.stream
f.stream = BytesIO()
return rv
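# A hedged usage sketch (not part of the original module; the app and route
# are illustrative): ``cors`` wraps a Flask view so responses carry the CORS
# headers and OPTIONS preflight requests are answered automatically.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)

    @app.route('/search', methods=['GET', 'OPTIONS'])
    @cors(origin='*', methods=['GET'], headers=['Content-Type'])
    def search():
        return 'ok'

    app.run()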
| bsd-3-clause | 6,092,743,340,628,280,000 | 29.726027 | 67 | 0.593848 | false | 3.942004 | false | false | false |
ZeitOnline/zeit.cms | src/zeit/cms/content/interfaces.py | 1 | 21341 | from zeit.cms.i18n import MessageFactory as _
import zc.form.field
import zc.form.interfaces
import zeit.cms.content.field
import zeit.cms.content.sources
import zeit.cms.interfaces
import zeit.cms.repository.interfaces
import zeit.cms.tagging.interfaces
import zope.app.container.interfaces
import zope.component.interfaces
import zope.interface
import zope.interface.common.sequence
import zope.interface.interfaces
import zope.location.interfaces
import zope.schema
import zope.schema.interfaces
# XXX There is too much, too unordered in here, clean this up.
# prevent circular import
from zeit.cms.content.contentsource import ICMSContentSource
from zeit.cms.content.contentsource import INamedCMSContentSource
from zeit.cms.content.contentsource import IAutocompleteSource
class IAuthorType(zeit.cms.interfaces.ICMSContentType):
"""Interface type for authors."""
class AuthorSource(zeit.cms.content.contentsource.CMSContentSource):
zope.interface.implements(
zeit.cms.content.contentsource.IAutocompleteSource)
check_interfaces = IAuthorType
name = 'authors'
authorSource = AuthorSource()
class IChannelField(zc.form.interfaces.ICombinationField):
"""Marker interface so we can register a specialized widget
for this field."""
class ReferenceField(zope.schema.Choice):
def _validate(self, value):
if self._init_field:
return
# skip immediate superclass, since that's what we want to change
super(zope.schema.Choice, self)._validate(value)
if value.target not in self.vocabulary:
raise zope.schema.interfaces.ConstraintNotSatisfied(value)
class ICommonMetadata(zope.interface.Interface):
year = zope.schema.Int(
title=_("Year"),
min=1900,
max=2100)
volume = zope.schema.Int(
title=_("Volume"),
min=1,
max=53,
required=False)
page = zope.schema.Int(
title=_("Page"),
readonly=True,
required=False)
ressort = zope.schema.Choice(
title=_("Ressort"),
source=zeit.cms.content.sources.RessortSource())
sub_ressort = zope.schema.Choice(
title=_('Sub ressort'),
source=zeit.cms.content.sources.SubRessortSource(),
required=False)
channels = zope.schema.Tuple(
title=_('Channels'),
value_type=zc.form.field.Combination(
(zope.schema.Choice(
title=_('Channel'),
source=zeit.cms.content.sources.ChannelSource()),
zope.schema.Choice(
title=_('Subchannel'),
source=zeit.cms.content.sources.SubChannelSource(),
required=False))
),
default=(),
required=False)
zope.interface.alsoProvides(channels.value_type, IChannelField)
lead_candidate = zope.schema.Bool(
title=_('Lead candidate'),
default=True,
required=False)
printRessort = zope.schema.TextLine(
title=_("Print ressort"),
readonly=True,
required=False,
default=u'n/a')
# not required since e.g. Agenturmeldungen don't have an author, only
# a copyright notice
authorships = zope.schema.Tuple(
title=_("Authors"),
value_type=ReferenceField(source=authorSource),
default=(),
required=False)
authorships.value_type.setTaggedValue(
'zeit.cms.addform.contextfree', 'zeit.content.author.add_contextfree')
# DEPRECATED, use authorships instead
# (still used by zeit.vgwort for querying)
authors = zope.schema.Tuple(
title=_("Authors (freetext)"),
value_type=zope.schema.TextLine(),
required=False,
default=(u'',),
description=_(u'overwritten if any non-freetext authors are set'))
access = zope.schema.Choice(
title=_('Access'),
default=u'free',
source=zeit.cms.content.sources.ACCESS_SOURCE)
keywords = zeit.cms.tagging.interfaces.Keywords(
required=False,
default=())
serie = zope.schema.Choice(
title=_("Serie"),
source=zeit.cms.content.sources.SerieSource(),
required=False)
copyrights = zope.schema.TextLine(
title=_("Copyright (c)"),
description=_("Do not enter (c)."),
required=False)
supertitle = zope.schema.TextLine(
title=_("Kicker"),
description=_("Please take care of capitalisation."),
required=False,
max_length=70)
# DEPRECATED, use authorships instead (still used by
# k4import/exporter.zeit.de to transmit author information *into* vivi,
# so Producing can manually convert it to authorships)
byline = zope.schema.TextLine(
title=_("By line"),
readonly=True,
required=False)
title = zope.schema.Text(
title=_("Title"),
missing_value=u'')
title.setTaggedValue('zeit.cms.charlimit', 70)
subtitle = zope.schema.Text(
title=_("Subtitle"),
missing_value=u'',
required=False)
subtitle.setTaggedValue('zeit.cms.charlimit', 170)
teaserTitle = zope.schema.TextLine(
title=_("Teaser title"),
required=False,
max_length=70)
teaserText = zope.schema.Text(
title=_("Teaser text"),
required=False,
max_length=170)
teaserSupertitle = zope.schema.TextLine(
title=_(u'Teaser kicker'),
description=_(u'Please take care of capitalisation.'),
required=False,
max_length=70)
vg_wort_id = zope.schema.TextLine(
title=_('VG Wort Id'),
required=False)
dailyNewsletter = zope.schema.Bool(
title=_("Daily newsletter"),
description=_(
"Should this article be listed in the daily newsletter?"),
required=False,
default=True)
commentsPremoderate = zope.schema.Bool(
title=_("Comments premoderate"),
required=False,
default=False)
commentsAllowed = zope.schema.Bool(
title=_("Comments allowed"),
required=False,
default=True)
commentsAPIv2 = zope.schema.Bool(
title=_("Use Comments APIv2"),
required=False,
default=False)
commentSectionEnable = zope.schema.Bool(
title=_("Show commentthread"),
required=False,
default=True)
banner = zope.schema.Bool(
title=_("Banner"),
required=False,
default=True)
banner_content = zope.schema.Bool(
title=_("Banner in Content"),
required=False,
default=True)
banner_outer = zope.schema.Bool(
title=_("Banner Mainad"),
required=False,
default=True)
banner_id = zope.schema.TextLine(
title=_('Banner id'),
required=False)
hide_adblocker_notification = zope.schema.Bool(
title=_('Hide AdBlocker notification'),
default=False,
required=False)
product = zope.schema.Choice(
title=_('Product id'),
# XXX kludgy, we expect a product with this ID to be present in the XML
# file. We only need to set an ID here, since to read the product we'll
# ask the source anyway.
default=zeit.cms.content.sources.Product(u'ZEDE'),
source=zeit.cms.content.sources.PRODUCT_SOURCE)
overscrolling = zope.schema.Bool(
title=_('Overscrolling'),
required=False,
default=True)
cap_title = zope.schema.TextLine(
title=_('CAP title'),
required=False)
deeplink_url = zope.schema.URI(
title=_('Deeplink URL'),
required=False,
default=None)
tldr_title = zope.schema.TextLine(
title=_("tldr title"),
required=False,
max_length=70)
tldr_text = zope.schema.Text(
title=_("tldr text"),
required=False,
max_length=450)
tldr_milestone = zope.schema.Bool(
title=_("tldr milestone"),
required=False,
default=False)
tldr_date = zope.schema.Datetime(
title=_("tldr date"),
required=False)
storystreams = zope.schema.Tuple(
title=_("Storystreams"),
value_type=zope.schema.Choice(
source=zeit.cms.content.sources.StorystreamSource()),
default=(),
required=False)
advertisement_title = zope.schema.TextLine(
title=_("Advertisement title"),
required=False)
advertisement_text = zope.schema.Text(
title=_("Advertisement text"),
required=False)
class IProduct(zope.interface.Interface):
"""A publication product"""
id = zope.interface.Attribute('id')
title = zope.interface.Attribute('title')
vgwortcode = zope.interface.Attribute('VGWort code, optional')
href = zope.interface.Attribute('URL for the "homepage" of this product')
target = zope.interface.Attribute('Optional link target (e.g. _blank)')
show = zope.interface.Attribute(
'Flag what to display in frontend byline. {issue,link,source}')
volume = zope.interface.Attribute('Boolean: has print volumes')
location = zope.interface.Attribute(
'uniqueId template of the IVolumes of this product, '
'e.g. http://xml.zeit.de/{year}/{name}/ausgabe')
centerpage = zope.interface.Attribute(
'uniqueId template for the public-facing CP of this product, '
'e.g. http://xml.zeit.de/{year}/{name}/index')
cp_template = zope.interface.Attribute(
'uniqueId of a zeit.content.text.interfaces.IPythonScript, which is '
'used to create the public-facing CP of this product')
autochannel = zope.interface.Attribute(
'Set false to suppress setting channel on ressort changes')
relates_to = zope.interface.Attribute(
'Product-ID of another Product we belong to')
dependent_products = zope.interface.Attribute(
'List of products whose relates_to points to us')
class ISerie(zope.interface.Interface):
id = zope.interface.Attribute('')
title = zope.interface.Attribute('')
serienname = zope.interface.Attribute('')
url = zope.interface.Attribute('')
encoded = zope.interface.Attribute('')
column = zope.interface.Attribute('')
video = zope.interface.Attribute('')
class IStorystreamReference(zope.interface.Interface):
id = zope.interface.Attribute('')
title = zope.interface.Attribute('')
references = zope.interface.Attribute('')
def hex_literal(value):
try:
int(value, base=16)
except ValueError:
raise zeit.cms.interfaces.ValidationError(_("Invalid hex literal"))
else:
return True
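# For illustration (not part of the original module): as a zope.schema
# constraint, hex_literal(u'1a2b') returns True, while a non-hex string such
# as u'xyz' raises zeit.cms.interfaces.ValidationError.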
WRITEABLE_ON_CHECKIN = object()
WRITEABLE_LIVE = object()
WRITEABLE_ALWAYS = object()
class IDAVPropertyConverter(zope.interface.Interface):
"""Parse a unicode string from a DAV property to a value and vice versa."""
def fromProperty(value):
"""Convert property value to python value.
returns python object represented by value.
raises ValueError if the value could not be converted.
raises zope.schema.ValidationError if the value could be converted but
does not satisfy the constraints.
"""
def toProperty(value):
"""Convert python value to DAV property value.
returns unicode
"""
class IGenericDAVPropertyConverter(IDAVPropertyConverter):
"""A dav property converter which converts in a generic way.
This interface is a marker if some code wants to know if a generic
converter or a specialised is doing the work.
"""
class IDAVToken(zope.interface.Interface):
"""A string representing a token that uniquely identifies a value."""
class IDAVPropertyChangedEvent(zope.component.interfaces.IObjectEvent):
"""A dav property has been changed."""
old_value = zope.interface.Attribute("The value before the change.")
new_value = zope.interface.Attribute("The value after the change.")
property_namespace = zope.interface.Attribute("Webdav property namespace.")
property_name = zope.interface.Attribute("Webdav property name.")
field = zope.interface.Attribute(
"zope.schema field the property was changed for.")
class DAVPropertyChangedEvent(zope.component.interfaces.ObjectEvent):
zope.interface.implements(IDAVPropertyChangedEvent)
def __init__(self, object, property_namespace, property_name,
old_value, new_value, field):
self.object = object
self.property_namespace = property_namespace
self.property_name = property_name
self.old_value = old_value
self.new_value = new_value
self.field = field
class ITextContent(zope.interface.Interface):
"""Representing text content XXX"""
data = zope.schema.Text(title=u"Document content")
class IXMLRepresentation(zope.interface.Interface):
"""Objects with an XML representation."""
xml = zeit.cms.content.field.XMLTree(
title=_("XML Source"))
class IXMLReference(zope.interface.Interface):
"""XML representation of an object reference.
How the object references is serialized is dependent on both the target
object and the type of reference. For instance, a feed might usually
use an <xi:include> tag, while an image uses <img>. And then there
might be references inside the <head> that always use a <reference> tag.
(NOTE: These are just examples, not actual zeit.cms policy!)
Adapting to IXMLReference yields an lxml.objectify tree::
node = zope.component.getAdapter(
content, zeit.cms.content.interfaces.IXMLReference, name='image')
The target uniqueId is always stored in the ``href`` attribute of the node.
"""
class IXMLReferenceUpdater(zope.interface.Interface):
"""Objects that update metadata etc on XML references."""
def update(xml_node, suppress_errors=False):
"""Update xml_node with data from the content object.
xml_node: lxml.objectify'ed element
"""
class IReference(IXMLRepresentation,
zeit.cms.interfaces.ICMSContent,
zope.location.interfaces.ILocation):
"""Reference to an ICMSContent object (optionally with properties of its
own).
To deserialize an IXMLReference, adapt the source ICMSContent and the XML
node to IReference (using the same adapter name that was used to create the
IXMLReference)::
reference = zope.component.getMultiAdapter(
(source, node), zeit.cms.content.interfaces.IReference,
name='image')
For widget support (DropObjectWidget/ObjectSequenceWidget), IReference
can be resolved as ICMSContent, using a uniqueId built from
"source uniqueId, source attribute name, target uniqueId".
"""
target = zope.interface.Attribute('The referenced ICMSContent object')
target_unique_id = zope.interface.Attribute(
'uniqueId of the referenced ICMSContent object')
attribute = zope.interface.Attribute(
'Attribute name of reference property on source')
def create(target, suppress_errors=False):
"""Create a new references from our source to the given target
(either an ICMSContent or a uniqueId)."""
def get(target, default=None):
"""If our source has a reference to the given target
(ICMSContent or uniqueId), return that, else return default."""
def update_metadata(suppress_errors=False):
"""Run XMLReferenceUpdater on our XML node."""
class IReferences(zope.interface.common.sequence.IReadSequence):
def __iter__(self):
# XXX not declared by IReadSequence,
# dear zope.interface are you serious?!
pass
def create(target):
"""Returns a new IReference to the given ICMSContent object."""
def get(target, default=None):
"""Returns IReference to the given target (uniqueId or ICMSContent)
if one exists."""
class IXMLSource(zope.interface.Interface):
"""str representing the xml of an object."""
class IXMLContent(zeit.cms.repository.interfaces.IDAVContent,
IXMLRepresentation):
"""Content with an XML representation."""
class ITemplateManagerContainer(zope.app.container.interfaces.IReadContainer):
"""Container which holds all template managers."""
class ITemplateManager(zope.app.container.interfaces.IReadContainer):
"""Manages templates for a content type."""
class ITemplate(IXMLRepresentation):
"""A template for xml content types."""
title = zope.schema.TextLine(title=_('Title'))
class IDAVPropertiesInXML(zope.interface.Interface):
"""Marker interface for objects which store their webdav properties in xml.
It is common for articles and CPs to store their webdav properties in the
xml, too. That is in addition to the Metadata stored as webdav properties.
"""
class IDAVPropertyXMLSynchroniser(zope.interface.Interface):
"""Synchronises dav properties to XML."""
def set(namespace, name):
"""Set value for the DAV property (name, namespace)."""
def sync():
"""Synchronise all properties."""
class ISynchronisingDAVPropertyToXMLEvent(zope.interface.Interface):
namespace = zope.interface.Attribute("DAV property namespace")
name = zope.interface.Attribute("DAV property name")
value = zope.interface.Attribute("DAV property value")
vetoed = zope.schema.Bool(
title=u"True if sync was vetoed.",
readonly=True,
default=False)
def veto():
"""Called by subscribers to veto the property being added to xml."""
class IAccessCounter(zope.interface.Interface):
"""Give information about how many times an object was accessed."""
hits = zope.schema.Int(
title=_('Hits today'),
description=_('Indicates how many times a page viewed today.'),
required=False,
default=None)
total_hits = zope.schema.Int(
title=_('Total hits'),
description=_('Indicates how many times a page was viewed in total, '
'i.e. during its entire life time.'),
required=False,
default=None)
detail_url = zope.schema.URI(
title=_('URI to the access counting details'),
required=False,
default=None)
class IContentSortKey(zope.interface.Interface):
"""Content objects can be adapted to this interface to get a sort key.
The sort key usually is a tuple of (weight, lowercased-name)
"""
class ILivePropertyManager(zope.interface.Interface):
"""Manages live properties."""
def register_live_property(name, namespace):
"""Register property as live property."""
def unregister_live_property(name, namespace):
"""Unregister property as live property."""
def is_live_property(name, namespace):
"""Return (bool) whether the property is a live property."""
class ISemanticChange(zope.interface.Interface):
"""Indicates when the content last changed meaningfully, as opposed to
small corrections like fixed typos. This might be shown to the reader,
e.g. as "Aktualisiert am" on article pages.
"""
last_semantic_change = zope.schema.Datetime(
title=_('Last semantic change'),
required=False,
readonly=True,
default=None)
has_semantic_change = zope.schema.Bool(
title=_('Update last semantic change'),
required=False,
default=False)
def update():
"""Set last semantic change to last modified."""
class IUUID(zope.interface.Interface):
"""Accessing the uuid of a content object."""
id = zope.schema.ASCIILine(
title=u"The uuid of the content object.",
default=None,
required=False)
shortened = zope.schema.ASCIILine(
title=u"id without `{urn:uuid:}` prefix",
readonly=True,
required=False,
default=None)
class IMemo(zope.interface.Interface):
"""Provide a memo for additional remarks on a content object."""
memo = zope.schema.Text(
title=_('Memo'),
required=False)
class IContentAdder(zope.interface.Interface):
type_ = zope.schema.Choice(
title=_("Type"),
source=zeit.cms.content.sources.AddableCMSContentTypeSource())
ressort = zope.schema.Choice(
title=_("Ressort"),
source=zeit.cms.content.sources.RessortSource(),
required=False)
sub_ressort = zope.schema.Choice(
title=_('Sub ressort'),
source=zeit.cms.content.sources.SubRessortSource(),
required=False)
year = zope.schema.Int(
title=_("Year"),
min=1900,
max=2100)
month = zope.schema.Int(
title=_("Month"),
min=1,
max=12)
class IAddLocation(zope.interface.Interface):
"""Marker interface that adapts a content type to a context object on which
the add form should be displayed.
Register this adapter for (content_type, IContentAdder), where content_type
is an interface like ICMSContent or IImageGroup.
"""
class IAddableContent(zope.interface.interfaces.IInterface):
"""Interface type to register additional addable entries
that are *not* ICMSContentTypes.
"""
class ISkipDefaultChannel(zope.interface.Interface):
"""Marker interface to opt out of setting default
ICommonMetadata.channels according to ressort/sub_ressort."""
| bsd-3-clause | 2,148,707,815,413,582,600 | 29.270922 | 79 | 0.659997 | false | 4.110362 | false | false | false |
Xevaquor/the-art-of-ai | 005/solitare.py | 1 | 3098 | from astar import astar
import copy
Moves = {
'W':(0,-1),
'S':(1,0),
'N':(-1,0),
'E':(0,1)
}
def t2l(t):
l = []
for x in range(5):
i = []
for y in range(5):
i.append(t[x][y])
l.append(i)
return l
def print_state(s):
for x in range(5):
print s[x][0], s[x][1], s[x][2], s[x][3], s[x][4]
guard = 0
# Heuristic for A*: twice the number of stones left on the board. Each jump
# removes exactly one stone, so this overestimates the true remaining cost
# (it is not admissible) and trades optimality for search speed.
def h(node, instance):
global guard
guard += 1
count = 0
for x in range(5):
for y in range(5):
if node[x][y]:
count += 1
if guard % 1000 == 0: print count
return count * 2
def stones_left(board):
count = 0
for x in range(5):
for y in range(5):
if board[x][y]:
count += 1
return count
class solitare:
def __init__(self):
self.start_state = [[True, True, True, True, True],
[True, True, True, True, True],
[True, True, True, True, True],
[True, True, False, True, True],
[True, True, True, True, True],
]
def is_target_state(self, s):
count = 0
for x in range(5):
for y in range(5):
if s[x][y]:
count += 1
if count == 2:
return False
#print count
return True
def get_start_state(self):
return self.start_state
def is_valid_move(self, move, state):
x,y,dir = move
if state[x][y] == False:
return False
dx, dy = Moves[dir]
newx, newy = x + dx, y + dy
if newx < 0 or newx >= 5 or newy < 0 or newy >= 5:
return False
if state[newx][newy] == False:
return False
newx += dx
newy += dy
if newx < 0 or newx >= 5 or newy < 0 or newy >= 5:
return False
return state[newx][newy] == False
def get_after_move(self, state, move):
start = stones_left(state)
x,y,dir = move
dx, dy = Moves[dir]
middlex, middley = x + dx, y + dy
lastx, lasty = middlex + dx, middley + dy
s2 = copy.deepcopy(state)
s2[x][y] = False
s2[middlex][middley] = False
s2[lastx][lasty] = True
stop = stones_left(s2)
assert start - 1 == stop
return s2
def get_children(self, parent):
successors = []
for x in range(5):
for y in range(5):
if parent[x][y]:
for m in Moves:
d = Moves[m]
if not self.is_valid_move((x,y,m), parent): continue
child = self.get_after_move(parent,(x,y,m)), (m, x,y), 1
successors.append(child)
#print 'legal moves', len(successors), 'left', h(parent, self)
return successors
l = solitare()
s = l.get_start_state()
print s
#print_state(s)
c = l.get_children(s)
#print l.get_children(s)
res = astar(l, h)
print res
| gpl-3.0 | -8,575,674,368,633,035,000 | 21.779412 | 80 | 0.453196 | false | 3.374728 | false | false | false |
OpenDMM/bitbake | lib/prserv/serv.py | 1 | 11381 | import os,sys,logging
import signal, time, atexit, threading
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import xmlrpclib
import threading
import Queue
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
import bb.server.xmlrpc
import prserv
import prserv.db
import errno
logger = logging.getLogger("BitBake.PRserv")
if sys.hexversion < 0x020600F0:
print("Sorry, python 2.6 or later is required.")
sys.exit(1)
class Handler(SimpleXMLRPCRequestHandler):
def _dispatch(self,method,params):
try:
value=self.server.funcs[method](*params)
except:
import traceback
traceback.print_exc()
raise
return value
PIDPREFIX = "/tmp/PRServer_%s_%s.pid"
singleton = None
class PRServer(SimpleXMLRPCServer):
def __init__(self, dbfile, logfile, interface, daemon=True):
''' constructor '''
SimpleXMLRPCServer.__init__(self, interface,
logRequests=False, allow_none=True)
self.dbfile=dbfile
self.daemon=daemon
self.logfile=logfile
self.working_thread=None
self.host, self.port = self.socket.getsockname()
self.pidfile=PIDPREFIX % (self.host, self.port)
self.register_function(self.getPR, "getPR")
self.register_function(self.quit, "quit")
self.register_function(self.ping, "ping")
self.register_function(self.export, "export")
self.register_function(self.importone, "importone")
self.register_introspection_functions()
self.db = prserv.db.PRData(self.dbfile)
self.table = self.db["PRMAIN"]
self.requestqueue = Queue.Queue()
self.handlerthread = threading.Thread(target = self.process_request_thread)
self.handlerthread.daemon = False
def process_request_thread(self):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
while True:
(request, client_address) = self.requestqueue.get()
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
self.table.sync()
def process_request(self, request, client_address):
self.requestqueue.put((request, client_address))
def export(self, version=None, pkgarch=None, checksum=None, colinfo=True):
try:
return self.table.export(version, pkgarch, checksum, colinfo)
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def importone(self, version, pkgarch, checksum, value):
return self.table.importone(version, pkgarch, checksum, value)
def ping(self):
return not self.quit
def getinfo(self):
return (self.host, self.port)
def getPR(self, version, pkgarch, checksum):
try:
return self.table.getValue(version, pkgarch, checksum)
except prserv.NotFoundError:
logger.error("can not find value for (%s, %s)",version, checksum)
return None
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def quit(self):
self.quit=True
return
    def work_forever(self):
self.quit = False
self.timeout = 0.5
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(os.getpid())))
self.handlerthread.start()
while not self.quit:
self.handle_request()
self.table.sync()
logger.info("PRServer: stopping...")
self.server_close()
return
def start(self):
pid = self.daemonize()
        # Log this in the parent too; the child already logs it via work_forever above
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(pid)))
def delpid(self):
os.remove(self.pidfile)
def daemonize(self):
"""
See Advanced Programming in the UNIX, Sec 13.3
"""
try:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
#parent return instead of exit to give control
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.setsid()
"""
fork again to make sure the daemon is not session leader,
which prevents it from acquiring controlling terminal
"""
try:
pid = os.fork()
if pid > 0: #parent
os._exit(0)
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.umask(0)
os.chdir("/")
sys.stdout.flush()
sys.stderr.flush()
si = file('/dev/null', 'r')
so = file(self.logfile, 'a+')
se = so
os.dup2(si.fileno(),sys.stdin.fileno())
os.dup2(so.fileno(),sys.stdout.fileno())
os.dup2(se.fileno(),sys.stderr.fileno())
# Clear out all log handlers prior to the fork() to avoid calling
# event handlers not part of the PRserver
for logger_iter in logging.Logger.manager.loggerDict.keys():
logging.getLogger(logger_iter).handlers = []
# Ensure logging makes it to the logfile
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
formatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
# write pidfile
pid = str(os.getpid())
pf = file(self.pidfile, 'w')
pf.write("%s\n" % pid)
pf.close()
self.work_forever()
self.delpid()
os._exit(0)
class PRServSingleton(object):
def __init__(self, dbfile, logfile, interface):
self.dbfile = dbfile
self.logfile = logfile
self.interface = interface
self.host = None
self.port = None
def start(self):
self.prserv = PRServer(self.dbfile, self.logfile, self.interface)
self.prserv.start()
self.host, self.port = self.prserv.getinfo()
def getinfo(self):
return (self.host, self.port)
class PRServerConnection(object):
def __init__(self, host, port):
if is_local_special(host, port):
host, port = singleton.getinfo()
self.host = host
self.port = port
self.connection, self.transport = bb.server.xmlrpc._create_server(self.host, self.port)
def terminate(self):
try:
logger.info("Terminating PRServer...")
self.connection.quit()
except Exception as exc:
sys.stderr.write("%s\n" % str(exc))
def getPR(self, version, pkgarch, checksum):
return self.connection.getPR(version, pkgarch, checksum)
def ping(self):
return self.connection.ping()
def export(self,version=None, pkgarch=None, checksum=None, colinfo=True):
return self.connection.export(version, pkgarch, checksum, colinfo)
def importone(self, version, pkgarch, checksum, value):
return self.connection.importone(version, pkgarch, checksum, value)
def getinfo(self):
return self.host, self.port
def start_daemon(dbfile, host, port, logfile):
pidfile = PIDPREFIX % (host, port)
try:
pf = file(pidfile,'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if pid:
sys.stderr.write("pidfile %s already exist. Daemon already running?\n"
% pidfile)
return 1
server = PRServer(os.path.abspath(dbfile), os.path.abspath(logfile), (host,port))
server.start()
return 0
def stop_daemon(host, port):
pidfile = PIDPREFIX % (host, port)
try:
pf = file(pidfile,'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if not pid:
sys.stderr.write("pidfile %s does not exist. Daemon not running?\n"
% pidfile)
try:
PRServerConnection(host, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
time.sleep(0.5)
try:
if pid:
if os.path.exists(pidfile):
os.remove(pidfile)
wait_timeout = 0
while is_running(pid) and wait_timeout < 10:
print("Waiting for pr-server to exit.")
time.sleep(0.5)
wait_timeout += 1
if is_running(pid):
print("Sending SIGTERM to pr-server.")
os.kill(pid,signal.SIGTERM)
time.sleep(0.1)
except OSError as e:
err = str(e)
if err.find("No such process") <= 0:
raise e
return 0
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
return True
def is_local_special(host, port):
    if host.strip().upper() == 'LOCALHOST' and (not port):
return True
else:
return False
class PRServiceConfigError(Exception):
pass
def auto_start(d):
global singleton
host_params = filter(None, (d.getVar('PRSERV_HOST', True) or '').split(':'))
if not host_params:
return None
if len(host_params) != 2:
logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
'Usage: PRSERV_HOST = "<hostname>:<port>"']))
raise PRServiceConfigError
if is_local_special(host_params[0], int(host_params[1])) and not singleton:
import bb.utils
cachedir = (d.getVar("PERSISTENT_DIR", True) or d.getVar("CACHE", True))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
raise PRServiceConfigError
bb.utils.mkdirhier(cachedir)
dbfile = os.path.join(cachedir, "prserv.sqlite3")
logfile = os.path.join(cachedir, "prserv.log")
singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
singleton.start()
if singleton:
host, port = singleton.getinfo()
else:
host = host_params[0]
port = int(host_params[1])
try:
connection = PRServerConnection(host,port)
connection.ping()
realhost, realport = connection.getinfo()
return str(realhost) + ":" + str(realport)
except Exception:
logger.critical("PRservice %s:%d not available" % (host, port))
raise PRServiceConfigError
def auto_shutdown(d=None):
global singleton
if singleton:
host, port = singleton.getinfo()
try:
PRServerConnection(host, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
singleton = None
def ping(host, port):
    conn = PRServerConnection(host, port)
return conn.ping()
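# A hedged usage sketch (not part of the original module): with a server
# started via start_daemon('cache/prserv.sqlite3', 'localhost', 8585,
# 'prserv.log'), a client can fetch a package revision with
#   PRServerConnection('localhost', 8585).getPR(version, pkgarch, checksum)
# and shut the server down again with stop_daemon('localhost', 8585).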
| gpl-2.0 | 7,377,694,387,516,378,000 | 29.67655 | 103 | 0.59037 | false | 3.935339 | false | false | false |
jcoady9/youtube-dl | youtube_dl/extractor/adobepass.py | 1 | 6269 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import time
import xml.etree.ElementTree as etree
from .common import InfoExtractor
from ..utils import (
unescapeHTML,
urlencode_postdata,
unified_timestamp,
)
class AdobePassIE(InfoExtractor):
_SERVICE_PROVIDER_TEMPLATE = 'https://sp.auth.adobe.com/adobe-services/%s'
_USER_AGENT = 'Mozilla/5.0 (X11; Linux i686; rv:47.0) Gecko/20100101 Firefox/47.0'
@staticmethod
def _get_mvpd_resource(provider_id, title, guid, rating):
channel = etree.Element('channel')
channel_title = etree.SubElement(channel, 'title')
channel_title.text = provider_id
item = etree.SubElement(channel, 'item')
resource_title = etree.SubElement(item, 'title')
resource_title.text = title
resource_guid = etree.SubElement(item, 'guid')
resource_guid.text = guid
resource_rating = etree.SubElement(item, 'media:rating')
resource_rating.attrib = {'scheme': 'urn:v-chip'}
resource_rating.text = rating
return '<rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">' + etree.tostring(channel).decode() + '</rss>'
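    # For illustration (not part of the original extractor): the string built
    # above has the shape
    #   <rss version="2.0" xmlns:media="http://search.yahoo.com/mrss/">
    #     <channel><title>PROVIDER_ID</title>
    #       <item><title>TITLE</title><guid>GUID</guid>
    #         <media:rating scheme="urn:v-chip">RATING</media:rating>
    #       </item></channel></rss>
    # which the Adobe Pass service uses to authorize one specific, rated
    # video resource.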
def _extract_mvpd_auth(self, url, video_id, requestor_id, resource):
def xml_text(xml_str, tag):
return self._search_regex(
'<%s>(.+?)</%s>' % (tag, tag), xml_str, tag)
def is_expired(token, date_ele):
token_expires = unified_timestamp(re.sub(r'[_ ]GMT', '', xml_text(token, date_ele)))
return token_expires and token_expires <= int(time.time())
mvpd_headers = {
'ap_42': 'anonymous',
'ap_11': 'Linux i686',
'ap_z': self._USER_AGENT,
'User-Agent': self._USER_AGENT,
}
guid = xml_text(resource, 'guid')
requestor_info = self._downloader.cache.load('mvpd', requestor_id) or {}
authn_token = requestor_info.get('authn_token')
if authn_token and is_expired(authn_token, 'simpleTokenExpires'):
authn_token = None
if not authn_token:
# TODO add support for other TV Providers
mso_id = 'DTV'
username, password = self._get_netrc_login_info(mso_id)
if not username or not password:
return ''
def post_form(form_page, note, data={}):
post_url = self._html_search_regex(r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_page, 'post url', group='url')
return self._download_webpage(
post_url, video_id, note, data=urlencode_postdata(data or self._hidden_inputs(form_page)), headers={
'Content-Type': 'application/x-www-form-urlencoded',
})
provider_redirect_page = self._download_webpage(
self._SERVICE_PROVIDER_TEMPLATE % 'authenticate/saml', video_id,
'Downloading Provider Redirect Page', query={
'noflash': 'true',
'mso_id': mso_id,
'requestor_id': requestor_id,
'no_iframe': 'false',
'domain_name': 'adobe.com',
'redirect_url': url,
})
provider_login_page = post_form(
provider_redirect_page, 'Downloading Provider Login Page')
mvpd_confirm_page = post_form(provider_login_page, 'Logging in', {
'username': username,
'password': password,
})
post_form(mvpd_confirm_page, 'Confirming Login')
session = self._download_webpage(
self._SERVICE_PROVIDER_TEMPLATE % 'session', video_id,
'Retrieving Session', data=urlencode_postdata({
'_method': 'GET',
'requestor_id': requestor_id,
}), headers=mvpd_headers)
if '<pendingLogout' in session:
self._downloader.cache.store('mvpd', requestor_id, {})
return self._extract_mvpd_auth(url, video_id, requestor_id, resource)
authn_token = unescapeHTML(xml_text(session, 'authnToken'))
requestor_info['authn_token'] = authn_token
self._downloader.cache.store('mvpd', requestor_id, requestor_info)
authz_token = requestor_info.get(guid)
if authz_token and is_expired(authz_token, 'simpleTokenTTL'):
authz_token = None
if not authz_token:
authorize = self._download_webpage(
self._SERVICE_PROVIDER_TEMPLATE % 'authorize', video_id,
'Retrieving Authorization Token', data=urlencode_postdata({
'resource_id': resource,
'requestor_id': requestor_id,
'authentication_token': authn_token,
'mso_id': xml_text(authn_token, 'simpleTokenMsoID'),
'userMeta': '1',
}), headers=mvpd_headers)
if '<pendingLogout' in authorize:
self._downloader.cache.store('mvpd', requestor_id, {})
return self._extract_mvpd_auth(url, video_id, requestor_id, resource)
authz_token = unescapeHTML(xml_text(authorize, 'authzToken'))
requestor_info[guid] = authz_token
self._downloader.cache.store('mvpd', requestor_id, requestor_info)
mvpd_headers.update({
'ap_19': xml_text(authn_token, 'simpleSamlNameID'),
'ap_23': xml_text(authn_token, 'simpleSamlSessionIndex'),
})
short_authorize = self._download_webpage(
self._SERVICE_PROVIDER_TEMPLATE % 'shortAuthorize',
video_id, 'Retrieving Media Token', data=urlencode_postdata({
'authz_token': authz_token,
'requestor_id': requestor_id,
'session_guid': xml_text(authn_token, 'simpleTokenAuthenticationGuid'),
'hashed_guid': 'false',
}), headers=mvpd_headers)
if '<pendingLogout' in short_authorize:
self._downloader.cache.store('mvpd', requestor_id, {})
return self._extract_mvpd_auth(url, video_id, requestor_id, resource)
return short_authorize
| unlicense | 4,938,944,430,969,354,000 | 44.759124 | 129 | 0.566438 | false | 3.891372 | false | false | false |
jskurka/PyChess-Learning-Module | lib/pychess/widgets/gamenanny.py | 1 | 11390 | """ This module intends to work as glue between the gamemodel and the gamewidget
    taking care of stuff that is neither very offscreen nor very onscreen,
    like bringing up dialogs and writing statusbar messages """
import math
import gtk
from pychess.Utils.Offer import Offer
#from pychess.Utils.GameModel import GameModel
#from pychess.Utils.TimeModel import TimeModel
from pychess.Utils.const import *
import pychess.ic.ICGameModel
from pychess.Utils.repr import *
from pychess.System import conf
from pychess.System import glock
from pychess.widgets import preferencesDialog
from gamewidget import getWidgets, key2gmwidg, isDesignGWShown
from gamewidget import MENU_ITEMS, ACTION_MENU_ITEMS
from pychess.ic.ICGameModel import ICGameModel
def nurseGame (gmwidg, gamemodel):
""" Call this function when gmwidget is just created """
gmwidg.connect("infront", on_gmwidg_infront)
gmwidg.connect("closed", on_gmwidg_closed)
gmwidg.connect("title_changed", on_gmwidg_title_changed)
# Because of the async loading of games, the game might already be started,
# when the glock is ready and nurseGame is called.
# Thus we support both cases.
if gamemodel.status == WAITING_TO_START:
gamemodel.connect("game_started", on_game_started, gmwidg)
gamemodel.connect("game_loaded", game_loaded, gmwidg)
else:
if gamemodel.uri:
game_loaded(gamemodel, gamemodel.uri, gmwidg)
on_game_started(gamemodel, gmwidg)
gamemodel.connect("game_saved", game_saved, gmwidg)
gamemodel.connect("game_ended", game_ended, gmwidg)
gamemodel.connect("game_unended", game_unended, gmwidg)
gamemodel.connect("game_resumed", game_unended, gmwidg)
#===============================================================================
# Gamewidget signals
#===============================================================================
def on_gmwidg_infront (gmwidg):
# Set right sensitivity states in menubar, when tab is switched
auto = gmwidg.gamemodel.players[0].__type__ != LOCAL and \
gmwidg.gamemodel.players[1].__type__ != LOCAL
for item in ACTION_MENU_ITEMS:
getWidgets()[item].props.sensitive = not auto
for widget in MENU_ITEMS:
sensitive = False
if widget == 'abort':
if isinstance(gmwidg.gamemodel, pychess.ic.ICGameModel.ICGameModel):
sensitive = True
elif widget == 'adjourn':
if isinstance(gmwidg.gamemodel, pychess.ic.ICGameModel.ICGameModel):
sensitive = True
elif widget == 'hint_mode':
if gmwidg.gamemodel.hintEngineSupportsVariant and conf.get("analyzer_check", True):
sensitive = True
elif widget == 'spy_mode':
if gmwidg.gamemodel.spyEngineSupportsVariant and conf.get("inv_analyzer_check", True):
sensitive = True
elif widget == 'show_sidepanels':
if not isDesignGWShown():
sensitive = True
else: sensitive = True
getWidgets()[widget].set_property('sensitive', sensitive)
# Change window title
getWidgets()['window1'].set_title('%s - PyChess' % gmwidg.getTabText())
def on_gmwidg_closed (gmwidg):
if len(key2gmwidg) == 1:
getWidgets()['window1'].set_title('%s - PyChess' % _('Welcome'))
def on_gmwidg_title_changed (gmwidg):
if gmwidg.isInFront():
getWidgets()['window1'].set_title('%s - PyChess' % gmwidg.getTabText())
#===============================================================================
# Gamemodel signals
#===============================================================================
# Connect game_loaded, game_saved and game_ended to statusbar
def game_loaded (gamemodel, uri, gmwidg):
if type(uri) in (str, unicode):
s = "%s: %s" % (_("Loaded game"), str(uri))
else: s = _("Loaded game")
glock.acquire()
try:
gmwidg.status(s)
finally:
glock.release()
def game_saved (gamemodel, uri, gmwidg):
glock.acquire()
try:
gmwidg.status("%s: %s" % (_("Saved game"), str(uri)))
finally:
glock.release()
def game_ended (gamemodel, reason, gmwidg):
nameDic = {"white": gamemodel.players[WHITE],
"black": gamemodel.players[BLACK],
"mover": gamemodel.curplayer}
if gamemodel.status == WHITEWON:
nameDic["winner"] = gamemodel.players[WHITE]
nameDic["loser"] = gamemodel.players[BLACK]
elif gamemodel.status == BLACKWON:
nameDic["winner"] = gamemodel.players[BLACK]
nameDic["loser"] = gamemodel.players[WHITE]
m1 = reprResult_long[gamemodel.status] % nameDic
m2 = reprReason_long[reason] % nameDic
md = gtk.MessageDialog()
md.set_markup("<b><big>%s</big></b>" % m1)
md.format_secondary_markup(m2)
if gamemodel.players[0].__type__ == LOCAL or gamemodel.players[1].__type__ == LOCAL:
if gamemodel.players[0].__type__ == REMOTE or gamemodel.players[1].__type__ == REMOTE:
md.add_button(_("Offer Rematch"), 0)
else:
md.add_button(_("Play Rematch"), 1)
if gamemodel.ply > 1:
md.add_button(_("Undo two moves"), 2)
elif gamemodel.ply == 1:
md.add_button(_("Undo one move"), 2)
def cb (messageDialog, responseId):
if responseId == 0:
if gamemodel.players[0].__type__ == REMOTE:
gamemodel.players[0].offerRematch()
else:
gamemodel.players[1].offerRematch()
elif responseId == 1:
from pychess.widgets.newGameDialog import createRematch
createRematch(gamemodel)
elif responseId == 2:
if gamemodel.curplayer.__type__ == LOCAL and gamemodel.ply > 1:
offer = Offer(TAKEBACK_OFFER, gamemodel.ply-2)
else:
offer = Offer(TAKEBACK_OFFER, gamemodel.ply-1)
if gamemodel.players[0].__type__ == LOCAL:
gamemodel.players[0].emit("offer", offer)
else: gamemodel.players[1].emit("offer", offer)
md.connect("response", cb)
glock.acquire()
try:
gmwidg.showMessage(md)
gmwidg.status("%s %s." % (m1,m2[0].lower()+m2[1:]))
if reason == WHITE_ENGINE_DIED:
engineDead(gamemodel.players[0], gmwidg)
elif reason == BLACK_ENGINE_DIED:
engineDead(gamemodel.players[1], gmwidg)
finally:
glock.release()
def game_unended (gamemodel, gmwidg):
glock.acquire()
try:
print "sending hideMessage"
gmwidg.hideMessage()
gmwidg.status("")
finally:
glock.release()
def on_game_started (gamemodel, gmwidg):
on_gmwidg_infront(gmwidg) # setup menu items sensitivity
# Rotate to human player
boardview = gmwidg.board.view
if gamemodel.players[1].__type__ == LOCAL:
if gamemodel.players[0].__type__ != LOCAL:
boardview.rotation = math.pi
elif conf.get("autoRotate", True) and \
gamemodel.curplayer == gamemodel.players[1]:
boardview.rotation = math.pi
# Play set-up sound
preferencesDialog.SoundTab.playAction("gameIsSetup")
# Connect player offers to statusbar
for player in gamemodel.players:
if player.__type__ == LOCAL:
player.connect("offer", offer_callback, gamemodel, gmwidg)
# Start analyzers if any
setAnalyzerEnabled(gmwidg, HINT, getWidgets()["hint_mode"].get_active())
setAnalyzerEnabled(gmwidg, SPY, getWidgets()["spy_mode"].get_active())
#===============================================================================
# Player signals
#===============================================================================
def offer_callback (player, offer, gamemodel, gmwidg):
if offer.type == DRAW_OFFER:
if gamemodel.status != RUNNING:
return # If the offer has already been handled by
# Gamemodel and the game was drawn, we need
# to do nothing
glock.acquire()
try:
gmwidg.status(_("You sent a draw offer"))
finally:
glock.release()
#===============================================================================
# Subfunctions
#===============================================================================
def engineDead (engine, gmwidg):
gmwidg.bringToFront()
d = gtk.MessageDialog(type=gtk.MESSAGE_ERROR, buttons=gtk.BUTTONS_OK)
d.set_markup(_("<big><b>Engine, %s, has died</b></big>") % repr(engine))
d.format_secondary_text(_("PyChess has lost connection to the engine, probably because it has died.\n\nYou can try to start a new game with the engine, or try to play against another one."))
d.connect("response", lambda d,r: d.hide())
d.show_all()
def setAnalyzerEnabled (gmwidg, analyzerType, enabled):
if not analyzerType in gmwidg.gamemodel.spectactors:
return
analyzer = gmwidg.gamemodel.spectactors[analyzerType]
if analyzerType == HINT:
arrow = gmwidg.board.view._set_greenarrow
else: arrow = gmwidg.board.view._set_redarrow
set_arrow = lambda x: gmwidg.board.view.runWhenReady(arrow, x)
if enabled:
if len(analyzer.getAnalysis()) >= 1:
if gmwidg.gamemodel.curplayer.__type__ == LOCAL or \
[player.__type__ for player in gmwidg.gamemodel.players] == [REMOTE, REMOTE]:
set_arrow (analyzer.getAnalysis()[0].cords)
else: set_arrow (None)
        # This is a kludge using python's ability to assign attributes to an
        # object, even if those attributes are nowhere mentioned in the
        # object's class. So don't go looking for it ;)
        # The code saves our connection ids, enabling us to disconnect later
if not hasattr (gmwidg.gamemodel, "anacons"):
gmwidg.gamemodel.anacons = {HINT:[], SPY:[]}
if not hasattr (gmwidg.gamemodel, "chacons"):
gmwidg.gamemodel.chacons = []
def on_analyze (analyzer, moves, score):
if moves and (gmwidg.gamemodel.curplayer.__type__ == LOCAL or \
[player.__type__ for player in gmwidg.gamemodel.players] == [REMOTE, REMOTE]):
set_arrow (moves[0].cords)
else: set_arrow (None)
def on_game_change (gamemodel):
set_arrow (None)
gmwidg.gamemodel.anacons[analyzerType].append(
analyzer.connect("analyze", on_analyze))
gmwidg.gamemodel.chacons.append(
gmwidg.gamemodel.connect("game_changed", on_game_change))
gmwidg.gamemodel.chacons.append(
gmwidg.gamemodel.connect("moves_undoing",
lambda model, moves: on_game_change(model)))
else:
if hasattr (gmwidg.gamemodel, "anacons"):
for conid in gmwidg.gamemodel.anacons[analyzerType]:
analyzer.disconnect(conid)
del gmwidg.gamemodel.anacons[analyzerType][:]
if hasattr (gmwidg.gamemodel, "chacons"):
for conid in gmwidg.gamemodel.chacons:
gmwidg.gamemodel.disconnect(conid)
del gmwidg.gamemodel.chacons[:]
set_arrow (None)
| gpl-3.0 | 3,256,816,127,223,838,700 | 38.006849 | 194 | 0.583231 | false | 3.727094 | false | false | false |
KirarinSnow/Google-Code-Jam | Code Jam Beta 2008/C.py | 1 | 3465 | #!/usr/bin/python
#
# Problem: Random Route
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
import heapq
MAX = 100000000000
def compute():
def countroutes(v, x, s): # num of routes into x toward v
if v in counts[x]:
return
if x[0] == s:
counts[x][v] = 1
elif x[0] not in pred:
counts[x][v] = 0
else:
pp = pred[x[0]]
num = 0
for i in pp:
pv = i
countroutes(v, pv, s)
num += counts[pv][v]
counts[x][v] = num
def countr2(v, x, s): # num of routes out of x toward v
if v in scounts[x]:
return
if x[1] not in succ:
scounts[x][v] = 0
else:
pp = succ[x[1]]
num = 0
for i in pp:
countr2(v, i, s)
num += scounts[i][v]
scounts[x][v] = num
line = raw_input().split()
nr = int(line[0])
source = line[1]
edges = []
vertices = dict()
pred = dict()
succ = dict()
for i in xrange(nr):
a, b, c = raw_input().split()
c = int(c)
edges.append((a,b,c,i))
if a not in vertices: #[d, inedge->{v->num}, outedges, v->total]
vertices[a] = [MAX,dict(),[],dict()]
if b not in vertices:
vertices[b] = [MAX,dict(),[],dict()]
vertices[a][2].append(edges[i])
vertices[b][1][edges[i]] = dict()
vertices[source][0] = 0 #d[s] = 0
q = map(lambda k: (vertices[k][0], k), vertices.keys())
heapq.heapify(q)
s = set()
# Dijkstra
while q:
node = heapq.heappop(q)
if node[0] == vertices[node[1]][0]: # valid key
s.add(node)
u = node[1]
for adj in vertices[u][2]:
w = adj[2]
v = adj[1]
if vertices[v][0] > vertices[u][0] + w:
vertices[v][0] = vertices[u][0] + w
heapq.heappush(q,(vertices[v][0],v)) # replace key
pred[v] = [adj]
elif vertices[v][0] == vertices[u][0] + w:
pred[v].append(adj)
for v in vertices:
if v in pred:
for e in pred[v]:
u = e[0]
if u not in succ:
succ[u] = []
succ[u].append(e)
nv = len(pred)
counts = dict()
scounts = dict()
for e in edges:
counts[e] = dict()
scounts[e] = dict()
totals = dict()
for v in vertices:
if v in pred:
for kk in pred[v]:
scounts[kk][v] = 1
for v in vertices:
totals[v] = 0.0
if v in pred:
for e in pred[v]:
countroutes(v, e, source)
totals[v] += counts[e][v]
if source in succ:
for e in succ[source]:
countr2(v, e, source)
edgecounts = len(edges)*[0]
for e in edges:
edgecounts[e[3]] = 0.0
for v in vertices:
for e in edges:
i = e[3]
u = e[0]
if v in counts[e] and v in scounts[e]:
edgecounts[i]+=(0.0+counts[e][v]*scounts[e][v])/totals[v]/nv
return ' '.join(map(lambda x: '%0.7f' % x, edgecounts))
for i in range(input()):
print "Case #%d: %s" % (i+1, compute())
| gpl-3.0 | -1,755,201,136,878,354,000 | 23.574468 | 76 | 0.435209 | false | 3.306298 | false | false | false |
balint256/gr-baz | apps/papr.py | 4 | 7476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# papr.py
#
# Copyright 2014 Balint Seeber <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys, math
import numpy
import matplotlib.pyplot as pyplot
from optparse import OptionParser
def moving_average(values, window):
weights = numpy.repeat(1.0, window) / window
sma = numpy.convolve(values, weights, 'same') # 'valid'
#print len(values), len(sma)
return sma
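# For illustration (not part of the original tool): with window=2 the call
# moving_average([1.0, 1.0, 3.0, 3.0], 2) convolves with [0.5, 0.5] in
# 'same' mode, so the result keeps the input length -- here
# [0.5, 1.0, 2.0, 3.0], with the first sample only partially covered.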
def main():
parser = OptionParser(usage="%prog: [options] <input file>")
parser.add_option("-t", "--type", type="string", default="c8", help="data type [default=%default]")
parser.add_option("-T", "--trim", type="int", default=None, help="max # of samples to use [default=%default]")
parser.add_option("-d", "--decim", type="int", default=None, help="decimation [default=%default]")
parser.add_option("-l", "--length", type="int", default="2048", help="target window length [default=%default]")
parser.add_option("-a", "--average", type="int", default="128", help="moving average window length [default=%default]")
parser.add_option("-m", "--max-length", type="int", default="1024", help="max window length [default=%default]")
parser.add_option("-D", "--max-decim", type="int", default=None, help="max decimation [default=%default]")
parser.add_option("-L", "--log", action="store_true", default=False, help="log scale [default=%default]")
parser.add_option("-M", "--show-mag", action="store_true", default=False, help="show magnitude plot [default=%default]")
#parser.add_option("-T", "--mag-trim", type="int", default=None, help="max # of samples to show in mag plot [default=%default]")
(options, args) = parser.parse_args()
if len(args) < 1:
print "Supply input file"
return
input_file = args[0]
dtype = numpy.dtype(options.type)
print "Opening", input_file, "as", dtype
data = numpy.fromfile(input_file, dtype)
print "File samples:", len(data)
if options.trim is not None:
print "Trimming to", options.trim
data = data[:options.trim]
print "Min,mean,max:", data.min(), data.mean(), data.max()
if options.decim is not None:
decim = options.decim # FIXME: Validate
else:
decim = len(data) / options.length
print "Decim:", decim
new_length = decim * options.length
print "New length:", new_length, ", skipping:", (len(data) - new_length)
data = data[:new_length]
data_mag = numpy.abs(data)
data_mag_min = data_mag.min()
if data_mag_min == 0.0:
print "Mag min: %f" % (data_mag_min)
else:
print "Mag min: %f (%f dB)" % (data_mag_min, 10.0*math.log10(data_mag_min))
data_mag_mean = data_mag.mean()
print "Mag mean: %f (%f dB)" % (data_mag_mean, 10.0*math.log10(data_mag_mean))
data_mag_max = data_mag.max()
print "Mag max: %f (%f dB)" % (data_mag_max, 10.0*math.log10(data_mag_max))
data_mag_squared = data_mag ** 2.0
mean_rms = math.sqrt(data_mag_squared.mean())
print "Mean RMS:", mean_rms, "(%f dB)" % (10.0*math.log10(mean_rms))
print "Moving average window length:", options.average
data_mag_squared_ma = moving_average(data_mag_squared, options.average)
#print len(data_mag_ma)
#len_diff = new_length - len(data_mag_ma)
#if options.decim is not None:
# decim = len(data_mag_ma) / options.length
#else:
# decim = len(data) / options.length
#print "Decim:", decim
#new_length = decim * options.length
#print "New length:", new_length
#data_mag_ma = data_mag_ma[:new_length]
#print "Moving average decim:", decim
#new_length = decim * options.length
#data_mag_ma = data_mag_ma[:new_length]
#print "New length:", len(data_mag_ma)
if decim > 1:
data_mag_ma_mat = data_mag_squared_ma.reshape(-1, decim)
data_mag_ma_mean = data_mag_ma_mat.mean(axis=1)
else:
data_mag_ma_mean = data_mag_squared_ma
print "Mean moving-average data length:", len(data_mag_ma_mean)
print "Min,mean,max: %f, %f, %f" % (data_mag_ma_mean.min(), data_mag_ma_mean.mean(), data_mag_ma_mean.max())
if options.max_decim is None:
assert((new_length % options.max_length) == 0)
decim_max = new_length / options.max_length
else:
decim_max = options.max_decim
print "Max decim:", decim_max
#new_length_max = decim_max * options.max_length
#data_mag_decim = data_mag[new_length/2:]
#len_diff = len(data_mag_decim) - new_length
#data_mag_decim = data_mag_decim[:-len_diff+1]
data_mag_decim = data_mag
if decim_max > 1:
data_mag_decim_mat = data_mag_decim.reshape(-1, decim_max)
data_mag_decim_max = data_mag_decim_mat.max(axis=1)
else:
data_mag_decim_max = data_mag_decim
repeat = options.length / options.max_length
print "Max repeat:", repeat
if repeat > 1:
data_mag_decim_max = numpy.repeat(data_mag_decim_max, repeat)
print "Min,mean,max: %f, %f, %f" % (data_mag_decim_max.min(), data_mag_decim_max.mean(), data_mag_decim_max.max())
data_mag_decim_max_squared = data_mag_decim_max ** 2.0
ratio = data_mag_decim_max_squared / data_mag_ma_mean
ratio_filtered = ratio[~numpy.isnan(ratio)]
print "NaNs:", (len(ratio) - len(ratio_filtered))
ratio_filtered2 = ratio_filtered[~numpy.isinf(ratio_filtered)]
print "Infs:", (len(ratio_filtered) - len(ratio_filtered2))
print "Min,mean,max: %f, %f, %f" % (ratio_filtered2.min(), ratio_filtered2.mean(), ratio_filtered2.max())
#print ratio
#print ratio_filtered
orig_ratio_len = len(ratio)
#trim = options.average - 1
#ratio = ratio[trim:-trim]
trim = 0
x = numpy.linspace(trim, trim + len(ratio), len(ratio), False)
#print len(x), len(ratio)
mean_ratio = ratio_filtered2.mean()
print "Mean ratio:", mean_ratio, "(%f dB)" % (10.0*math.log10(mean_ratio))
ratio_db = 10.0 * numpy.log10(ratio)
ratio_filtered_db = 10.0 * numpy.log10(ratio_filtered2)
print "Min,mean,max ratio (dB): %f, %f, %f" % (ratio_filtered_db.min(), ratio_filtered_db.mean(), ratio_filtered_db.max())
if options.show_mag:
subplot = pyplot.subplot(111)
subplot.grid(True)
print "Showing magnitude plot..."
#subplot.set_ylim(ymin=0.0)
#subplot.plot(data)
subplot.plot(data_mag)
data_mag_rms_ma = data_mag_squared_ma ** 0.5
subplot.plot(data_mag_rms_ma)
data_mag_rms_ma_mean = numpy.repeat(data_mag_ma_mean, decim) ** 0.5
subplot.plot(data_mag_rms_ma_mean)
data_mag_decim_max_repeat = numpy.repeat(data_mag_decim_max, decim)
subplot.plot(data_mag_decim_max_repeat)
pyplot.show()
subplot = pyplot.subplot(111)
if options.log:
subplot.set_yscale('log')
subplot.grid(True)
#subplot.set_ylim(ymin=(10.0**-18.), ymax=(10.0**-8.))
#plot, = subplot.plot(data_mag_mean)
#plot, = subplot.plot(data_mag_decim_max)
print "Showing PAPR plot..."
subplot.set_ylim(ymin=0.0, ymax=ratio_filtered_db.max())
subplot.set_xlim(xmax=orig_ratio_len)
plot, = subplot.plot(x, ratio_db)
pyplot.show()
return 0
if __name__ == '__main__':
main()
| gpl-3.0 | -7,776,518,439,012,479,000 | 34.264151 | 129 | 0.675896 | false | 2.758672 | false | false | false |
richardjgowers/MDA-RDFTool | rdftool.py | 1 | 3827 | """Tool for calculating RDFs
"""
from __future__ import print_function
import numpy as np
from MDAnalysis.lib.distances import distance_array
from analysisbase import AnalysisBase, blocks_of
class InterRDF(AnalysisBase):
"""Analysis object for calculating intermolecular RDF.
See the init method for arguments and keywords.
Run the analysis with method *run*
Results are stored in the following attributes:
rdf
The pair distribution function, normalised.
edges
The boundaries of each rdf bin.
bins
The center of each rdf bin.
"""
def __init__(self, *args, **kwargs):
"""InterRDF(g1, g2, nbins=75, range=(0.0, 15.0))
:Arguments:
*g1*
First AtomGroup
*g2*
Second AtomGroup
:Keywords:
*nbins*
Number of bins in the histogram [75]
*range*
The size of the RDF [0.0, 15.0]
*exclusion_block*
A tuple representing the tile to exclude from the distance
array. [None]
*start*
The frame to start at [0]
*stop*
The frame to end analysis at. [-1]
*step*
The step size through the trajectory in frames [0]
Keyword *exclusion_block* allows same molecule contributions to
be excluded from the rdf calculation.
"""
self.g1 = args[0]
self.g2 = args[1]
self.u = self.g1.universe
kwargs.update({'traj': self.u.trajectory})
self._setup_frames(**kwargs)
nbins = kwargs.pop('nbins', 75)
hrange = kwargs.pop('range', (0.0, 15.0))
self.rdf_settings = {'bins':nbins,
'range':hrange}
# Empty histogram to store the RDF
count, edges = np.histogram([-1], **self.rdf_settings)
count *= 0.0
self.count = count
self.edges = edges
self.bins = 0.5 * (edges[:-1] + edges[1:])
# Need to know average volume
self.volume = 0.0
# Allocate a results array which we will reuse
self._result = np.zeros((len(self.g1), len(self.g2)), dtype=np.float64)
# If provided exclusions, create a mask of _result which
# lets us take these out
exclusion_block = kwargs.pop('exclusion_block', None)
        if exclusion_block is not None:
self._exclusion_block = exclusion_block
self._exclusion_mask = blocks_of(self._result, *exclusion_block)
self._maxrange = hrange[1] + 1.0
else:
self._exclusion_block = None
self._exclusion_mask = None
def _singleframe(self):
distance_array(self.g1.positions, self.g2.positions,
box=self.u.dimensions, result=self._result)
# Maybe exclude same molecule distances
        if self._exclusion_mask is not None:
self._exclusion_mask[:] = self._maxrange
count = np.histogram(self._result, **self.rdf_settings)[0]
self.count += count
self.volume += self._ts.volume
def _normalise(self):
# Number of each selection
nA = len(self.g1)
nB = len(self.g2)
N = nA * nB
# If we had exclusions, take these into account
if self._exclusion_block:
xA, xB = self._exclusion_block
            nblocks = nA // xA  # integer division; also correct under Python 3
N -= xA * xB * nblocks
# Volume in each radial shell
vol = np.power(self.edges[1:], 3) - np.power(self.edges[:-1], 3)
vol *= 4/3.0 * np.pi
# Number of frames
nframes = len(self.frames)
# Average number density
box_vol = self.volume / nframes
density = N / box_vol
rdf = self.count / (density * vol * nframes)
self.rdf = rdf
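
# --- Added illustrative usage (not part of the original module) ---
# Minimal sketch assuming a hypothetical water system; run() is inherited
# from AnalysisBase, and the topology/trajectory names are made up.
#
#   import MDAnalysis as mda
#   u = mda.Universe('topol.tpr', 'traj.xtc')
#   oxygens = u.select_atoms('name OW')
#   rdf = InterRDF(oxygens, oxygens, nbins=75, range=(0.0, 15.0))
#   # For 3-site waters, exclusion_block=(3, 3) would drop intramolecular pairs.
#   rdf.run()
#   print(rdf.bins, rdf.rdf)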
| gpl-2.0 | 1,466,646,118,820,482,000 | 29.133858 | 79 | 0.563888 | false | 3.905102 | false | false | false |
Chirayu-sopho/Hindi-DateTime-Parser | functions.py | 1 | 4957 | import datetime
from dateutil.relativedelta import *
## give final date and time after parsing by changing current date-time
def change_datetime(c="0", y=0, mt=0, w=0, d=0, h=0, m=0, s=0):
#mt = mt + 12*y
#d = d + 30*mt
now = datetime.datetime.now()
change = relativedelta( years =+ y, months =+ mt, weeks =+ w, days =+ d, hours =+ h, minutes =+ m, seconds =+ s)
#print (now + change)
if c == "date":
return (now + change).date()
elif c == "time":
return (now + change).time()
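## Added example (illustrative only): "2 days and 3 hours from now".
##   change_datetime(c="date", d=2, h=3)  # -> a datetime.date
##   change_datetime(c="time", d=2, h=3)  # -> a datetime.time
## Any other value of `c` falls through and returns None.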
## make separate date and time functions
#def change_date (y=0, m=0, w=0, d=0):
#def change_time (h=0, m=0, s=0):
## make separate functions for setting date and time and print -- if not provided the data
## give final date and time after parsing by setting date-time
def set_datetime(y=0, mt=0, d=0, h=0, m=0, s=0, c="0"):
a = ""
if d!=0:
a = a + str(d) + "/"
if mt!=0:
a = a + str(mt) + "/"
if y!=0:
a = a + str(y)
#a = a + " "
if h!=0:
a = a + str(h) + ":"
if m!=0:
a = a + str(m) + ":"
if s!=0:
a = a + str(s)
if c!="0":
a = a + " "
a = a + str(c)
#print (a, "a")
return a
## make function for am/pm
def get_disease (string):
with open("dataset.txt") as f:
content = f.readlines()
names = []
definitions = []
values = []
check = 1
## TODO
	## remove common filler words ("a", "the", "disease", etc.) from the definition (or input) while splitting it into words
## Also do stemming
## Go through dataset once manually to get these words
for word in content:
if word[0] == 'n':
			if check == 1:
				names.append(word)
				check = 0
			else:
				## keep only the most recent name line before each definition
				names[-1] = word
if word[0] == 'd':
definitions.append(word)
check = 1
values.append(0)
#string = input("Give Text:")
words = string.split(" ")
for word in words:
		for defintion in definitions:
			## str.replace returns a new string; the original calls discarded it
			cleaned = defintion.replace('. ', ' ').replace(', ', ' ')
			definition_words = cleaned.split(" ")
			if word in definition_words:
				values[definitions.index(defintion)] += 1
#print (word)
highest = 0
index_of_highest = 0
answer = []
## TODO if there are more than one highest
for value in values:
if value > highest:
highest = value
index_of_highest = values.index(value)
answer.append(names[index_of_highest])
answer.append(highest)
answer.append(definitions[index_of_highest])
	## collect the input words that matched the best definition
	cleaned_best = definitions[index_of_highest].replace('. ', ' ').replace(', ', ' ')
	definition_words = cleaned_best.split(" ")
	for word in words:
		if word in definition_words:
			answer.append(word)
# print (definitions[index_of_highest][defintion.index(word)])
	## TODO: strip filler words from definitions before matching
	## TODO: use re.split to handle several delimiters in one pass instead of chained replace
return answer
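## Added example (illustrative; dataset contents are hypothetical):
##   result = get_disease("patient reports fever and headache")
##   # result = [best_name, match_count, best_definition, matched_word, ...]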
def get_sentences(text):
	import re
	## use of regular expressions
	## strings are immutable, so every replace builds a new object
	words = text.split(" ")
	Abbrs = ['Mr.', 'mr.', 'Mrs.', 'mrs.', 'Dr.', 'dr.' , 'Er.', 'er.', 'Prof.', 'prof.', 'Br.', 'br.', 'Fr.', 'fr.', 'Sr.', 'sr.', 'Jr.', 'jr.']
	SentenceType = []
	for abbr in Abbrs:
		if abbr in words:
			## drop the trailing dot so the abbreviation no longer looks like a sentence end
			new_word = abbr.replace(abbr[len(abbr)-1], "")
			text = text.replace(abbr, new_word)
	for word in words:
		if re.findall(r'\.(.)+\.', word):
			## acronyms such as U.S.A. lose their dots entirely
			new_word = word.replace('.', '')
			text = text.replace(word, new_word)
		if '.' in word[0:len(word)-2]:
			## protect interior dots (e.g. file names) from the sentence split
			new_word = word.replace('.', '[dot]')
			text = text.replace(word, new_word)
	for letter in text:
		if letter == '.':
			SentenceType.append("Assertive")
		if letter == '?':
			SentenceType.append("Interrogative")
		if letter == '!':
			SentenceType.append('Exclamatory')
	## note: '|' inside a character class is literal, so the old "[.|?|!|!!]" also split on pipes
	sentences = re.split(r"[ ]*[.?!]+[ ]*", text)
	if text and text[-1] in ('.', '?', '!'):
		sentences.pop()
	return dict(zip(sentences, SentenceType))
## TODOs
## Extend Abbrs list
## Dots back in sentences
## If abbr of acronyms with dots at end of a sentence?
## what if sentence doesn't end with !!? Get the expression from this word.
## If already a new line exist.
## Also implement through machine learning to obtain results without help of punctuation.
## Sentence Type : What about Imperative, compound, complex etc. Exclamatory Sentence or Word
## ensure sentences are returned sequentially
def get_tokens(str):
words = str.split(" ")
return words
## Make an algorithm for different kind of words for forming effective tokens before returning
| mit | -5,793,549,747,210,335,000 | 18.987903 | 142 | 0.614686 | false | 2.924484 | false | false | false |
helixyte/TheLMA | thelma/repositories/rdb/schema/tables/worklistseriesexperimentdesign.py | 1 | 1221 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Worklist series experiment design table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, experiment_design_tbl, worklist_series_tbl):
"Table factory."
tbl = Table('worklist_series_experiment_design', metadata,
Column('experiment_design_id', Integer,
ForeignKey(experiment_design_tbl.c.experiment_design_id,
ondelete='CASCADE', onupdate='CASCADE'),
nullable=False, unique=True),
Column('worklist_series_id', Integer,
ForeignKey(worklist_series_tbl.c.worklist_series_id,
ondelete='CASCADE', onupdate='CASCADE'),
nullable=False)
)
PrimaryKeyConstraint(tbl.c.experiment_design_id, tbl.c.worklist_series_id)
return tbl
| mit | 1,640,412,216,775,302,100 | 38.387097 | 80 | 0.647011 | false | 4.625 | false | false | false |
DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/tensorboard/backend/server.py | 1 | 5519 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module for building TensorBoard servers.
This is its own module so it can be used in both actual code and test code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
from six.moves import BaseHTTPServer
from six.moves import socketserver
from tensorflow.python.platform import logging
from tensorflow.python.summary import event_accumulator
from tensorflow.tensorboard.backend import handler
# How many elements to store per tag, by tag type
TENSORBOARD_SIZE_GUIDANCE = {
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.SCALARS: 1000,
event_accumulator.HISTOGRAMS: 1,
}
# How often to reload new data after the latest load (secs)
LOAD_INTERVAL = 60
def ParseEventFilesSpec(logdir):
"""Parses `logdir` into a map from paths to run group names.
The events files flag format is a comma-separated list of path specifications.
A path specification either looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a
spec with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
logdir: A comma-separated list of run specifications.
Returns:
A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
Groups without an explicit name are named after their path. If logdir is
None, returns an empty dict, which is helpful for testing things that don't
require any valid runs.
"""
files = {}
if logdir is None:
return files
for specification in logdir.split(','):
# If the spec looks like /foo:bar/baz, then we assume it's a path with a
# colon.
if ':' in specification and specification[0] != '/':
# We split at most once so run_name:/path:with/a/colon will work.
run_name, path = specification.split(':', 1)
else:
run_name = None
path = specification
if not os.path.isabs(path):
# Create absolute path out of relative one.
path = os.path.join(os.path.realpath('.'), path)
files[path] = run_name
return files
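# Added illustrative example (not in the original source); paths are
# hypothetical, and relative paths are resolved against the current directory:
#
#   ParseEventFilesSpec('train:/tmp/logs/train,/tmp/logs/eval')
#   # -> {'/tmp/logs/train': 'train', '/tmp/logs/eval': None}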
def ReloadMultiplexer(multiplexer, path_to_run):
"""Loads all runs into the multiplexer.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
"""
start = time.time()
for (path, name) in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
multiplexer.Reload()
duration = time.time() - start
logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
def StartMultiplexerReloadingThread(multiplexer,
path_to_run,
load_interval=LOAD_INTERVAL):
"""Starts a thread to automatically reload the given multiplexer.
The thread will reload the multiplexer by calling `ReloadMultiplexer` every
`load_interval` seconds, starting immediately.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: How many seconds to wait after one load before starting the
next load.
Returns:
A started `threading.Thread` that reloads the multiplexer.
"""
# Ensure the Multiplexer initializes in a loaded state before it adds runs
# So it can handle HTTP requests while runs are loading
multiplexer.Reload()
def _ReloadForever():
while True:
ReloadMultiplexer(multiplexer, path_to_run)
time.sleep(load_interval)
thread = threading.Thread(target=_ReloadForever)
thread.daemon = True
thread.start()
return thread
class ThreadedHTTPServer(socketserver.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
"""A threaded HTTP server."""
daemon = True
def BuildServer(multiplexer, host, port):
"""Sets up an HTTP server for running TensorBoard.
Args:
multiplexer: An `EventMultiplexer` that the server will query for
information about events.
host: The host name.
port: The port number to bind to, or 0 to pick one automatically.
Returns:
A `BaseHTTPServer.HTTPServer`.
"""
factory = functools.partial(handler.TensorboardHandler, multiplexer)
return ThreadedHTTPServer((host, port), factory)
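# Added end-to-end sketch (illustrative only). The EventMultiplexer import
# path is an assumption based on this module's imports and may differ:
#
#   from tensorflow.python.summary import event_multiplexer
#   path_to_run = ParseEventFilesSpec('train:/tmp/logs/train')
#   multiplexer = event_multiplexer.EventMultiplexer(
#       size_guidance=TENSORBOARD_SIZE_GUIDANCE)
#   StartMultiplexerReloadingThread(multiplexer, path_to_run)
#   BuildServer(multiplexer, 'localhost', 6006).serve_forever()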
| mit | 5,267,909,568,332,017,000 | 34.152866 | 82 | 0.678746 | false | 4.281614 | false | false | false |
nicolewhite/py2neo | book/conf.py | 2 | 11546 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Py2neo documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 17 16:03:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('_themes'))
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'alabaster',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Py2neo'
copyright = '2011-2014, Nigel Small'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
sys.path.insert(0, os.path.abspath('..'))
from py2neo import __version__
# The short X.Y version.
version = ".".join(__version__.split(".")[0:2])
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'py2neo-2.0.200x260.png',
'logo_align': 'left',
'github_user': 'nigelsmall',
'github_repo': 'py2neo',
'github_branch': 'release/' + release,
'travis_button': True,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Py2neodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Py2neo.tex', 'Py2neo Documentation',
'Nigel Small', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py2neo', 'Py2neo Documentation',
['Nigel Small'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Py2neo', 'Py2neo Documentation',
'Nigel Small', 'Py2neo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Py2neo'
epub_author = 'Nigel Small'
epub_publisher = 'Nigel Small'
epub_copyright = '2014, Nigel Small'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'Py2neo'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def get_class_name(full_module_name):
"""
Pull out the class name from the full_module_name
"""
#split the full_module_name by "."'s
return full_module_name.split('.')[1]
def process_docstring(app, what, name, obj, options, lines):
names = name.split(".")
module_name = names[0]
try:
class_name = names[1]
except IndexError:
class_name = None
try:
attr_name = names[2]
except IndexError:
attr_name = None
    if class_name is None:
        # module-level docstrings have no class component; nothing to substitute
        return
    for i, line in enumerate(lines):
        lines[i] = (line
                    .replace('«class»', class_name)
                    .replace('«class.lower»', class_name.lower()))
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
| apache-2.0 | -1,233,306,012,841,267,200 | 29.373684 | 80 | 0.693641 | false | 3.619316 | true | false | false |
Equitable/trump | trump/extensions/source/tx-pydatadatareaderst/pydatadatareaderstext.py | 2 | 1062 | """
This uses pandas.io.data.DataReader, all kwargs get passed to that.
start and end are optional, but must be of the form 'YYYY-MM-DD'.
They default to the beginning of available data and run through "today".
data_column is required to be specified as well.
"""
stype = 'PyDataDataReaderST'
renew = True
class Source(object):
def __init__(self, ses, **kwargs):
import pandas.io.data as pydata
import datetime as dt
self.pydata = pydata
self.dt = dt
def getseries(self, ses, **kwargs):
fmt = "%Y-%m-%d"
if 'start' in kwargs:
kwargs['start'] = self.dt.datetime.strptime(kwargs['start'], fmt)
if 'end' in kwargs:
if kwargs['end'] == 'now':
kwargs['end'] = self.dt.datetime.now()
else:
kwargs['end'] = self.dt.datetime.strptime(kwargs['end'], fmt)
col = kwargs['data_column']
del kwargs['data_column']
adf = self.pydata.DataReader(**kwargs)
data = adf[col]
return data
| bsd-3-clause | 3,986,232,752,445,041,700 | 28.5 | 79 | 0.584746 | false | 3.726316 | false | false | false |
jackdesert/lyxblogger | src/LyxBlog/parsing.py | 1 | 5586 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
##################### A U T H O R ##########################
# #
# Copyright 2010 Jack Desert #
# <[email protected]> #
# http://TwoMoreLines.com #
# #
###################### L I C E N S E ##########################
# #
# This file is part of LyXBlogger. #
# #
# LyXBlogger is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# LyXBlogger is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with LyXBlogger. If not, see <http://www.gnu.org/licenses>. #
# #
########################################################################
import os, sys
import re
import wordpresslib
from misc import pr3
from misc import get_format
from misc import trim_cut_material
def get_html(input_file, CUT_FLAG):
# Read data from file
f = open(input_file, 'r')
html = f.read()
f.close()
pr3 ("FORMAT")
ELYXER_ENGINE = get_format(html)
# Trim designated cut material from bottom of post
html = trim_cut_material(html, CUT_FLAG, ELYXER_ENGINE)
# RECORD TITLE FROM HEADER TO USE AS POST
tit_exp = re.compile('''
<title> # Start of the <title> tag
..{1,}? # Anything in the middle (non-greedy)
</title> # Closing </title> tag
''', re.VERBOSE) # VERBOSE allows ''' '''
tit_obj = tit_exp.search(html)
# eLyXer uses 'Converted document' as the default title in the head
# and body. LyXHTML uses 'LyX Document' as the default, but only
# puts it in the head. The following code detects these default
# titles and asks for a real title
TITLE_EXPECTED_IN_BODY, TITLE_PROMPT = False, True
pr3 ("\nTITLE")
if(tit_obj):
TITLE_EXPECTED_IN_BODY = True
TITLE_PROMPT = False
full_title_tag = tit_obj.group()
blog_title = full_title_tag[7:-8] # Strip tags off
if (blog_title == 'Converted document'): # eLyXer's default (head and body)
TITLE_PROMPT = True
if (blog_title == 'LyX Document'): # LyXHTML's default (only in head)
TITLE_PROMPT = True
TITLE_EXPECTED_IN_BODY = False
if(TITLE_PROMPT):
pr3 ('No title found in document.')
pr3 ('Please enter a title now')
blog_title = sys.stdin.readline().replace('\n', '')
pr3 ('Using title: ' + blog_title)
# REMOVING TITLE FROM BODY
# Typical body title using ENGINE_INTERNAL:
# <h1 class="title"><a id='magicparlabel-309' />
# Example Article Title</h1>
# <h1 class="title">
# Typical body title using ELYXER_ENGINE using optional sizing:
# <h1 class="title">
# <span class="footnotesize">Hi Brian</span>
#
# </h1>
exp = re.compile('''
<h1\ # Beginning of tag with space
class="title"> # The rest of the tag
..{1,}? # Anything (non-greedy)
</h1> # Closing tag
''', re.VERBOSE | re.DOTALL) # .. can include linebreaks
bt_obj = exp.search(html)
if(bt_obj):
entire_bt_tag = bt_obj.group()
html = html.replace(entire_bt_tag, '')
elif (TITLE_EXPECTED_IN_BODY):
pass
#~ pr3 ('\nWARNING! The title of your entry may appear twice.')
#~ pr3 ('Please notify the author at [email protected] to')
#~ pr3 ('have this bug squashed.\n\n Press Enter to continue uploading.')
#~ sys.stdin.readline()
# What this really means is an opening title tag was found, but
# no title tag was found in the body.
# Eliminate everything outside the <body></body> tags
START_TAG = '<body>'
END_TAG = '</body>'
if (START_TAG in html):
html = html.partition(START_TAG)[2]
html = html.partition(END_TAG)[0]
# Reinvoke <code> and </code> tags from their escape sequence counterparts
html = html.replace('<code>', '<code>')
html = html.replace('</code>', '</code>')
# Remove Arrows from footnotes and margin notes
html = html.replace('[→', '[')
html = html.replace('→]', ']')
# Change the elyxer-generated id to a class, since wordpresslib appears to
# strip out all ids upon upload
html = html.replace("<div class=\"footer\" id=\"generated-by\">", "<div class=\"footer generated-by-elyxer\">")
return html, blog_title, ELYXER_ENGINE
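# Added usage sketch (illustrative; the file name and cut flag are
# hypothetical -- CUT_FLAG marks where trim_cut_material truncates the post):
#
#   html, title, engine = get_html('post.html', '<!--more-->')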
| gpl-3.0 | -8,272,553,882,738,798,000 | 43.301587 | 115 | 0.509495 | false | 3.992847 | false | false | false |
agustinhenze/natsort.debian | test_natsort/profile_natsorted.py | 1 | 3093 | # -*- coding: utf-8 -*-
"""\
This file contains functions to profile natsorted with different
inputs and different settings.
"""
from __future__ import print_function
import cProfile
import random
import sys
sys.path.insert(0, '.')
from natsort import natsorted, index_natsorted
from natsort.compat.py23 import py23_range
# Sample lists to sort
nums = random.sample(py23_range(10000), 1000)
nstr = list(map(str, random.sample(py23_range(10000), 1000)))
astr = ['a'+x+'num' for x in map(str, random.sample(py23_range(10000), 1000))]
tstr = [['a'+x, 'a-'+x]
for x in map(str, random.sample(py23_range(10000), 1000))]
cstr = ['a'+x+'-'+x for x in map(str, random.sample(py23_range(10000), 1000))]
def prof_nums(a):
print('*** Basic Call, Numbers ***')
for _ in py23_range(1000):
natsorted(a)
cProfile.run('prof_nums(nums)', sort='time')
def prof_num_str(a):
print('*** Basic Call, Numbers as Strings ***')
for _ in py23_range(1000):
natsorted(a)
cProfile.run('prof_num_str(nstr)', sort='time')
def prof_str(a):
print('*** Basic Call, Strings ***')
for _ in py23_range(1000):
natsorted(a)
cProfile.run('prof_str(astr)', sort='time')
def prof_str_index(a):
print('*** Basic Index Call ***')
for _ in py23_range(1000):
index_natsorted(a)
cProfile.run('prof_str_index(astr)', sort='time')
def prof_nested(a):
print('*** Basic Call, Nested Strings ***')
for _ in py23_range(1000):
natsorted(a)
cProfile.run('prof_nested(tstr)', sort='time')
def prof_str_noexp(a):
print('*** No-Exp Call ***')
for _ in py23_range(1000):
natsorted(a, exp=False)
cProfile.run('prof_str_noexp(astr)', sort='time')
def prof_str_unsigned(a):
print('*** Unsigned Call ***')
for _ in py23_range(1000):
natsorted(a, signed=False)
cProfile.run('prof_str_unsigned(astr)', sort='time')
def prof_str_unsigned_noexp(a):
print('*** Unsigned No-Exp Call ***')
for _ in py23_range(1000):
natsorted(a, signed=False, exp=False)
cProfile.run('prof_str_unsigned_noexp(astr)', sort='time')
def prof_str_asint(a):
print('*** Int Call ***')
for _ in py23_range(1000):
natsorted(a, number_type=int)
cProfile.run('prof_str_asint(astr)', sort='time')
def prof_str_asint_unsigned(a):
print('*** Unsigned Int (Versions) Call ***')
for _ in py23_range(1000):
natsorted(a, number_type=int, signed=False)
cProfile.run('prof_str_asint_unsigned(astr)', sort='time')
def prof_str_key(a):
print('*** Basic Call With Key ***')
for _ in py23_range(1000):
natsorted(a, key=lambda x: x.upper())
cProfile.run('prof_str_key(astr)', sort='time')
def prof_str_index_key(a):
print('*** Basic Index Call With Key ***')
for _ in py23_range(1000):
index_natsorted(a, key=lambda x: x.upper())
cProfile.run('prof_str_index_key(astr)', sort='time')
def prof_str_unorderable(a):
print('*** Basic Index Call, "Unorderable" ***')
for _ in py23_range(1000):
natsorted(a)
cProfile.run('prof_str_unorderable(cstr)', sort='time')
| mit | 3,724,489,187,692,291,000 | 26.371681 | 78 | 0.630133 | false | 2.904225 | false | false | false |
janastu/followsheep-tracker-server | servers/emule.py | 1 | 5118 | from app import create_app
from flask import (render_template, request, redirect,
url_for, flash, make_response, Response)
from flaskext.uploads import (UploadSet, configure_uploads, ARCHIVES,
UploadConfiguration)
from flask.ext.pymongo import PyMongo
import hashlib
import subprocess
import json
import os
import zipfile
from functools import wraps
app = create_app()
UPLOAD_DEST = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'static/data')
uploaded_files = UploadSet('tracks', ARCHIVES,
default_dest=lambda app: app.instance_path)
configure_uploads(app, uploaded_files)
uploaded_files._config = UploadConfiguration(UPLOAD_DEST)
configure_uploads(app, uploaded_files)
mongo = PyMongo(app)
def check_auth(username, password):
"""This function is called to check if a username /
password combination is valid.
"""
users = app.config.get('USERS')
passkey = app.config.get('SECRET')[0]
if username in users and passkey == password:
return username
def authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
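# Added usage sketch (illustrative; the route below is hypothetical):
#
# @app.route('/admin')
# @requires_auth
# def admin():
#     return 'authorized'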
@app.route('/')
def index():
return render_template('index.html', tracks=get_all_tracks())
@app.route('/upload', methods=['POST', 'GET'])
#@requires_auth
def upload():
if request.method == "POST":
filename = uploaded_files.save(request.files.get('track'))
hash = hashlib.md5()
fObject = open(os.path.join(UPLOAD_DEST, filename), 'r')
for chunk in iter(lambda: fObject.read(
4096), ""):
hash.update(chunk)
if mongo.db.tracks.find_one({'checksum': hash.hexdigest()}):
flash('Duplicate file!!')
return redirect(url_for('index'))
extract_file(filename, hash.hexdigest())
flash('Your upload was successful.')
return redirect(url_for('index'))
return render_template('upload.html')
def extract_file(name, checksum):
"""TODO: Insert assertions for error handling."""
"""Extract the zip and save the contents of the zip into a directory
organized by username in the config file."""
with zipfile.ZipFile(os.path.join(UPLOAD_DEST, name)) as zipF:
for fileName in zipF.infolist():
if fileName.filename.endswith('.json'):
configFilePath = fileName.filename
break
if configFilePath.find('/'):
configDirName = configFilePath.split('/')[0]
with zipF.open(configFilePath) as f:
config = json.load(f)
zipF.extractall(os.path.join(UPLOAD_DEST, 'extracted_data',
config.get('Device ID'),
config.get('User')))
for files in zipF.infolist():
if files.filename.endswith(".gpx"):
url = url_for('static',
filename=os.path.join('data',
'extracted_data',
config.get('Device ID'),
config.get('User'),
files.filename))
config['track-path'] = url
config['track-name'] = files.filename.rstrip('.gpx').split(
'/')[-1]
try:
dirPath = configDirName
except NameError:
dirPath = ''
subprocess.Popen(['bash', os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'scripts', 'convert.sh')),
os.path.join(UPLOAD_DEST, 'extracted_data',
config.get('Device ID'),
config.get('User'),
dirPath)])
config['data-path'] = config.get('track-path').rsplit('/', 1)[0]
config['checksum'] = checksum
mongo.db.tracks.save(config)
return True
def get_all_tracks():
tracks = [track for track in mongo.db.tracks.find()]
for track in tracks:
track['id'] = str(track['_id'])
track['device_ID'] = track['Device ID']
track['track_name'] = track['track-name']
del(track['_id'])
del(track['Device ID'])
del(track['track-name'])
return tracks
@app.route('/track/<ObjectId:id>', methods=["POST"])
def upload_track(id):
mongo.db.tracks.update({'_id': id}, {'$set': {
'track': json.loads(request.form.get('track'))}})
response = make_response()
return response
| mit | -1,589,133,590,861,117,000 | 35.297872 | 76 | 0.560961 | false | 4.315346 | true | false | false |
unbracketed/tipi | tipi/commands/base.py | 1 | 12932 | """
Base classes for writing management commands (named commands which can
be executed through ``tipi.py``).
"""
import os
import sys
from ConfigParser import ConfigParser
from optparse import make_option, OptionParser
from virtualenv import resolve_interpreter
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``tipi.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``OptionParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output.
4. If ``handle()`` raised a ``CommandError``, ``execute()`` will
instead print an error message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<appname
appname ...>'.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
"""
# Metadata about this command.
option_list = (
make_option('-v', '--verbose', action='store', dest='verbose', default='1',
type='choice', choices=['0', '1', '2'],
help='Verbosity level; 0=minimal output, 1=normal output, 2=all output'),
make_option('-p', '--python',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable),
make_option('--traceback', action='store_true',
help='Print traceback on exception'),
)
help = ''
args = ''
#TODO syntax coloring support
#def __init__(self):
# #self.style = color_style()
# try:
# home = os.getenv('USERPROFILE') or os.getenv('HOME')
# config = ConfigParser(open(os.path.join(home, '.tipirc')))
# except IOError:
# pass
# except:
# pass
#
# self._interpreter = resolve_interpreter('python')
#
#@property
#def python_interpreter(self):
# return self._interpreter
def get_version(self):
"""
        Return the tipi version, which should be correct for all
        built-in commands. User-supplied commands should
        override this method.
"""
#TODO placeholder
return (0, 1, 0,)
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=str(self.get_version()),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested, then run this command.
"""
parser = self.create_parser(argv[0], argv[1])
options, args = parser.parse_args(argv[2:])
self.execute(*args, **options.__dict__)
def execute(self, *args, **options):
"""
Try to execute this command. If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
try:
#output = self.handle(*args, **options)
print self.handle(*args, **options)
#if output:
# print output
except CommandError, e:
#sys.stderr.write(self.style.ERROR(str('Error: %s\n' % e)))
sys.stderr.write(str('Error: %s\n' % e))
sys.exit(1)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError()
#class AppCommand(BaseCommand):
# """
# A management command which takes one or more installed application
# names as arguments, and does something with each of them.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_app()``, which will be called once for each application.
#
# """
# args = '<appname appname ...>'
#
# def handle(self, *app_labels, **options):
# from django.db import models
# if not app_labels:
# raise CommandError('Enter at least one appname.')
# try:
# app_list = [models.get_app(app_label) for app_label in app_labels]
# except (ImproperlyConfigured, ImportError), e:
# raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
# output = []
# for app in app_list:
# app_output = self.handle_app(app, **options)
# if app_output:
# output.append(app_output)
# return '\n'.join(output)
#
# def handle_app(self, app, **options):
# """
# Perform the command's actions for ``app``, which will be the
# Python module corresponding to an application name given on
# the command line.
#
# """
# raise NotImplementedError()
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
args = '<label label ...>'
label = 'label'
def handle(self, *labels, **options):
if not labels:
raise CommandError('Enter at least one %s.' % self.label)
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError()
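
# Added illustrative subclass (hypothetical; not part of the original file),
# showing the minimal hook a LabelCommand implementation needs:
#
#   class EchoCommand(LabelCommand):
#       help = 'Print each label passed on the command line.'
#
#       def handle_label(self, label, **options):
#           return 'echo: %s' % label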
#class NoArgsCommand(BaseCommand):
# """
# A command which takes no arguments on the command line.
#
# Rather than implementing ``handle()``, subclasses must implement
# ``handle_noargs()``; ``handle()`` itself is overridden to ensure
# no arguments are passed to the command.
#
# Attempting to pass arguments will raise ``CommandError``.
#
# """
# args = ''
#
# def handle(self, *args, **options):
# if args:
# raise CommandError("Command doesn't accept any arguments")
# return self.handle_noargs(**options)
#
# def handle_noargs(self, **options):
# """
# Perform this command's actions.
#
# """
# raise NotImplementedError()
#def copy_helper(style, app_or_project, name, directory, other_name=''):
# """
# Copies either a Django application layout template or a Django project
# layout template into the specified directory.
#
# """
# # style -- A color style object (see django.core.management.color).
# # app_or_project -- The string 'app' or 'project'.
# # name -- The name of the application or project.
# # directory -- The directory to which the layout template should be copied.
# # other_name -- When copying an application layout, this should be the name
# # of the project.
# import re
# import shutil
# other = {'project': 'app', 'app': 'project'}[app_or_project]
# if not re.search(r'^[_a-zA-Z]\w*$', name): # If it's not a valid directory name.
# # Provide a smart error message, depending on the error.
# if not re.search(r'^[_a-zA-Z]', name):
# message = 'make sure the name begins with a letter or underscore'
# else:
# message = 'use only numbers, letters and underscores'
# raise CommandError("%r is not a valid %s name. Please %s." % (name, app_or_project, message))
# top_dir = os.path.join(directory, name)
# try:
# os.mkdir(top_dir)
# except OSError, e:
# raise CommandError(e)
#
# # Determine where the app or project templates are. Use
# # django.__path__[0] because we don't know into which directory
# # django has been installed.
# template_dir = os.path.join(django.__path__[0], 'conf', '%s_template' % app_or_project)
#
# for d, subdirs, files in os.walk(template_dir):
# relative_dir = d[len(template_dir)+1:].replace('%s_name' % app_or_project, name)
# if relative_dir:
# os.mkdir(os.path.join(top_dir, relative_dir))
# for i, subdir in enumerate(subdirs):
# if subdir.startswith('.'):
# del subdirs[i]
# for f in files:
# if not f.endswith('.py'):
# # Ignore .pyc, .pyo, .py.class etc, as they cause various
# # breakages.
# continue
# path_old = os.path.join(d, f)
# path_new = os.path.join(top_dir, relative_dir, f.replace('%s_name' % app_or_project, name))
# fp_old = open(path_old, 'r')
# fp_new = open(path_new, 'w')
# fp_new.write(fp_old.read().replace('{{ %s_name }}' % app_or_project, name).replace('{{ %s_name }}' % other, other_name))
# fp_old.close()
# fp_new.close()
# try:
# shutil.copymode(path_old, path_new)
# _make_writeable(path_new)
# except OSError:
# sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
#
#def _make_writeable(filename):
# """
# Make sure that the file is writeable. Useful if our source is
# read-only.
#
# """
# import stat
# if sys.platform.startswith('java'):
# # On Jython there is no os.access()
# return
# if not os.access(filename, os.W_OK):
# st = os.stat(filename)
# new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
# os.chmod(filename, new_permissions)
| mit | 5,023,752,352,078,874,000 | 35.022284 | 171 | 0.597819 | false | 4.060283 | false | false | false |
jkolczasty/appletree | appletree/gui/rteditorbase.py | 1 | 12191 | #!/usr/bin/env python3
#
# Copyright 2017+ Jakub Kolasa <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# __author__ = 'Jakub Kolasa <[email protected]'>
#
from appletree.gui.qt import Qt, QtCore, QtGui, FontDB, loadQImageFix
from appletree.helpers import T, messageDialog, getIconImage
import requests
import html
import re
import logging
from weakref import ref
import base64
RE_URL = re.compile(r'((file|http|ftp|https)://([\w_-]+(?:(?:\.[\w_-]+)+))([\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])?)')
class ImageResizeDialog(Qt.QDialog):
def __init__(self, win, title, name, w, h):
super(ImageResizeDialog, self).__init__(win)
self.w = w
self.h = h
self.keepaspect = True
self.aspect = float(w) / float(h)
self.result = False
self.setWindowTitle(title)
self.vbox = Qt.QVBoxLayout(self)
self.vbox.addWidget(Qt.QLabel(T(name)))
self.box = Qt.QGroupBox(self)
self.form = Qt.QFormLayout(self.box)
buttonbox = Qt.QDialogButtonBox()
buttonbox.setGeometry(Qt.QRect(150, 250, 341, 32))
buttonbox.setOrientation(QtCore.Qt.Horizontal)
buttonbox.setStandardButtons(Qt.QDialogButtonBox.Cancel | Qt.QDialogButtonBox.Ok)
buttonbox.setWindowTitle(title)
self.vbox.addWidget(self.box)
self.vbox.addWidget(buttonbox)
self.vbox.setStretch(2, 0)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# self.setWindowModality(QtCore.QCoreApplication)
self.setModal(True)
self.ww = Qt.QSpinBox()
self.ww.setMinimum(16)
self.ww.setMaximum(0xffff)
self.ww.setValue(self.w)
self.ww.valueChanged.connect(self.on_changed_width)
self.form.addRow(T("Width"), self.ww)
self.ww.setFocus()
self.wh = Qt.QSpinBox()
self.ww.setMinimum(16)
self.wh.setMaximum(0xffff)
self.wh.setValue(self.h)
self.wh.valueChanged.connect(self.on_changed_height)
self.form.addRow(T("Height"), self.wh)
widget = Qt.QCheckBox()
widget.setChecked(True)
widget.stateChanged.connect(self.on_changed_aspect)
self.form.addRow(T("Keep aspect"), widget)
buttonbox.accepted.connect(self.on_accept)
buttonbox.rejected.connect(self.on_reject)
# QtCore.QMetaObject.connectSlotsByName(Dialog)
self.adjustSize()
self.setMinimumWidth(600)
self.setSizePolicy(Qt.QSizePolicy.MinimumExpanding, Qt.QSizePolicy.MinimumExpanding)
def exec_(self):
super(ImageResizeDialog, self).exec_()
# del self.fields
return self.result
def on_accept(self):
self.result = True
self.close()
def on_reject(self):
self.result = False
self.close()
def on_changed_width(self, w):
self.w = w
if not self.keepaspect:
return
self.keepaspect = False
h = float(w) / self.aspect
self.wh.setValue(int(h))
self.keepaspect = True
def on_changed_height(self, h):
self.h = h
if not self.keepaspect:
return
self.keepaspect = False
w = float(h) * self.aspect
self.ww.setValue(int(w))
self.keepaspect = True
def on_changed_aspect(self, newvalue):
self.keepaspect = newvalue
class ImageViewDialog(Qt.QDialog):
def __init__(self, win, title, image):
super(ImageViewDialog, self).__init__(win)
self.setWindowTitle(title)
vbox = Qt.QVBoxLayout(self)
scrollarea = Qt.QScrollArea(self)
scrollarea.setWidgetResizable(True)
label = Qt.QLabel(self)
label.setAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
if image.__class__.__name__ == 'QImage':
            # QPixmap.fromImage is a static factory; the old code discarded its result
            pixmap = Qt.QPixmap.fromImage(image)
else:
pixmap = image
label.setPixmap(pixmap)
scrollarea.setWidget(label)
vbox.addWidget(scrollarea)
self.setSizePolicy(Qt.QSizePolicy.MinimumExpanding, Qt.QSizePolicy.MinimumExpanding)
class QTextEdit(Qt.QTextEdit):
contextMenuEventSingal = Qt.pyqtSignal(object)
linkClicked = Qt.pyqtSignal(object)
clickedAnchor = None
def __init__(self, *args, **kwargs):
super(QTextEdit, self).__init__()
self.win = ref(kwargs.get('parent'))
# self.contextMenuEventSingal = Qt.pyqtSignal(object)
flags = self.textInteractionFlags()
flags = QtCore.Qt.TextInteractionFlags(flags)
flags |= QtCore.Qt.LinksAccessibleByMouse
flags |= QtCore.Qt.LinksAccessibleByKeyboard
self.setTextInteractionFlags(flags)
self.setAcceptRichText(True)
self.setAutoFormatting(QTextEdit.AutoAll)
self.addShortcut('CTRL+B', self.on_bold)
self.addShortcut('CTRL+I', self.on_italic)
self.addShortcut('CTRL+U', self.on_underline)
self.addShortcut('CTRL+T', self.on_test)
def addShortcut(self, shortcut, callback):
action = Qt.QAction(self)
action.setShortcut(shortcut)
action.triggered.connect(callback)
self.addAction(action)
def mousePressEvent(self, event):
pos = event.pos()
self.clickedAnchor = self.anchorAt(pos)
return super(QTextEdit, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if self.clickedAnchor and (event.button() & QtCore.Qt.LeftButton) and (
event.modifiers() & QtCore.Qt.ControlModifier):
pos = event.pos()
clickedAnchor = self.anchorAt(pos)
messageDialog("Link clicked", "Link you clicked: {0}".format(clickedAnchor), details=clickedAnchor)
self.linkClicked.emit(event)
self.clickedAnchor = None
return
return super(QTextEdit, self).mouseReleaseEvent(event)
def contextMenuEvent(self, event):
self.contextMenuEventSingal.emit(event)
def insertLink(self, url, cursor=None, addSpace=True):
if not cursor:
cursor = self.textCursor()
cursor = Qt.QTextCursor(cursor)
_cformat = cursor.charFormat()
font = _cformat.font()
_format = Qt.QTextCharFormat()
_format.setFont(font)
_format.setUnderlineStyle(1)
_format.setForeground(QtCore.Qt.blue)
_format.setAnchor(True)
_format.setAnchorHref(url)
cursor.insertText(url, _format)
if addSpace:
_format = Qt.QTextCharFormat()
_format.setFont(font)
cursor.insertText(" ", _format)
def insertText(self, s, cursor=None):
if not cursor:
cursor = self.textCursor()
cursor = Qt.QTextCursor(cursor)
_cformat = cursor.charFormat()
font = _cformat.font()
_format = Qt.QTextCharFormat()
_format.setFont(font)
cursor.insertText(s, _format)
def insertFromMimeData(self, mime):
if mime.hasText() and not mime.hasHtml():
global RE_URL
s = mime.text()
# replace links
s = html.escape(s, quote=False)
index = 0
c = 0
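            # hard iteration cap -- presumably a safety bound against runaway matching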
while c < 1000:
m = RE_URL.search(s, index)
if not m:
s2 = s[index:]
if c and s2.startswith(" "):
s2 = s2[1:]
self.insertText(s2)
break
pos = m.start()
s2 = s[index:pos]
if c and s2.startswith(" "):
s2 = s2[1:]
self.insertText(s2)
index2 = m.end()
self.insertLink(m.group(1))
c += 1
index = index2
return
return super(QTextEdit, self).insertFromMimeData(mime)
def on_bold(self):
if self.fontWeight() == QtGui.QFont.Bold:
self.setFontWeight(QtGui.QFont.Normal)
else:
self.setFontWeight(QtGui.QFont.Bold)
def on_italic(self):
self.setFontItalic(not self.fontItalic())
def on_underline(self):
self.setFontUnderline(not self.fontUnderline())
def on_strikeout(self):
# not implemented
font = self.currentFont()
font.setStrikeOut(not font.strikeOut())
self.setCurrentFont(font)
self.setFont(font)
def on_test(self):
pass
class RTDocument(Qt.QTextDocument):
def __init__(self, editor, docid, *args, **kwargs):
super(RTDocument, self).__init__(*args, **kwargs)
self.log = logging.getLogger("at.document." + docid)
self.editor = editor
self.docid = docid
def loadResourceRemote(self, url):
# TODO: show wait/progress dialog/info
try:
ret = requests.get(url)
if ret.status_code not in (200,):
return None
data = Qt.QByteArray(ret.content)
image = Qt.QPixmap()
image.loadFromData(data)
data.clear()
return image
except Exception as e:
self.log.error("Failed to retrive remote image: %s: %s", e.__class__.__name__, e)
def loadResourceMissing(self, _qurl):
image = getIconImage("noimage")
self.editor.doc.addResource(Qt.QTextDocument.ImageResource, _qurl, image)
return image
def loadResource(self, p_int, _qurl):
url = _qurl.toString()
if url.startswith('data:image/'):
return super(RTDocument, self).loadResource(p_int, _qurl)
self.editor.log.info("loadResource(): %s", url)
scheme = _qurl.scheme()
image = self.editor.project.doc.getImage(self.docid, url)
if image:
self.editor.doc.addResource(Qt.QTextDocument.ImageResource, _qurl, image)
return image
if scheme:
if scheme in ('http', 'https'):
self.editor.log.info("Trying retrive remote image: %s", url)
# remote image get it from network
image = self.loadResourceRemote(url)
if image:
self.editor.doc.addResource(Qt.QTextDocument.ImageResource, _qurl, image)
return image
if scheme == 'file':
try:
filename = Qt.QDir.toNativeSeparators(_qurl.toLocalFile())
self.editor.log.info("Trying retrive local image: %s", filename)
f = Qt.QFile(filename)
if not f.open(Qt.QFile.ReadOnly):
self.log.error("loadResource(): could not open file: %s", url)
return self.loadResourceMissing(_qurl)
data = f.readAll()
f.close()
del f
image = Qt.QPixmap()
image.loadFromData(data)
data.clear()
del data
if image:
self.editor.doc.addResource(Qt.QTextDocument.ImageResource, _qurl, image)
return image
except Exception as e:
self.log.error("Failed to load image: %s: %s", e.__class__.__name__, e)
res = super(RTDocument, self).loadResource(p_int, _qurl)
if res:
return res
return self.loadResourceMissing(_qurl)
| gpl-3.0 | -7,350,194,851,866,700,000 | 32.958217 | 114 | 0.585924 | false | 3.848169 | false | false | false |
irl/gajim | src/network_manager_listener.py | 1 | 3736 | # -*- coding: utf-8 -*-
## src/network_manager_listener.py
##
## Copyright (C) 2006 Jeffrey C. Ollie <jeff AT ocjtech.us>
## Nikos Kouremenos <kourem AT gmail.com>
## Stefan Bethge <stefan AT lanpartei.de>
## Copyright (C) 2006-2014 Yann Leboulanger <asterix AT lagaule.org>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
import sys
from common import gajim
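# Note: the callbacks below are module-level D-Bus signal receivers; their
# `self` parameter simply absorbs the first signal argument and is unused.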
def device_now_active(self, *args):
"""
For Network Manager 0.6
"""
for connection in gajim.connections.values():
if gajim.config.get_per('accounts', connection.name,
'listen_to_network_manager') and connection.time_to_reconnect:
connection._reconnect()
def device_no_longer_active(self, *args):
"""
For Network Manager 0.6
"""
for connection in gajim.connections.values():
if gajim.config.get_per('accounts', connection.name,
'listen_to_network_manager') and connection.connected > 1:
connection._disconnectedReconnCB()
def state_changed(state):
"""
For Network Manager 0.7 - 0.9
"""
nm_state = props.Get("org.freedesktop.NetworkManager", "State")
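    # 3 == NM_STATE_CONNECTED (NM 0.7/0.8); 70 == NM_STATE_CONNECTED_GLOBAL (NM 0.9)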
if nm_state == 3 or nm_state == 70:
for connection in gajim.connections.values():
if gajim.config.get_per('accounts', connection.name,
'listen_to_network_manager') and connection.time_to_reconnect:
connection._reconnect()
else:
for connection in gajim.connections.values():
if gajim.config.get_per('accounts', connection.name,
'listen_to_network_manager') and connection.connected > 1:
connection._disconnectedReconnCB()
supported = False
from common import dbus_support
if dbus_support.supported:
import dbus
try:
from common.dbus_support import system_bus
bus = system_bus.bus()
if 'org.freedesktop.NetworkManager' in bus.list_names():
nm_object = bus.get_object('org.freedesktop.NetworkManager',
'/org/freedesktop/NetworkManager')
props = dbus.Interface(nm_object, "org.freedesktop.DBus.Properties")
bus.add_signal_receiver(state_changed,
'StateChanged',
'org.freedesktop.NetworkManager',
'org.freedesktop.NetworkManager',
'/org/freedesktop/NetworkManager')
supported = True
except dbus.DBusException:
try:
if 'org.freedesktop.NetworkManager' in bus.list_names():
supported = True
bus.add_signal_receiver(device_no_longer_active,
'DeviceNoLongerActive',
'org.freedesktop.NetworkManager',
'org.freedesktop.NetworkManager',
'/org/freedesktop/NetworkManager')
bus.add_signal_receiver(device_now_active,
'DeviceNowActive',
'org.freedesktop.NetworkManager',
'org.freedesktop.NetworkManager',
'/org/freedesktop/NetworkManager')
except Exception:
pass
| gpl-3.0 | -547,461,880,927,801,860 | 35.627451 | 80 | 0.615632 | false | 4.052061 | false | false | false |
LedgerHQ/blue-loader-python | ledgerblue/loadApp.py | 1 | 12915 | """
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
DEFAULT_ALIGNMENT = 1024
PAGE_ALIGNMENT = 64
import argparse
import os
NOCRC=False
if "NOCRC" in os.environ and len(os.environ["NOCRC"]) != 0:
NOCRC=os.environ["NOCRC"]
def get_argparser():
parser = argparse.ArgumentParser(description="Load an app onto the device from a hex file.")
parser.add_argument("--targetId", help="The device's target ID (default is Ledger Blue)", type=auto_int)
parser.add_argument("--targetVersion", help="Set the chip target version")
parser.add_argument("--fileName", help="The application hex file to be loaded onto the device")
parser.add_argument("--icon", help="The icon content to use (hex encoded)")
parser.add_argument("--curve", help="""A curve on which BIP 32 derivation is locked ("secp256k1", "prime256r1",
"ed25519" or "bls12381g1"), can be repeated""", action='append')
parser.add_argument("--path", help="""A BIP 32 path to which derivation is locked (format decimal a'/b'/c), can be
repeated""", action='append')
parser.add_argument("--path_slip21", help="""A SLIP 21 path to which derivation is locked""", action='append')
parser.add_argument("--appName", help="The name to give the application after loading it")
parser.add_argument("--signature", help="A signature of the application (hex encoded)")
parser.add_argument("--signApp", help="Sign application with provided signPrivateKey", action='store_true')
parser.add_argument("--appFlags", help="The application flags", type=auto_int)
parser.add_argument("--bootAddr", help="The application's boot address", type=auto_int)
parser.add_argument("--rootPrivateKey", help="""The Signer private key used to establish a Secure Channel (otherwise
a random one will be generated)""")
parser.add_argument("--signPrivateKey", help="Set the private key used to sign the loaded app")
parser.add_argument("--apdu", help="Display APDU log", action='store_true')
parser.add_argument("--deployLegacy", help="Use legacy deployment API", action='store_true')
parser.add_argument("--apilevel", help="Use given API level when interacting with the device", type=auto_int)
parser.add_argument("--delete", help="Delete the app with the same name before loading the provided one", action='store_true')
parser.add_argument("--params", help="Store icon and install parameters in a parameter section before the code", action='store_true')
parser.add_argument("--tlv", help="Use install parameters for all variable length parameters", action='store_true')
parser.add_argument("--dataSize", help="The code section's size in the provided hex file (to separate data from code, if not provided the whole allocated NVRAM section for the application will remain readonly.", type=auto_int)
parser.add_argument("--appVersion", help="The application version (as a string)")
parser.add_argument("--offline", help="Request to only output application load APDUs into given filename")
parser.add_argument("--offlineText", help="Request to only output application load APDUs into given filename in text mode", action='store_true')
parser.add_argument("--installparamsSize", help="The loaded install parameters section size (when parameters are already included within the .hex file.", type=auto_int)
parser.add_argument("--tlvraw", help="Add a custom install param with the hextag:hexvalue encoding", action='append')
parser.add_argument("--dep", help="Add a dependency over an appname[:appversion]", action='append')
parser.add_argument("--nocrc", help="Skip CRC generation when loading", action='store_true')
return parser
def auto_int(x):
return int(x, 0)
def parse_bip32_path(path, apilevel):
import struct
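	# Illustrative example (not in the original source): with apilevel >= 5,
	# "44'/0'" packs as b'\x02' + struct.pack('>I', 0x8000002C) + struct.pack('>I', 0x80000000)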
if len(path) == 0:
return b""
result = b""
elements = path.split('/')
if apilevel >= 5:
result = result + struct.pack('>B', len(elements))
for pathElement in elements:
element = pathElement.split('\'')
if len(element) == 1:
result = result + struct.pack(">I", int(element[0]))
else:
result = result + struct.pack(">I", 0x80000000 | int(element[0]))
return result
def parse_slip21_path(path):
import struct
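	# Illustrative example: parse_slip21_path('LABEL') -> b'\x86\x00LABEL'
	# (0x80 | (5 + 1) == 0x86, then a zero byte, then the ASCII label)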
result = struct.pack('>B', 0x80 | (len(path) + 1))
result = result + b'\x00' + string_to_bytes(path)
return result
def string_to_bytes(x):
import sys
if sys.version_info.major == 3:
return bytes(x, 'ascii')
else:
return bytes(x)
if __name__ == '__main__':
from .ecWrapper import PrivateKey
from .comm import getDongle
from .hexParser import IntelHexParser, IntelHexPrinter
from .hexLoader import HexLoader
from .hexLoader import *
from .deployed import getDeployedSecretV1, getDeployedSecretV2
import struct
import binascii
import sys
args = get_argparser().parse_args()
if args.apilevel == None:
args.apilevel = 10
if args.targetId == None:
args.targetId = 0x31000002
if args.fileName == None:
raise Exception("Missing fileName")
if args.appName == None:
raise Exception("Missing appName")
if args.path_slip21 != None and args.apilevel < 10:
raise Exception("SLIP 21 path not supported using this API level")
if args.appFlags == None:
args.appFlags = 0
if args.rootPrivateKey == None:
privateKey = PrivateKey()
publicKey = binascii.hexlify(privateKey.pubkey.serialize(compressed=False))
print("Generated random root public key : %s" % publicKey)
args.rootPrivateKey = privateKey.serialize()
args.appName = string_to_bytes(args.appName)
parser = IntelHexParser(args.fileName)
if args.bootAddr == None:
args.bootAddr = parser.getBootAddr()
path = b""
curveMask = 0xff
if args.curve != None:
curveMask = 0x00
for curve in args.curve:
if curve == 'secp256k1':
curveMask |= 0x01
elif curve == 'prime256r1':
curveMask |= 0x02
elif curve == 'ed25519':
curveMask |= 0x04
elif curve == 'bls12381g1':
curveMask |= 0x10
else:
raise Exception("Unknown curve " + curve)
if args.apilevel >= 5:
if (args.path_slip21 != None):
curveMask |= 0x08
path += struct.pack('>B',curveMask)
if args.path != None:
for item in args.path:
if len(item) != 0:
path += parse_bip32_path(item, args.apilevel)
if args.path_slip21 != None:
for item in args.path_slip21:
if len(item) != 0:
path += parse_slip21_path(item)
if (args.path == None) or ((len(args.path) == 1) and (len(args.path[0]) == 0)):
path += struct.pack('>B', 0) # Unrestricted, authorize all paths for regular derivation
else:
if args.curve != None:
print("Curve not supported using this API level, ignoring")
if args.path != None:
if len(args.path) > 1:
print("Multiple path levels not supported using this API level, ignoring")
else:
path = parse_bip32_path(args.path[0], args.apilevel)
if not args.icon is None:
args.icon = bytearray.fromhex(args.icon)
signature = None
if not args.signature is None:
signature = bytearray.fromhex(args.signature)
#prepend app's data with the icon content (could also add other various install parameters)
printer = IntelHexPrinter(parser)
	# Use of the Nested Encryption Key within the SCP protocol is mandatory for upgrades
cleardata_block_len=None
if args.appFlags & 2:
# Not true for scp < 3
# if signature is None:
# raise BaseException('Upgrades must be signed')
# ensure data can be decoded with code decryption key without troubles.
cleardata_block_len = 16
dongle = None
secret = None
if not args.offline:
dongle = getDongle(args.apdu)
if args.deployLegacy:
secret = getDeployedSecretV1(dongle, bytearray.fromhex(args.rootPrivateKey), args.targetId)
else:
secret = getDeployedSecretV2(dongle, bytearray.fromhex(args.rootPrivateKey), args.targetId)
else:
fileTarget = open(args.offline, "wb")
class FileCard():
def __init__(self, target):
self.target = target
def exchange(self, apdu):
if (args.apdu):
print(binascii.hexlify(apdu))
apdu = binascii.hexlify(apdu)
if sys.version_info.major == 2:
self.target.write(str(apdu) + '\n')
else:
self.target.write(apdu + '\n'.encode())
return bytearray([])
def apduMaxDataSize(self):
# ensure to allow for encryption of those apdu afterward
return 240
dongle = FileCard(fileTarget)
loader = HexLoader(dongle, 0xe0, not(args.offline), secret, cleardata_block_len=cleardata_block_len)
	# TLV mode does not support explicit removal by name; that would require listing the apps first to identify the hash to be removed
if (not (args.appFlags & 2)) and args.delete:
loader.deleteApp(args.appName)
if (args.tlv):
		# if no data size is provided, the whole provided hex file is treated as code and no data section is split off
code_length = printer.maxAddr() - printer.minAddr()
if not args.dataSize is None:
code_length -= args.dataSize
else:
args.dataSize = 0
installparams = b""
# express dependency
if (args.dep):
for dep in args.dep:
appname = dep
appversion = None
# split if version is specified
if (dep.find(":") != -1):
(appname,appversion) = dep.split(":")
depvalue = encodelv(string_to_bytes(appname))
if(appversion):
depvalue += encodelv(string_to_bytes(appversion))
installparams += encodetlv(BOLOS_TAG_DEPENDENCY, depvalue)
#add raw install parameters as requested
if (args.tlvraw):
for tlvraw in args.tlvraw:
(hextag,hexvalue) = tlvraw.split(":")
installparams += encodetlv(int(hextag, 16), binascii.unhexlify(hexvalue))
if (not (args.appFlags & 2)) and ( args.installparamsSize is None or args.installparamsSize == 0 ):
#build install parameters
#mandatory app name
installparams += encodetlv(BOLOS_TAG_APPNAME, args.appName)
if not args.appVersion is None:
installparams += encodetlv(BOLOS_TAG_APPVERSION, string_to_bytes(args.appVersion))
if not args.icon is None:
installparams += encodetlv(BOLOS_TAG_ICON, bytes(args.icon))
if len(path) > 0:
installparams += encodetlv(BOLOS_TAG_DERIVEPATH, path)
# append install parameters to the loaded file
param_start = printer.maxAddr()+(PAGE_ALIGNMENT-(args.dataSize%PAGE_ALIGNMENT))%PAGE_ALIGNMENT
# only append install param section when not an upgrade as it has already been computed in the encrypted and signed chunk
printer.addArea(param_start, installparams)
paramsSize = len(installparams)
else:
paramsSize = args.installparamsSize
# split code and install params in the code
code_length -= args.installparamsSize
# create app
#ensure the boot address is an offset
if args.bootAddr > printer.minAddr():
args.bootAddr -= printer.minAddr()
loader.createApp(code_length, args.dataSize, paramsSize, args.appFlags, args.bootAddr|1)
elif (args.params):
paramsSectionContent = []
if not args.icon is None:
paramsSectionContent = args.icon
#take care of aligning the parameters sections to avoid possible invalid dereference of aligned words in the program nvram.
#also use the default MPU alignment
param_start = printer.minAddr()-len(paramsSectionContent)-(DEFAULT_ALIGNMENT-(len(paramsSectionContent)%DEFAULT_ALIGNMENT))
printer.addArea(param_start, paramsSectionContent)
# account for added regions (install parameters, icon ...)
appLength = printer.maxAddr() - printer.minAddr()
loader.createAppNoInstallParams(args.appFlags, appLength, args.appName, None, path, 0, len(paramsSectionContent), string_to_bytes(args.appVersion))
else:
# account for added regions (install parameters, icon ...)
appLength = printer.maxAddr() - printer.minAddr()
loader.createAppNoInstallParams(args.appFlags, appLength, args.appName, args.icon, path, None, None, string_to_bytes(args.appVersion))
hash = loader.load(0x0, 0xF0, printer, targetId=args.targetId, targetVersion=args.targetVersion, doCRC=not (args.nocrc or NOCRC))
print("Application full hash : " + hash)
if (signature == None and args.signApp):
masterPrivate = PrivateKey(bytes(bytearray.fromhex(args.signPrivateKey)))
signature = masterPrivate.ecdsa_serialize(masterPrivate.ecdsa_sign(bytes(binascii.unhexlify(hash)), raw=True))
print("Application signature: " + str(binascii.hexlify(signature)))
if (args.tlv):
loader.commit(signature)
else:
loader.run(args.bootAddr-printer.minAddr(), signature)
| apache-2.0 | -7,240,628,984,998,045,000 | 40.261981 | 227 | 0.710879 | false | 3.385321 | false | false | false |
DesertBot/DesertBot | desertbot/modules/utils/Sub.py | 1 | 6918 | """
Created on Feb 28, 2015
@author: StarlitGhost
"""
import re
from twisted.plugin import IPlugin
from twisted.words.protocols.irc import assembleFormattedText as colour, attributes as A
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
from desertbot.utils import dictutils
class UnbalancedBracesException(Exception):
def __init__(self, message, column):
# Call the base exception constructor with the params it needs
super(UnbalancedBracesException, self).__init__(message)
# Store the message
self.message = message
# Store the column position of the unbalanced brace
self.column = column
class DictMergeError(Exception):
pass
@implementer(IPlugin, IModule)
class Sub(BotCommand):
def triggers(self):
return ['sub']
def help(self, query):
return [
"sub <text> - "
"executes nested commands in <text> and replaces the commands with their output",
"syntax: text {command params} more text {command {command params} {command params}}",
"example: .sub Some {rainbow magical} {flip topsy-turvy} text"]
def execute(self, message: IRCMessage):
subString = self._mangleEscapes(message.parameters)
try:
segments = list(self._parseSubcommandTree(subString))
except UnbalancedBracesException as e:
red = colour(A.bold[A.fg.lightRed['']])
normal = colour(A.normal[''])
error = (subString[:e.column]
+ red + subString[e.column]
+ normal + subString[e.column+1:])
error = self._unmangleEscapes(error, False)
return [
IRCResponse("Sub Error: {} (column {})".format(e.message, e.column), message.replyTo),
IRCResponse(error, message.replyTo)]
prevLevel = -1
responseStack = []
metadata = message.metadata
if 'tracking' in metadata:
metadata['tracking'].add('Sub')
        else:
            # note: set('Sub') would create {'S', 'u', 'b'}; a one-element set is intended
            metadata['tracking'] = {'Sub'}
for segment in segments:
(level, command, start, end) = segment
# grab the text replace var dict from the metadata, if present
if 'var' in metadata:
replaceVars = metadata['var']
else:
replaceVars = {}
# We've finished executing subcommands at the previous depth,
# so replace subcommands with their output at the current depth
if level < prevLevel:
command = self._substituteResponses(command, responseStack, level, replaceVars, start)
# Replace any replaceVars in the command
for var, value in replaceVars.items():
command = re.sub(r'\$\b{}\b'.format(re.escape(var)), '{}'.format(value), command)
# Build a new message out of this segment
inputMessage = IRCMessage(message.type, message.user, message.channel,
self.bot.commandChar + command.lstrip(),
self.bot,
metadata=metadata)
# Execute the constructed message
if inputMessage.command.lower() in self.bot.moduleHandler.mappedTriggers:
module = self.bot.moduleHandler.mappedTriggers[inputMessage.command.lower()]
response = module.execute(inputMessage)
"""@type : IRCResponse"""
else:
return IRCResponse("'{}' is not a recognized command trigger"
.format(inputMessage.command), message.replyTo)
# Push the response onto the stack
responseStack.append((level, response.response, start, end))
# merge response metadata back into our sub-global dict
            metadata = dictutils.recursiveMerge(metadata, response.metadata)
# update the replaceVars in case this is the outermost segment
# (and therefore we won't be looping again to pick them up)
if 'var' in metadata:
replaceVars = metadata['var']
prevLevel = level
responseString = self._substituteResponses(subString, responseStack, -1, replaceVars, -1)
responseString = self._unmangleEscapes(responseString)
return IRCResponse(responseString, message.replyTo, metadata=metadata)
@staticmethod
def _parseSubcommandTree(string):
"""Parse braced segments in string as tuples (level, contents, start index, end index)."""
stack = []
for i, c in enumerate(string):
if c == '{':
stack.append(i)
elif c == '}':
if stack:
start = stack.pop()
yield len(stack), string[start + 1: i], start, i
else:
raise UnbalancedBracesException("unbalanced closing brace", i)
if stack:
start = stack.pop()
raise UnbalancedBracesException("unbalanced opening brace", start)
@staticmethod
def _substituteResponses(command, responseStack, commandLevel, replaceVars, start):
# Pop responses off the stack and replace the subcommand that generated them
while len(responseStack) > 0:
level, responseString, rStart, rEnd = responseStack.pop()
if level <= commandLevel:
responseStack.append((level, responseString, rStart, rEnd))
break
cStart = rStart - start - 1
cEnd = rEnd - start
# Replace the subcommand with its output
command = command[:cStart] + responseString + command[cEnd:]
# Replace any replaceVars generated by functions
for var, value in replaceVars.items():
command = re.sub(r'\$\b{}\b'.format(re.escape(var)), '{}'.format(value), command)
return command
@staticmethod
def _mangleEscapes(string):
# Replace escaped left and right braces with something
# that should never show up in messages/responses
string = re.sub(r'(?<!\\)\\\{', '@LB@', string)
string = re.sub(r'(?<!\\)\\\}', '@RB@', string)
return string
@staticmethod
def _unmangleEscapes(string, unescape=True):
if unescape:
# Replace the mangled escaped braces with unescaped braces
string = string.replace('@LB@', '{')
string = string.replace('@RB@', '}')
else:
            # Just unmangle them, i.e., keep the escapes
string = string.replace('@LB@', '\\{')
string = string.replace('@RB@', '\\}')
return string
sub = Sub()
| mit | 5,800,723,203,344,052,000 | 38.531429 | 102 | 0.594102 | false | 4.557312 | false | false | false |