repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
vicnet/weboob | modules/bred/bred/browser.py | 1 | 9230 | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
import time
import operator
from datetime import date
from weboob.capabilities.bank import Account
from weboob.browser import LoginBrowser, need_login, URL
from weboob.capabilities.base import find_object
from weboob.tools.capabilities.bank.transactions import sorted_transactions
from .pages import (
HomePage, LoginPage, UniversePage,
TokenPage, MoveUniversePage, SwitchPage,
LoansPage, AccountsPage, IbanPage, LifeInsurancesPage,
SearchPage, ProfilePage, EmailsPage, ErrorPage,
ErrorCodePage,
)
__all__ = ['BredBrowser']
class BredBrowser(LoginBrowser):
BASEURL = 'https://www.bred.fr'
home = URL('/$', HomePage)
login = URL('/transactionnel/Authentication', LoginPage)
error = URL('.*gestion-des-erreurs/erreur-pwd',
'.*gestion-des-erreurs/opposition',
'/pages-gestion-des-erreurs/erreur-technique',
'/pages-gestion-des-erreurs/message-tiers-oppose', ErrorPage)
universe = URL('/transactionnel/services/applications/menu/getMenuUnivers', UniversePage)
token = URL(r'/transactionnel/services/rest/User/nonce\?random=(?P<timestamp>.*)', TokenPage)
move_universe = URL('/transactionnel/services/applications/listes/(?P<key>.*)/default', MoveUniversePage)
switch = URL('/transactionnel/services/rest/User/switch', SwitchPage)
loans = URL('/transactionnel/services/applications/prets/liste', LoansPage)
accounts = URL('/transactionnel/services/rest/Account/accounts', AccountsPage)
iban = URL('/transactionnel/services/rest/Account/account/(?P<number>.*)/iban', IbanPage)
life_insurances = URL('/transactionnel/services/applications/avoirsPrepar/getAvoirs', LifeInsurancesPage)
search = URL('/transactionnel/services/applications/operations/getSearch/', SearchPage)
profile = URL('/transactionnel/services/rest/User/user', ProfilePage)
emails = URL('/transactionnel/services/applications/gestionEmail/getAdressesMails', EmailsPage)
    error_code = URL(r'/.*\?errorCode=.*', ErrorCodePage)
def __init__(self, accnum, login, password, *args, **kwargs):
kwargs['username'] = login
        # BRED only uses the first 8 characters of the password (even if a
        # longer one is set): the JS login form truncates input after the
        # 8th character.
kwargs['password'] = password[:8]
super(BredBrowser, self).__init__(*args, **kwargs)
self.accnum = accnum
self.universes = None
self.current_univers = None
def do_login(self):
if 'hsess' not in self.session.cookies:
self.home.go() # set session token
assert 'hsess' in self.session.cookies, "Session token not correctly set"
# hard-coded authentication payload
data = dict(identifiant=self.username, password=self.password)
cookies = {k: v for k, v in self.session.cookies.items() if k in ('hsess', )}
self.session.cookies.update(cookies)
self.login.go(data=data)
@need_login
def get_universes(self):
"""Get universes (particulier, pro, etc)"""
self.get_and_update_bred_token()
self.universe.go(headers={'Accept': 'application/json'})
return self.page.get_universes()
def get_and_update_bred_token(self):
timestamp = int(time.time() * 1000)
x_token_bred = self.token.go(timestamp=timestamp).get_content()
self.session.headers.update({'X-Token-Bred': x_token_bred, }) # update headers for session
return {'X-Token-Bred': x_token_bred, }
def move_to_univers(self, univers):
if univers == self.current_univers:
return
self.move_universe.go(key=univers)
self.get_and_update_bred_token()
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
self.switch.go(
data=json.dumps({'all': 'false', 'univers': univers}),
headers=headers,
)
self.current_univers = univers
@need_login
def get_accounts_list(self):
accounts = []
for universe_key in self.get_universes():
self.move_to_univers(universe_key)
accounts.extend(self.get_list())
accounts.extend(self.get_life_insurance_list(accounts))
accounts.extend(self.get_loans_list())
# Life insurances are sometimes in multiple universes, we have to remove duplicates
unique_accounts = {account.id: account for account in accounts}
return sorted(unique_accounts.values(), key=operator.attrgetter('_univers'))
@need_login
def get_loans_list(self):
self.loans.go()
return self.page.iter_loans(current_univers=self.current_univers)
@need_login
def get_list(self):
self.accounts.go()
for acc in self.page.iter_accounts(accnum=self.accnum, current_univers=self.current_univers):
yield acc
@need_login
def get_life_insurance_list(self, accounts):
self.life_insurances.go()
for ins in self.page.iter_lifeinsurances(univers=self.current_univers):
ins.parent = find_object(accounts, _number=ins._parent_number, type=Account.TYPE_CHECKING)
yield ins
@need_login
def _make_api_call(self, account, start_date, end_date, offset, max_length=50):
HEADERS = {
'Accept': "application/json",
'Content-Type': 'application/json',
}
HEADERS.update(self.get_and_update_bred_token())
call_payload = {
"account": account._number,
"poste": account._nature,
"sousPoste": account._codeSousPoste or '00',
"devise": account.currency,
"fromDate": start_date.strftime('%Y-%m-%d'),
"toDate": end_date.strftime('%Y-%m-%d'),
"from": offset,
"size": max_length, # max length of transactions
"search": "",
"categorie": "",
}
self.search.go(data=json.dumps(call_payload), headers=HEADERS)
return self.page.get_transaction_list()
@need_login
def get_history(self, account, coming=False):
if account.type in (Account.TYPE_LOAN, Account.TYPE_LIFE_INSURANCE) or not account._consultable:
raise NotImplementedError()
if account._univers != self.current_univers:
self.move_to_univers(account._univers)
today = date.today()
seen = set()
offset = 0
next_page = True
while next_page:
operation_list = self._make_api_call(
account=account,
start_date=date(day=1, month=1, year=2000), end_date=date.today(),
offset=offset, max_length=50,
)
transactions = self.page.iter_history(account=account, operation_list=operation_list, seen=seen, today=today, coming=coming)
# Transactions are unsorted
for t in sorted_transactions(transactions):
if coming == t._coming:
yield t
elif coming and not t._coming:
# coming transactions are at the top of history
self.logger.debug('stopping coming after %s', t)
return
next_page = len(transactions) == 50
offset += 50
# This assert supposedly prevents infinite loops,
# but some customers actually have a lot of transactions.
assert offset < 100000, 'the site may be doing an infinite loop'
@need_login
def get_investment(self, account):
if account.type != Account.TYPE_LIFE_INSURANCE:
raise NotImplementedError()
for invest in account._investments:
yield invest
@need_login
def get_profile(self):
self.get_universes()
self.profile.go()
profile = self.page.get_profile()
self.emails.go()
self.page.set_email(profile=profile)
return profile
@need_login
def fill_account(self, account, fields):
if account.type == Account.TYPE_CHECKING and 'iban' in fields:
self.iban.go(number=account._number)
self.page.set_iban(account=account)
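# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of how this browser is typically driven. The
# account number and credentials below are placeholders, and running this
# performs real network requests against the bank's website.
if __name__ == '__main__':  # pragma: no cover
    browser = BredBrowser('00012345678', 'mylogin', 'mypassword')
    browser.do_login()
    for account in browser.get_accounts_list():
        print('%s: %s' % (account.id, account.label))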
| lgpl-3.0 | 5,252,652,381,449,629,000 | 38.784483 | 136 | 0.626869 | false |
kawamon/hue | desktop/core/ext-py/Django-1.11.29/tests/template_tests/syntax_tests/test_if_equal.py | 23 | 9893 | from django.test import SimpleTestCase
from ..utils import setup
class IfEqualTagTests(SimpleTestCase):
@setup({'ifequal01': '{% ifequal a b %}yes{% endifequal %}'})
def test_ifequal01(self):
output = self.engine.render_to_string('ifequal01', {'a': 1, 'b': 2})
self.assertEqual(output, '')
@setup({'ifequal02': '{% ifequal a b %}yes{% endifequal %}'})
def test_ifequal02(self):
output = self.engine.render_to_string('ifequal02', {'a': 1, 'b': 1})
self.assertEqual(output, 'yes')
@setup({'ifequal03': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
def test_ifequal03(self):
output = self.engine.render_to_string('ifequal03', {'a': 1, 'b': 2})
self.assertEqual(output, 'no')
@setup({'ifequal04': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
def test_ifequal04(self):
output = self.engine.render_to_string('ifequal04', {'a': 1, 'b': 1})
self.assertEqual(output, 'yes')
@setup({'ifequal05': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal05(self):
output = self.engine.render_to_string('ifequal05', {'a': 'test'})
self.assertEqual(output, 'yes')
@setup({'ifequal06': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal06(self):
output = self.engine.render_to_string('ifequal06', {'a': 'no'})
self.assertEqual(output, 'no')
@setup({'ifequal07': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
def test_ifequal07(self):
output = self.engine.render_to_string('ifequal07', {'a': 'test'})
self.assertEqual(output, 'yes')
@setup({'ifequal08': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
def test_ifequal08(self):
output = self.engine.render_to_string('ifequal08', {'a': 'no'})
self.assertEqual(output, 'no')
@setup({'ifequal09': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
def test_ifequal09(self):
output = self.engine.render_to_string('ifequal09')
self.assertEqual(output, 'no')
@setup({'ifequal10': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
def test_ifequal10(self):
output = self.engine.render_to_string('ifequal10')
self.assertEqual(output, 'yes')
# SMART SPLITTING
@setup({'ifequal-split01': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split01(self):
output = self.engine.render_to_string('ifequal-split01')
self.assertEqual(output, 'no')
@setup({'ifequal-split02': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split02(self):
output = self.engine.render_to_string('ifequal-split02', {'a': 'foo'})
self.assertEqual(output, 'no')
@setup({'ifequal-split03': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split03(self):
output = self.engine.render_to_string('ifequal-split03', {'a': 'test man'})
self.assertEqual(output, 'yes')
@setup({'ifequal-split04': '{% ifequal a \'test man\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split04(self):
output = self.engine.render_to_string('ifequal-split04', {'a': 'test man'})
self.assertEqual(output, 'yes')
@setup({'ifequal-split05': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split05(self):
output = self.engine.render_to_string('ifequal-split05', {'a': ''})
self.assertEqual(output, 'no')
@setup({'ifequal-split06': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split06(self):
output = self.engine.render_to_string('ifequal-split06', {'a': 'i "love" you'})
self.assertEqual(output, 'yes')
@setup({'ifequal-split07': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
def test_ifequal_split07(self):
output = self.engine.render_to_string('ifequal-split07', {'a': 'i love you'})
self.assertEqual(output, 'no')
@setup({'ifequal-split08': r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}"})
def test_ifequal_split08(self):
output = self.engine.render_to_string('ifequal-split08', {'a': "I'm happy"})
self.assertEqual(output, 'yes')
@setup({'ifequal-split09': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
def test_ifequal_split09(self):
output = self.engine.render_to_string('ifequal-split09', {'a': r'slash\man'})
self.assertEqual(output, 'yes')
@setup({'ifequal-split10': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
def test_ifequal_split10(self):
output = self.engine.render_to_string('ifequal-split10', {'a': 'slashman'})
self.assertEqual(output, 'no')
# NUMERIC RESOLUTION
@setup({'ifequal-numeric01': '{% ifequal x 5 %}yes{% endifequal %}'})
def test_ifequal_numeric01(self):
output = self.engine.render_to_string('ifequal-numeric01', {'x': '5'})
self.assertEqual(output, '')
@setup({'ifequal-numeric02': '{% ifequal x 5 %}yes{% endifequal %}'})
def test_ifequal_numeric02(self):
output = self.engine.render_to_string('ifequal-numeric02', {'x': 5})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric03': '{% ifequal x 5.2 %}yes{% endifequal %}'})
def test_ifequal_numeric03(self):
output = self.engine.render_to_string('ifequal-numeric03', {'x': 5})
self.assertEqual(output, '')
@setup({'ifequal-numeric04': '{% ifequal x 5.2 %}yes{% endifequal %}'})
def test_ifequal_numeric04(self):
output = self.engine.render_to_string('ifequal-numeric04', {'x': 5.2})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric05': '{% ifequal x 0.2 %}yes{% endifequal %}'})
def test_ifequal_numeric05(self):
output = self.engine.render_to_string('ifequal-numeric05', {'x': 0.2})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric06': '{% ifequal x .2 %}yes{% endifequal %}'})
def test_ifequal_numeric06(self):
output = self.engine.render_to_string('ifequal-numeric06', {'x': 0.2})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric07': '{% ifequal x 2. %}yes{% endifequal %}'})
def test_ifequal_numeric07(self):
output = self.engine.render_to_string('ifequal-numeric07', {'x': 2})
self.assertEqual(output, '')
@setup({'ifequal-numeric08': '{% ifequal x "5" %}yes{% endifequal %}'})
def test_ifequal_numeric08(self):
output = self.engine.render_to_string('ifequal-numeric08', {'x': 5})
self.assertEqual(output, '')
@setup({'ifequal-numeric09': '{% ifequal x "5" %}yes{% endifequal %}'})
def test_ifequal_numeric09(self):
output = self.engine.render_to_string('ifequal-numeric09', {'x': '5'})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric10': '{% ifequal x -5 %}yes{% endifequal %}'})
def test_ifequal_numeric10(self):
output = self.engine.render_to_string('ifequal-numeric10', {'x': -5})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric11': '{% ifequal x -5.2 %}yes{% endifequal %}'})
def test_ifequal_numeric11(self):
output = self.engine.render_to_string('ifequal-numeric11', {'x': -5.2})
self.assertEqual(output, 'yes')
@setup({'ifequal-numeric12': '{% ifequal x +5 %}yes{% endifequal %}'})
def test_ifequal_numeric12(self):
output = self.engine.render_to_string('ifequal-numeric12', {'x': 5})
self.assertEqual(output, 'yes')
# FILTER EXPRESSIONS AS ARGUMENTS
@setup({'ifequal-filter01': '{% ifequal a|upper "A" %}x{% endifequal %}'})
def test_ifequal_filter01(self):
output = self.engine.render_to_string('ifequal-filter01', {'a': 'a'})
self.assertEqual(output, 'x')
@setup({'ifequal-filter02': '{% ifequal "A" a|upper %}x{% endifequal %}'})
def test_ifequal_filter02(self):
output = self.engine.render_to_string('ifequal-filter02', {'a': 'a'})
self.assertEqual(output, 'x')
@setup({'ifequal-filter03': '{% ifequal a|upper b|upper %}x{% endifequal %}'})
def test_ifequal_filter03(self):
output = self.engine.render_to_string('ifequal-filter03', {'a': 'x', 'b': 'X'})
self.assertEqual(output, 'x')
@setup({'ifequal-filter04': '{% ifequal x|slice:"1" "a" %}x{% endifequal %}'})
def test_ifequal_filter04(self):
output = self.engine.render_to_string('ifequal-filter04', {'x': 'aaa'})
self.assertEqual(output, 'x')
@setup({'ifequal-filter05': '{% ifequal x|slice:"1"|upper "A" %}x{% endifequal %}'})
def test_ifequal_filter05(self):
output = self.engine.render_to_string('ifequal-filter05', {'x': 'aaa'})
self.assertEqual(output, 'x')
class IfNotEqualTagTests(SimpleTestCase):
@setup({'ifnotequal01': '{% ifnotequal a b %}yes{% endifnotequal %}'})
def test_ifnotequal01(self):
output = self.engine.render_to_string('ifnotequal01', {'a': 1, 'b': 2})
self.assertEqual(output, 'yes')
@setup({'ifnotequal02': '{% ifnotequal a b %}yes{% endifnotequal %}'})
def test_ifnotequal02(self):
output = self.engine.render_to_string('ifnotequal02', {'a': 1, 'b': 1})
self.assertEqual(output, '')
@setup({'ifnotequal03': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
def test_ifnotequal03(self):
output = self.engine.render_to_string('ifnotequal03', {'a': 1, 'b': 2})
self.assertEqual(output, 'yes')
@setup({'ifnotequal04': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
def test_ifnotequal04(self):
output = self.engine.render_to_string('ifnotequal04', {'a': 1, 'b': 1})
self.assertEqual(output, 'no')
| apache-2.0 | 2,469,291,981,971,232,300 | 44.589862 | 98 | 0.603356 | false |
zakandrewking/cobrapy | cobra/test/data/update_pickles.py | 1 | 4077 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import OrderedDict
from json import dump as json_dump
import cobra
from cobra.io import (
load_matlab_model, read_sbml_model, save_json_model, save_matlab_model,
write_sbml_model)
from cobra.io.sbml3 import write_sbml2
# This script regenerates pickles of cobra Models. Should be
# performed after updating core classes to prevent subtle bugs.
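# Illustrative invocation (hedged): the script assumes the data files it
# reads (e.g. iJO1366.xml, salmonella.xml, textbook.xml.gz, raven.mat)
# are present in the current working directory.
#   python update_pickles.py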
try:
from cPickle import load, dump
except ImportError:
from pickle import load, dump
# ecoli
ecoli_model = read_sbml_model("iJO1366.xml")
with open("iJO1366.pickle", "wb") as outfile:
dump(ecoli_model, outfile, protocol=2)
# salmonella
salmonella = read_sbml_model("salmonella.xml")
with open("salmonella.genes", "rb") as infile:
gene_names = load(infile)
for gene in salmonella.genes:
gene.name = gene_names[gene.id]
with open("salmonella.media", "rb") as infile:
salmonella.media_compositions = load(infile)
with open("salmonella.pickle", "wb") as outfile:
dump(salmonella, outfile, protocol=2)
# create mini model from textbook
textbook = read_sbml_model("textbook.xml.gz")
mini = cobra.Model("mini_textbook")
mini.compartments = textbook.compartments
for r in textbook.reactions:
if r.id in ("GLCpts", "PGI", "PFK", "FBA", "TPI", "GAPD", "PGK", "PGM",
"ENO", "PYK", "EX_glc__D_e", "EX_h_e", "H2Ot", "ATPM",
"PIt2r"):
mini.add_reaction(r.copy())
mini.reactions.ATPM.upper_bound = mini.reactions.PGI.upper_bound
mini.objective = ["PFK", "ATPM"] # No biomass, 2 reactions
# add in some information from iJO1366
mini.add_reaction(ecoli_model.reactions.LDH_D.copy())
mini.add_reaction(ecoli_model.reactions.EX_lac__D_e.copy())
r = cobra.Reaction("D_LACt2")
mini.add_reaction(r)
r.gene_reaction_rule = ecoli_model.reactions.D__LACt2pp.gene_reaction_rule
r.reaction = ecoli_model.reactions.D__LACt2pp.reaction.replace("_p", "_e")
mini.reactions.GLCpts.gene_reaction_rule = \
ecoli_model.reactions.GLCptspp.gene_reaction_rule
# adjust bounds
for i in ["ATPM", "D_LACt2", "EX_lac__D_e", "LDH_D"]:
mini.reactions.get_by_id(i).upper_bound = mini.reactions.PGI.upper_bound
for i in ["D_LACt2", "LDH_D"]:
mini.reactions.get_by_id(i).lower_bound = mini.reactions.PGI.lower_bound
# set names and annotation
for g in mini.genes:
try:
tg = textbook.genes.get_by_id(g.id)
except KeyError:
continue
g.name = tg.name
g.annotation = tg.annotation
mini.reactions.sort()
mini.genes.sort()
mini.metabolites.sort()
# output to various formats
with open("mini.pickle", "wb") as outfile:
dump(mini, outfile, protocol=2)
save_matlab_model(mini, "mini.mat")
save_json_model(mini, "mini.json", pretty=True)
write_sbml_model(mini, "mini_fbc2.xml")
write_sbml_model(mini, "mini_fbc2.xml.bz2")
write_sbml_model(mini, "mini_fbc2.xml.gz")
write_sbml2(mini, "mini_fbc1.xml", use_fbc_package=True)
write_sbml_model(mini, "mini_cobra.xml", use_fbc_package=False)
raven = load_matlab_model("raven.mat")
with open("raven.pickle", "wb") as outfile:
dump(raven, outfile, protocol=2)
# TODO: these need reference solutions rather than circular solution checking!
# fva results
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook)
clean_result = OrderedDict()
for key in sorted(fva_result):
clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_fva.json", "w") as outfile:
json_dump(clean_result, outfile)
# fva with pfba constraint
fva_result = cobra.flux_analysis.flux_variability_analysis(textbook,
pfba_factor=1.1)
clean_result = OrderedDict()
for key in sorted(fva_result):
clean_result[key] = {k: round(v, 5) for k, v in fva_result[key].items()}
with open("textbook_pfba_fva.json", "w") as outfile:
json_dump(clean_result, outfile)
# textbook solution
solution = cobra.flux_analysis.parsimonious.pfba(textbook)
with open('textbook_solution.pickle', 'wb') as f:
dump(solution, f, protocol=2)
| lgpl-2.1 | 1,391,955,940,159,979,000 | 34.452174 | 79 | 0.696591 | false |
antoyo/qutebrowser | qutebrowser/misc/split.py | 8 | 6637 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Our own fork of shlex.split with some added and removed features."""
import re
from qutebrowser.utils import log
class ShellLexer:
"""A lexical analyzer class for simple shell-like syntaxes.
Based on Python's shlex, but cleaned up, removed some features, and added
some features useful for qutebrowser.
    Attributes:
        string: The string being split.
        whitespace: Characters treated as token separators.
        quotes: Characters that delimit quoted sections.
        escape: Characters that act as escape characters.
        escapedquotes: Quote characters inside which escaping is honored.
        keep: Whether special characters are kept in the output tokens.
        quoted: Whether the current token contains a quoted section.
        escapedstate: The state to return to after an escape sequence.
        token: The token currently being built.
        state: The current state of the lexer state machine.
    """
def __init__(self, s):
self.string = s
self.whitespace = ' \t\r'
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.keep = False
self.quoted = None
self.escapedstate = None
self.token = None
self.state = None
self.reset()
def reset(self):
"""Reset the state machine state to the defaults."""
self.quoted = False
self.escapedstate = ' '
self.token = ''
self.state = ' '
def __iter__(self): # pragma: no mccabe
"""Read a raw token from the input stream."""
# pylint: disable=too-many-branches,too-many-statements
self.reset()
for nextchar in self.string:
if self.state == ' ':
if self.keep:
self.token += nextchar
if nextchar in self.whitespace:
if self.token or self.quoted:
yield self.token
self.reset()
elif nextchar in self.escape:
self.escapedstate = 'a'
self.state = nextchar
elif nextchar in self.quotes:
self.state = nextchar
else:
self.token = nextchar
self.state = 'a'
elif self.state in self.quotes:
self.quoted = True
if nextchar == self.state:
if self.keep:
self.token += nextchar
self.state = 'a'
elif (nextchar in self.escape and
self.state in self.escapedquotes):
if self.keep:
self.token += nextchar
self.escapedstate = self.state
self.state = nextchar
else:
self.token += nextchar
elif self.state in self.escape:
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if (self.escapedstate in self.quotes and
nextchar != self.state and
nextchar != self.escapedstate and not self.keep):
self.token += self.state
self.token += nextchar
self.state = self.escapedstate
elif self.state == 'a':
if nextchar in self.whitespace:
self.state = ' '
assert self.token or self.quoted
yield self.token
self.reset()
if self.keep:
yield nextchar
elif nextchar in self.quotes:
if self.keep:
self.token += nextchar
self.state = nextchar
elif nextchar in self.escape:
if self.keep:
self.token += nextchar
self.escapedstate = 'a'
self.state = nextchar
else:
self.token += nextchar
else:
raise AssertionError("Invalid state {!r}!".format(self.state))
if self.state in self.escape and not self.keep:
self.token += self.state
if self.token or self.quoted:
yield self.token
def split(s, keep=False):
"""Split a string via ShellLexer.
    Args:
        s: The string to split.
        keep: Whether to keep special chars in the split output.
"""
lexer = ShellLexer(s)
lexer.keep = keep
tokens = list(lexer)
if not tokens:
return []
out = []
spaces = ""
log.shlexer.vdebug("{!r} -> {!r}".format(s, tokens))
for t in tokens:
if t.isspace():
spaces += t
else:
out.append(spaces + t)
spaces = ""
if spaces:
out.append(spaces)
return out
def _combine_ws(parts, whitespace):
"""Combine whitespace in a list with the element following it.
Args:
parts: A list of strings.
whitespace: A string containing what's considered whitespace.
Return:
The modified list.
"""
out = []
ws = ''
for part in parts:
if not part:
continue
elif part in whitespace:
ws += part
else:
out.append(ws + part)
ws = ''
if ws:
out.append(ws)
return out
def simple_split(s, keep=False, maxsplit=None):
"""Split a string on whitespace, optionally keeping the whitespace.
Args:
s: The string to split.
keep: Whether to keep whitespace.
maxsplit: The maximum count of splits.
Return:
A list of split strings.
"""
whitespace = '\n\t '
if maxsplit == 0:
# re.split with maxsplit=0 splits everything, while str.split splits
# nothing (which is the behavior we want).
if keep:
return [s]
else:
return [s.strip(whitespace)]
elif maxsplit is None:
maxsplit = 0
if keep:
pattern = '([' + whitespace + '])'
parts = re.split(pattern, s, maxsplit)
return _combine_ws(parts, whitespace)
else:
pattern = '[' + whitespace + ']'
parts = re.split(pattern, s, maxsplit)
parts[-1] = parts[-1].rstrip()
return [p for p in parts if p]
| gpl-3.0 | -454,895,341,120,122,600 | 30.454976 | 78 | 0.530661 | false |
plowman/python-mcparseface | models/syntaxnet/tensorflow/tensorflow/contrib/ffmpeg/decode_audio_op_test.py | 6 | 2865 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for third_party.tensorflow.contrib.ffmpeg.decode_audio_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import tensorflow as tf
from tensorflow.contrib import ffmpeg
from tensorflow.python.platform import resource_loader
class DecodeAudioOpTest(tf.test.TestCase):
def _loadFileAndTest(self, filename, file_format, duration_sec,
samples_per_second, channel_count):
"""Loads an audio file and validates the output tensor.
Args:
filename: The filename of the input file.
file_format: The format of the input file.
duration_sec: The duration of the audio contained in the file in seconds.
samples_per_second: The desired sample rate in the output tensor.
channel_count: The desired channel count in the output tensor.
"""
with self.test_session():
path = os.path.join(
resource_loader.get_data_files_path(), 'testdata', filename)
with open(path, 'rb') as f:
contents = f.read()
audio_op = ffmpeg.decode_audio(
contents, file_format=file_format,
samples_per_second=samples_per_second, channel_count=channel_count)
audio = audio_op.eval()
self.assertEqual(len(audio.shape), 2)
self.assertNear(duration_sec * samples_per_second,
audio.shape[0],
# Duration should be specified within 10%:
0.1 * audio.shape[0])
self.assertEqual(audio.shape[1], channel_count)
def testMonoMp3(self):
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 1)
self._loadFileAndTest('mono_16khz.mp3', 'mp3', 0.57, 20000, 2)
def testStereoMp3(self):
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 50000, 1)
self._loadFileAndTest('stereo_48khz.mp3', 'mp3', 0.79, 20000, 2)
def testMonoWav(self):
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 5000, 1)
self._loadFileAndTest('mono_10khz.wav', 'wav', 0.57, 10000, 4)
def testOgg(self):
self._loadFileAndTest('mono_10khz.ogg', 'ogg', 0.57, 10000, 1)
if __name__ == '__main__':
tf.test.main()
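# Hedged note: like other TensorFlow kernel tests, this is normally run
# through the build system's test runner, or directly as a script, e.g.:
#   python decode_audio_op_test.py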
| apache-2.0 | -4,921,773,979,012,477,000 | 36.207792 | 79 | 0.658988 | false |
caxiam/sqlalchemy-jsonapi-collections | jsonapiquery/errors.py | 1 | 1697 | import functools
import json
class CollectErrors:
def __init__(self, exc_type):
self.errors = []
self.exc_type = exc_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type == self.exc_type:
self.errors.append(exc_value)
return True
class JSONAPIQueryError(Exception):
namespace = 120000
def __init__(self, detail, item, code, meta=None):
self.detail = detail
self.item = item
self.code = code
self.meta = meta or {}
def __iter__(self):
yield from self.message.items()
def __repr__(self):
return json.dumps(self.message)
@property
def message(self):
return {
'code': self.namespace + self.code,
'detail': self.detail,
'source': {'parameter': self.source},
}
@property
def source(self):
source = self.item.source
while True:
if isinstance(source, str):
break
source = source.source
return source
InvalidPath = functools.partial(JSONAPIQueryError, code=1)
InvalidFieldType = functools.partial(JSONAPIQueryError, code=2)
InvalidValue = functools.partial(JSONAPIQueryError, code=3)
InvalidQuery = functools.partial(
JSONAPIQueryError, detail='Invalid query specified.', code=4)
InvalidPaginationValue = functools.partial(
JSONAPIQueryError, detail='Pagination values must be integers.', code=5)
def make_error_response(errors: list) -> dict:
"""Return a JSONAPI compliant error response."""
return {'errors': [dict(error) for error in errors]}
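# Hedged usage sketch (illustrative; `validate` and `paths` are
# hypothetical, and each `item` passed to an error must expose the
# `source` attribute chain that JSONAPIQueryError.source walks):
#
#   collector = CollectErrors(JSONAPIQueryError)
#   for path in paths:
#       with collector:
#           validate(path)  # may raise e.g. InvalidPath(...)
#   if collector.errors:
#       response = make_error_response(collector.errors)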
| apache-2.0 | 1,136,566,707,540,158,800 | 25.515625 | 76 | 0.619328 | false |
domluna/deep-rl-gym-tutorials | q_learning/utils.py | 1 | 1954 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
import numpy as np
import os
import warnings
from six.moves import range
from skimage.color import rgb2gray
from skimage.transform import resize
from skimage import img_as_ubyte
def preprocess(observation, new_height, new_width):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
return img_as_ubyte(resize(rgb2gray(observation), (new_height, new_width)))
def load_checkpoint(saver, dir, sess):
ckpt = tf.train.get_checkpoint_state(dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Restored from checkpoint {}".format(ckpt.model_checkpoint_path))
else:
print("No checkpoint")
def save_checkpoint(saver, dir, sess, step=None):
if not os.path.exists(dir):
os.makedirs(dir)
save_path = saver.save(sess, dir + '/graph', step)
print("Models saved in file: {} ...".format(save_path))
def noop_start(env, replay, buf, max_actions=30):
"""
SHOULD BE RUN AT THE START OF AN EPISODE
"""
obs = env.reset()
for _ in range(np.random.randint(replay.history_window, max_actions)):
next_obs, reward, terminal, _ = env.step(0) # 0 is a noop action in Atari envs
replay.add((obs, 0, reward, terminal))
buf.add(obs)
obs = next_obs
return obs
def random_start(env, replay, n):
"""Sample and add `n` random actions to the Experience Replay.
    If a terminal state is reached, the environment will reset
    and sampling will continue.
"""
obs = env.reset()
for _ in range(n):
action = env.action_space.sample()
next_obs, reward, terminal, _ = env.step(action)
replay.add((obs, action, reward, terminal))
if terminal:
obs = env.reset()
else:
obs = next_obs
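# Hedged usage sketch (illustrative): assumes an OpenAI Gym Atari env and
# a replay object exposing the `history_window` attribute and an
# `add((obs, action, reward, terminal))` method, as used above.
#
#   env = gym.make('Breakout-v0')
#   replay = ExperienceReplay(...)      # hypothetical implementation
#   random_start(env, replay, n=50000)  # warm up the replay buffer
#   obs = noop_start(env, replay, buf)  # buf: a frame buffer with .add()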
| mit | -1,835,463,773,438,896,000 | 31.566667 | 86 | 0.657114 | false |
DirtyUnicorns/android_external_chromium_org | tools/cr/cr/plugin.py | 103 | 9675 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""The plugin management system for the cr tool.
This holds the Plugin class and supporting code, that controls how plugins are
found and used.
The module registers a scan hook with the cr.loader system to enable it to
discover plugins as they are loaded.
"""
from operator import attrgetter
import cr
import cr.loader
def _PluginConfig(name, only_enabled=False, only_active=False):
config = cr.Config(name)
config.only_active = only_active
config.only_enabled = only_enabled or config.only_active
config.property_name = name.lower() + '_config'
return config
_selectors = cr.Config('PRIORITY')
CONFIG_TYPES = [
# Lowest priority, always there default values.
_PluginConfig('DEFAULT').AddChild(_selectors),
# Only turned on if the plugin is enabled.
_PluginConfig('ENABLED', only_enabled=True),
# Only turned on while the plugin is the active one.
_PluginConfig('ACTIVE', only_active=True),
# Holds detected values for active plugins.
_PluginConfig('DETECTED', only_active=True),
# Holds overrides, used in custom setup plugins.
_PluginConfig('OVERRIDES'),
]
cr.config.GLOBALS.extend(CONFIG_TYPES)
_plugins = {}
# Actually a decorator, so pylint: disable=invalid-name
class classproperty(object):
"""This adds a property to a class.
This is like a simple form of @property except it is for the class, rather
than instances of the class. Only supports readonly properties.
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner)
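# Illustrative example (hedged, not from the original source):
#
#   class Foo(object):
#     @classproperty
#     def title(cls):
#       return cls.__name__.lower()
#
#   Foo.title  # -> 'foo', no instance needed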
class DynamicChoices(object):
"""Manages the list of active plugins for command line options.
Looks like a simple iterable, but it can change as the underlying plugins
arrive and enable/disable themselves. This allows it to be used as the
set of valid choices for the argparse command line options.
"""
# If this is True, all DynamicChoices only return active plugins.
# If false, all plugins are included.
only_active = True
def __init__(self, cls):
self.cls = cls
def __contains__(self, name):
return self.cls.FindPlugin(name, self.only_active) is not None
def __iter__(self):
return [p.name for p in self.cls.Plugins()].__iter__()
def _FindRoot(cls):
if Plugin.Type in cls.__bases__:
return cls
for base in cls.__bases__:
result = _FindRoot(base)
if result is not None:
return result
return None
class Plugin(cr.loader.AutoExport):
"""Base class for managing registered plugin types."""
class Type(object):
"""Base class that tags a class as an abstract plugin type."""
class activemethod(object):
"""A decorator that delegates a static method to the active plugin.
Makes a static method that delegates to the equivalent method on the
active instance of the plugin type.
"""
def __init__(self, method):
self.method = method
def __get__(self, instance, owner):
def unbound(*args, **kwargs):
active = owner.GetActivePlugin()
if not active:
print 'No active', owner.__name__
exit(1)
method = getattr(active, self.method.__name__, None)
if not method:
print owner.__name__, 'does not support', self.method.__name__
exit(1)
return method(*args, **kwargs)
def bound(*args, **kwargs):
return self.method(instance, *args, **kwargs)
if instance is None:
return unbound
return bound
def __init__(self):
# Default the name to the lowercased class name.
self._name = self.__class__.__name__.lower()
# Strip the common suffix if present.
self._root = _FindRoot(self.__class__)
rootname = self._root.__name__.lower()
if self._name.endswith(rootname) and self.__class__ != self._root:
self._name = self._name[:-len(rootname)]
for config_root in CONFIG_TYPES:
config = cr.Config()
setattr(self, config_root.property_name, config)
self._is_active = False
def Init(self):
"""Post plugin registration initialisation method."""
for config_root in CONFIG_TYPES:
config = getattr(self, config_root.property_name)
config.name = self.name
if config_root.only_active and not self.is_active:
config.enabled = False
if config_root.only_enabled and not self.enabled:
config.enabled = False
child = getattr(self.__class__, config_root.name, None)
if child is not None:
child.name = self.__class__.__name__
config.AddChild(child)
config_root.AddChild(config)
@property
def name(self):
return self._name
@property
def priority(self):
return 0
@property
def enabled(self):
# By default all non type classes are enabled.
return Plugin.Type not in self.__class__.__bases__
@property
def is_active(self):
return self._is_active
def Activate(self):
assert not self._is_active
self._is_active = True
for config_root in CONFIG_TYPES:
if config_root.only_active:
getattr(self, config_root.property_name).enabled = True
def Deactivate(self):
assert self._is_active
self._is_active = False
for config_root in CONFIG_TYPES:
if config_root.only_active:
getattr(self, config_root.property_name).enabled = False
@classmethod
def ClassInit(cls):
pass
@classmethod
def GetInstance(cls):
"""Gets an instance of this plugin.
This looks in the plugin registry, and if an instance is not found a new
one is built and registered.
Returns:
The registered plugin instance.
"""
plugin = _plugins.get(cls, None)
if plugin is None:
# Run delayed class initialization
cls.ClassInit()
# Build a new instance of cls, and register it as the main instance.
plugin = cls()
_plugins[cls] = plugin
# Wire up the hierarchy for Config objects.
for name, value in cls.__dict__.items():
if isinstance(value, cr.Config):
for base in cls.__bases__:
child = getattr(base, name, None)
if child is not None:
value.AddChild(child)
plugin.Init()
return plugin
@classmethod
def AllPlugins(cls):
# Don't yield abstract roots, just children. We detect roots as direct
# sub classes of Plugin.Type
if Plugin.Type not in cls.__bases__:
yield cls.GetInstance()
for child in cls.__subclasses__():
for p in child.AllPlugins():
yield p
@classmethod
def UnorderedPlugins(cls):
"""Returns all enabled plugins of type cls, in undefined order."""
plugin = cls.GetInstance()
if plugin.enabled:
yield plugin
for child in cls.__subclasses__():
for p in child.UnorderedPlugins():
yield p
@classmethod
def Plugins(cls):
"""Return all enabled plugins of type cls in priority order."""
return sorted(cls.UnorderedPlugins(),
key=attrgetter('priority'), reverse=True)
@classmethod
def Choices(cls):
return DynamicChoices(cls)
@classmethod
def FindPlugin(cls, name, only_active=True):
if only_active:
plugins = cls.UnorderedPlugins()
else:
plugins = cls.AllPlugins()
for plugin in plugins:
if plugin.name == name or plugin.__class__.__name__ == name:
return plugin
return None
@classmethod
def GetPlugin(cls, name):
result = cls.FindPlugin(name)
if result is None:
raise KeyError(name)
return result
@classmethod
def GetAllActive(cls):
return [plugin for plugin in cls.UnorderedPlugins() if plugin.is_active]
@classmethod
def GetActivePlugin(cls):
"""Gets the active plugin of type cls.
This method will select a plugin to be the active one, and will activate
the plugin if needed.
Returns:
the plugin that is currently active.
"""
plugin, _ = _GetActivePlugin(cls)
return plugin
@classproperty
def default(cls):
"""Returns the plugin that should be used if the user did not choose one."""
result = None
for plugin in cls.UnorderedPlugins():
if not result or plugin.priority > result.priority:
result = plugin
return result
@classmethod
def Select(cls):
"""Called to determine which plugin should be the active one."""
plugin = cls.default
selector = getattr(cls, 'SELECTOR', None)
if selector:
if plugin is not None:
_selectors[selector] = plugin.name
name = cr.context.Find(selector)
if name is not None:
plugin = cls.FindPlugin(name)
return plugin
def ChainModuleConfigs(module):
"""Detects and connects the default Config objects from a module."""
for config_root in CONFIG_TYPES:
if hasattr(module, config_root.name):
config = getattr(module, config_root.name)
config.name = module.__name__
config_root.AddChild(config)
cr.loader.scan_hooks.append(ChainModuleConfigs)
def _GetActivePlugin(cls):
activated = False
actives = cls.GetAllActive()
plugin = cls.Select()
for active in actives:
if active != plugin:
active.Deactivate()
if plugin and not plugin.is_active:
activated = True
plugin.Activate()
return plugin, activated
def Activate():
"""Activates a plugin for all known plugin types."""
types = Plugin.Type.__subclasses__()
modified = True
while modified:
modified = False
for child in types:
_, activated = _GetActivePlugin(child)
if activated:
modified = True
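# Hedged, illustrative plugin definition (names are hypothetical):
#
#   class Builder(Plugin, Plugin.Type):   # abstract plugin type
#     SELECTOR = 'CR_BUILDER'             # config key naming the active one
#
#   class NinjaBuilder(Builder):          # concrete plugin, enabled by default
#     @property
#     def priority(self):
#       return 1
#
#   Builder.GetActivePlugin()  # selects and activates the best candidate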
| bsd-3-clause | -47,986,741,874,317,416 | 27.794643 | 80 | 0.662326 | false |
Tchanders/socorro | webapp-django/crashstats/api/views.py | 4 | 14434 | import json
import re
import datetime
from django import http
from django.shortcuts import render
from django.contrib.auth.models import Permission
from django.contrib.sites.models import RequestSite
from django.core.urlresolvers import reverse
from django.conf import settings
from django import forms
# explicit import because django.forms has an __all__
from django.forms.forms import DeclarativeFieldsMetaclass
from ratelimit.decorators import ratelimit
from waffle.decorators import waffle_switch
import crashstats.supersearch.models
from crashstats.crashstats import models
from crashstats.crashstats import utils
from crashstats.tokens.models import Token
from .cleaner import Cleaner
# List of all modules that contain models we want to expose.
MODELS_MODULES = (
models,
crashstats.supersearch.models,
)
# See http://www.iana.org/assignments/http-status-codes
REASON_PHRASES = {
100: 'CONTINUE',
101: 'SWITCHING PROTOCOLS',
102: 'PROCESSING',
200: 'OK',
201: 'CREATED',
202: 'ACCEPTED',
203: 'NON-AUTHORITATIVE INFORMATION',
204: 'NO CONTENT',
205: 'RESET CONTENT',
206: 'PARTIAL CONTENT',
207: 'MULTI-STATUS',
208: 'ALREADY REPORTED',
226: 'IM USED',
300: 'MULTIPLE CHOICES',
301: 'MOVED PERMANENTLY',
302: 'FOUND',
303: 'SEE OTHER',
304: 'NOT MODIFIED',
305: 'USE PROXY',
306: 'RESERVED',
307: 'TEMPORARY REDIRECT',
400: 'BAD REQUEST',
401: 'UNAUTHORIZED',
402: 'PAYMENT REQUIRED',
403: 'FORBIDDEN',
404: 'NOT FOUND',
405: 'METHOD NOT ALLOWED',
406: 'NOT ACCEPTABLE',
407: 'PROXY AUTHENTICATION REQUIRED',
408: 'REQUEST TIMEOUT',
409: 'CONFLICT',
410: 'GONE',
411: 'LENGTH REQUIRED',
412: 'PRECONDITION FAILED',
413: 'REQUEST ENTITY TOO LARGE',
414: 'REQUEST-URI TOO LONG',
415: 'UNSUPPORTED MEDIA TYPE',
416: 'REQUESTED RANGE NOT SATISFIABLE',
417: 'EXPECTATION FAILED',
418: "I'M A TEAPOT",
422: 'UNPROCESSABLE ENTITY',
423: 'LOCKED',
424: 'FAILED DEPENDENCY',
426: 'UPGRADE REQUIRED',
428: 'PRECONDITION REQUIRED',
429: 'TOO MANY REQUESTS',
431: 'REQUEST HEADER FIELDS TOO LARGE',
500: 'INTERNAL SERVER ERROR',
501: 'NOT IMPLEMENTED',
502: 'BAD GATEWAY',
503: 'SERVICE UNAVAILABLE',
504: 'GATEWAY TIMEOUT',
505: 'HTTP VERSION NOT SUPPORTED',
506: 'VARIANT ALSO NEGOTIATES',
507: 'INSUFFICIENT STORAGE',
508: 'LOOP DETECTED',
510: 'NOT EXTENDED',
511: 'NETWORK AUTHENTICATION REQUIRED',
}
class APIWhitelistError(Exception):
pass
class MultipleStringField(forms.TypedMultipleChoiceField):
"""Field that do not validate if the field values are in self.choices"""
def validate(self, value):
"""Nothing to do here"""
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'])
TYPE_MAP = {
basestring: forms.CharField,
list: MultipleStringField,
datetime.date: forms.DateField,
datetime.datetime: forms.DateTimeField,
int: forms.IntegerField,
}
def fancy_init(self, model, *args, **kwargs):
self.model = model
self.__old_init__(*args, **kwargs)
for parameter in model().get_annotated_params():
required = parameter['required']
name = parameter['name']
if parameter['type'] not in TYPE_MAP:
raise NotImplementedError(parameter['type'])
field_class = TYPE_MAP[parameter['type']]
self.fields[name] = field_class(required=required)
class FormWrapperMeta(DeclarativeFieldsMetaclass):
def __new__(cls, name, bases, attrs):
attrs['__old_init__'] = bases[0].__init__
attrs['__init__'] = fancy_init
return super(FormWrapperMeta, cls).__new__(cls, name, bases, attrs)
class FormWrapper(forms.Form):
__metaclass__ = FormWrapperMeta
# Names of models we don't want to serve at all
BLACKLIST = (
# not because it's sensitive but because it's only used for writes
'ReleasesFeatured',
# only used for doing posts
'Releases',
# because it's only used for the admin
'Field',
'SuperSearchField',
'SuperSearchMissingFields',
# because it's very sensitive and we don't want to expose it
'Query',
# because it's an internal thing only
'SuperSearchFields',
)
def has_permissions(user, permissions):
for permission in permissions:
if not user.has_perm(permission):
return False
return True
@waffle_switch('!app_api_all_disabled')
@ratelimit(
key='ip',
method=['GET', 'POST', 'PUT'],
rate=utils.ratelimit_rate,
block=True
)
@utils.add_CORS_header # must be before `utils.json_view`
@utils.json_view
def model_wrapper(request, model_name):
if model_name in BLACKLIST:
raise http.Http404("Don't know what you're talking about!")
for source in MODELS_MODULES:
try:
model = getattr(source, model_name)
break
except AttributeError:
pass
else:
raise http.Http404('no model called `%s`' % model_name)
required_permissions = getattr(model(), 'API_REQUIRED_PERMISSIONS', None)
if isinstance(required_permissions, basestring):
required_permissions = [required_permissions]
if (
required_permissions and
not has_permissions(request.user, required_permissions)
):
permission_names = []
for permission in required_permissions:
codename = permission.split('.', 1)[1]
try:
permission_names.append(
Permission.objects.get(
codename=codename
).name
)
except Permission.DoesNotExist:
permission_names.append(codename)
# you're not allowed to use this model
return http.HttpResponseForbidden(
"Use of this endpoint requires the '%s' permission\n" %
(', '.join(permission_names))
)
# it being set to None means it's been deliberately disabled
if getattr(model, 'API_WHITELIST', False) is False:
raise APIWhitelistError('No API_WHITELIST defined for %r' % model)
instance = model()
if request.method == 'POST':
function = instance.post
else:
function = instance.get
# assume first that it won't need a binary response
binary_response = False
form = FormWrapper(model, request.REQUEST)
if form.is_valid():
try:
result = function(**form.cleaned_data)
except models.BadStatusCodeError as e:
error_code = e.status
message = e.message
if error_code >= 400 and error_code < 500:
# if the error message looks like JSON,
# carry that forward in the response
try:
json.loads(message)
return http.HttpResponse(
message,
status=error_code,
content_type='application/json; charset=UTF-8'
)
except ValueError:
# The error from the middleware was not a JSON error.
# Not much more we can do.
reason = REASON_PHRASES.get(
error_code,
'UNKNOWN STATUS CODE'
)
return http.HttpResponse(reason, status=error_code)
if error_code >= 500:
# special case
reason = REASON_PHRASES[424]
return http.HttpResponse(
reason,
status=424,
content_type='text/plain'
)
raise
except ValueError as e:
if (
                # built-in json module ValueError
                'No JSON object could be decoded' in str(e) or
                # ujson module ValueError
                'Expected object or value' in str(e)
):
return http.HttpResponse(
'Not a valid JSON response',
status=400
)
raise
# Some models allows to return a binary reponse. It does so based on
# the models `BINARY_RESPONSE` dict in which all keys and values
# need to be in the valid query. For example, if the query is
# `?foo=bar&other=thing&bar=baz` and the `BINARY_RESPONSE` dict is
# exactly: {'foo': 'bar', 'bar': 'baz'} it will return a binary
# response with content type `application/octet-stream`.
for key, value in model.API_BINARY_RESPONSE.items():
if form.cleaned_data.get(key) == value:
binary_response = True
else:
binary_response = False
break
if binary_response:
# if you don't have all required permissions, you'll get a 403
required_permissions = model.API_BINARY_PERMISSIONS
if isinstance(required_permissions, basestring):
required_permissions = [required_permissions]
if (
required_permissions and
not has_permissions(request.user, required_permissions)
):
permission_names = []
for permission in required_permissions:
codename = permission.split('.', 1)[1]
try:
permission_names.append(
Permission.objects.get(
codename=codename
).name
)
except Permission.DoesNotExist:
permission_names.append(codename)
# you're not allowed to get the binary response
return http.HttpResponseForbidden(
"Binary response requires the '%s' permission\n" %
(', '.join(permission_names))
)
elif not request.user.has_perm('crashstats.view_pii'):
clean_scrub = getattr(model, 'API_CLEAN_SCRUB', None)
if isinstance(model.API_WHITELIST, models.Lazy):
# This is necessary because in Cleaner() we're going to
# rely on asking `isinstance(whitelist, dict)` and there's
# no easy or convenient way to be lazy about that.
model.API_WHITELIST = model.API_WHITELIST.materialize()
if result and model.API_WHITELIST:
cleaner = Cleaner(
model.API_WHITELIST,
clean_scrub=clean_scrub,
# if True, uses warnings.warn() to show fields
# not whitelisted
debug=settings.DEBUG,
)
cleaner.start(result)
else:
# custom override of the status code
return {'errors': dict(form.errors)}, 400
if binary_response:
assert model.API_BINARY_FILENAME, 'No API_BINARY_FILENAME set on model'
response = http.HttpResponse(
result,
content_type='application/octet-stream'
)
filename = model.API_BINARY_FILENAME % form.cleaned_data
response['Content-Disposition'] = (
'attachment; filename="%s"' % filename
)
return response
return result
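# Illustrative request flow (hedged; the model name and parameters are
# hypothetical):
#   GET /api/SomeModel/?product=Firefox&versions=33.0
# is routed here with model_name='SomeModel'; the query string is
# validated by FormWrapper against the model's annotated parameters, and
# the JSON result is scrubbed through API_WHITELIST for users without the
# 'crashstats.view_pii' permission.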
@waffle_switch('!app_api_all_disabled')
def documentation(request):
endpoints = [
]
all_models = []
for source in MODELS_MODULES:
all_models += [getattr(source, x) for x in dir(source)]
for model in all_models:
try:
if not issubclass(model, models.SocorroMiddleware):
continue
if model is models.SocorroMiddleware:
continue
if model.__name__ in BLACKLIST:
continue
except TypeError:
# most likely a builtin class or something
continue
model_inst = model()
if (
model_inst.API_REQUIRED_PERMISSIONS and
not has_permissions(
request.user,
model_inst.API_REQUIRED_PERMISSIONS
)
):
continue
endpoints.append(_describe_model(model))
base_url = (
'%s://%s' % (request.is_secure() and 'https' or 'http',
RequestSite(request).domain)
)
if request.user.is_authenticated():
your_tokens = Token.objects.active().filter(user=request.user)
else:
your_tokens = Token.objects.none()
context = {
'endpoints': endpoints,
'base_url': base_url,
'count_tokens': your_tokens.count()
}
return render(request, 'api/documentation.html', context)
def _describe_model(model):
model_inst = model()
params = list(model_inst.get_annotated_params())
params.sort(key=lambda x: (not x['required'], x['name']))
methods = []
if model.get:
methods.append('GET')
    if model.post:
methods.append('POST')
docstring = model.__doc__
if docstring:
docstring = dedent_left(docstring.rstrip(), 4)
required_permissions = []
if model_inst.API_REQUIRED_PERMISSIONS:
permissions = model_inst.API_REQUIRED_PERMISSIONS
if isinstance(permissions, basestring):
permissions = [permissions]
for permission in permissions:
codename = permission.split('.', 1)[1]
required_permissions.append(
Permission.objects.get(codename=codename).name
)
data = {
'name': model.__name__,
'url': reverse('api:model_wrapper', args=(model.__name__,)),
'parameters': params,
'defaults': getattr(model, 'defaults', {}),
'methods': methods,
'docstring': docstring,
'required_permissions': required_permissions,
}
return data
def dedent_left(text, spaces):
"""
If the string is:
' One\n'
' Two\n'
'Three\n'
And you set @spaces=2
Then return this:
' One\n'
' Two\n'
'Three\n'
"""
lines = []
    regex = re.compile(r'^\s{%s}' % spaces)
for line in text.splitlines():
line = regex.sub('', line)
lines.append(line)
return '\n'.join(lines)
| mpl-2.0 | -2,504,321,937,024,819,000 | 31.004435 | 79 | 0.578634 | false |
uni-peter-zheng/tp-qemu | qemu/tests/block_stream_simple.py | 5 | 1367 | import logging
from autotest.client.shared import error
from qemu.tests import blk_stream
class BlockStreamSimple(blk_stream.BlockStream):
def __init__(self, test, params, env, tag):
super(BlockStreamSimple, self).__init__(test, params, env, tag)
@error.context_aware
def query_status(self):
"""
query running block streaming job info;
"""
error.context("query job status", logging.info)
if not self.get_status():
raise error.TestFail("No active job")
def run(test, params, env):
"""
block_stream_simple test:
    1). launch a block streaming job with the max speed from param
        "default_speed" if defined by the user, or with no speed limit by
        default; note that default_speed=0 means no limit on the QEMU side.
2). reset max job speed before steady status(optional)
3). cancel active job on the device(optional)
:param test: Kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
tag = params.get("source_images", "image1")
simple_test = BlockStreamSimple(test, params, env, tag)
try:
simple_test.create_snapshots()
simple_test.start()
simple_test.action_when_streaming()
simple_test.action_after_finished()
finally:
simple_test.clean()
| gpl-2.0 | -2,845,077,598,982,918,000 | 31.547619 | 72 | 0.65545 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/twisted/web2/iweb.py | 1 | 13392 | # -*- test-case-name: twisted.web2.test -*-
"""
I contain the interfaces for several web related objects including IRequest
and IResource. I am based heavily on ideas from nevow.inevow
"""
from zope.interface import Attribute, Interface, interface
# server.py interfaces
class IResource(Interface):
"""
An HTTP resource.
I serve 2 main purposes: one is to provide a standard representation for
    what the HTTP specification calls an 'entity', and the other is to
    provide a mechanism for mapping URLs to content.
"""
def locateChild(req, segments):
"""Locate another object which can be adapted to IResource.
@return: A 2-tuple of (resource, remaining-path-segments),
or a deferred which will fire the above.
Causes the object publishing machinery to continue on
with specified resource and segments, calling the
appropriate method on the specified resource.
If you return (self, L{server.StopTraversal}), this
instructs web2 to immediately stop the lookup stage,
and switch to the rendering stage, leaving the
remaining path alone for your render function to
handle.
"""
def renderHTTP(req):
"""Return an IResponse or a deferred which will fire an
IResponse. This response will be written to the web browser
which initiated the request.
"""
# Is there a better way to do this than this funky extra class?
_default = object()
class SpecialAdaptInterfaceClass(interface.InterfaceClass):
# A special adapter for IResource to handle the extra step of adapting
# from IOldNevowResource-providing resources.
def __call__(self, other, alternate=_default):
result = super(SpecialAdaptInterfaceClass, self).__call__(other, alternate)
if result is not alternate:
return result
result = IOldNevowResource(other, alternate)
if result is not alternate:
result = IResource(result)
return result
if alternate is not _default:
return alternate
raise TypeError('Could not adapt', other, self)
IResource.__class__ = SpecialAdaptInterfaceClass
class IOldNevowResource(Interface):
# Shared interface with inevow.IResource
"""
I am a web resource.
"""
def locateChild(ctx, segments):
"""Locate another object which can be adapted to IResource
Return a tuple of resource, path segments
"""
def renderHTTP(ctx):
"""Return a string or a deferred which will fire a string. This string
will be written to the web browser which initiated this request.
Unlike iweb.IResource, this expects the incoming data to have already been read
and parsed into request.args and request.content, and expects to return a
string instead of a response object.
"""
class ICanHandleException(Interface):
# Shared interface with inevow.ICanHandleException
def renderHTTP_exception(request, failure):
"""Render an exception to the given request object.
"""
def renderInlineException(request, reason):
"""Return stan representing the exception, to be printed in the page,
not replacing the page."""
# http.py interfaces
class IResponse(Interface):
"""I'm a response."""
code = Attribute("The HTTP response code")
headers = Attribute("A http_headers.Headers instance of headers to send")
stream = Attribute("A stream.IByteStream of outgoing data, or else None.")
class IRequest(Interface):
"""I'm a request for a web resource
"""
method = Attribute("The HTTP method from the request line, e.g. GET")
uri = Attribute("The raw URI from the request line. May or may not include host.")
clientproto = Attribute("Protocol from the request line, e.g. HTTP/1.1")
headers = Attribute("A http_headers.Headers instance of incoming headers.")
stream = Attribute("A stream.IByteStream of incoming data.")
def writeResponse(response):
"""Write an IResponse object to the client"""
chanRequest = Attribute("The ChannelRequest. I wonder if this is public really?")
class IOldRequest(Interface):
# Shared interface with inevow.ICurrentSegments
"""An old HTTP request.
Subclasses should override the process() method to determine how
the request will be processed.
@ivar method: The HTTP method that was used.
@ivar uri: The full URI that was requested (includes arguments).
@ivar path: The path only (arguments not included).
@ivar args: All of the arguments, including URL and POST arguments.
@type args: A mapping of strings (the argument names) to lists of values.
i.e., ?foo=bar&foo=baz&quux=spam results in
{'foo': ['bar', 'baz'], 'quux': ['spam']}.
@ivar received_headers: All received headers
"""
# Methods for received request
def getHeader(key):
"""Get a header that was sent from the network.
"""
def getCookie(key):
"""Get a cookie that was sent from the network.
"""
def getAllHeaders():
"""Return dictionary of all headers the request received."""
def getRequestHostname():
"""Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
"""
def getHost():
"""Get my originally requesting transport's host.
Don't rely on the 'transport' attribute, since Request objects may be
copied remotely. For information on this method's return value, see
twisted.internet.tcp.Port.
"""
def getClientIP():
pass
def getClient():
pass
def getUser():
pass
def getPassword():
pass
def isSecure():
pass
def getSession(sessionInterface = None):
pass
def URLPath():
pass
def prePathURL():
pass
def rememberRootURL():
"""
Remember the currently-processed part of the URL for later
recalling.
"""
def getRootURL():
"""
Get a previously-remembered URL.
"""
# Methods for outgoing request
def finish():
"""We are finished writing data."""
def write(data):
"""
Write some data as a result of an HTTP request. The first
time this is called, it writes out response data.
"""
def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
twisted.web.server.Request.getSession and the
twisted.web.server.Session class for details.
"""
def setResponseCode(code, message=None):
"""Set the HTTP response code.
"""
def setHeader(k, v):
"""Set an outgoing HTTP header.
"""
def redirect(url):
"""Utility function that does a redirect.
The request should have finish() called after this.
"""
def setLastModified(when):
"""Set the X{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set
Last-Modified earlier, only replacing the Last-Modified time
if it is to a later value.
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was
modified, in seconds since the epoch.
@type when: number
@return: If I am a X{If-Modified-Since} conditional request and
the time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no
body. Otherwise, I return a false value.
"""
def setETag(etag):
"""Set an X{entity tag} for the outgoing response.
That's \"entity tag\" as in the HTTP/1.1 X{ETag} header, \"used
for comparing two or more entities from the same requested
resource.\"
If I am a conditional request, I may modify my response code
to L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate
for the tag given.
@param etag: The entity tag for the resource being returned.
@type etag: string
@return: If I am a X{If-None-Match} conditional request and
the tag matches one in the request, I return
L{http.CACHED<CACHED>} to indicate that you should write
no body. Otherwise, I return a false value.
"""
def setHost(host, port, ssl=0):
"""Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g.
both Squid and Apache's mod_proxy can do this), when the address
the HTTP client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and then
forwarding requests to http://localhost:8080, but we don't want HTML produced
by Twisted to say 'http://localhost:8080', they should say 'https://www.example.com',
so we do::
request.setHost('www.example.com', 443, ssl=1)
This method is experimental.
"""
class IChanRequestCallbacks(Interface):
"""The bits that are required of a Request for interfacing with a
IChanRequest object"""
def __init__(chanRequest, command, path, version, contentLength, inHeaders):
"""Create a new Request object.
@param chanRequest: the IChanRequest object creating this request
@param command: the HTTP command e.g. GET
@param path: the HTTP path e.g. /foo/bar.html
@param version: the parsed HTTP version e.g. (1,1)
@param contentLength: how much data to expect, or None if unknown
@param inHeaders: the request headers"""
def process():
"""Process the request. Called as soon as it's possibly reasonable to
return a response. handleContentComplete may or may not have been called already."""
def handleContentChunk(data):
"""Called when a piece of incoming data has been received."""
def handleContentComplete():
"""Called when the incoming data stream is finished."""
def connectionLost(reason):
"""Called if the connection was lost."""
class IChanRequest(Interface):
def writeIntermediateResponse(code, headers=None):
"""Write a non-terminating response.
Intermediate responses cannot contain data.
If the channel does not support intermediate responses, do nothing.
@ivar code: The response code. Should be in the 1xx range.
@type code: int
@ivar headers: the headers to send in the response
@type headers: C{twisted.web.http_headers.Headers}
"""
pass
def writeHeaders(code, headers):
"""Write a final response.
@param code: The response code. Should not be in the 1xx range.
@type code: int
@param headers: the headers to send in the response. They will be augmented
with any connection-oriented headers as necessary for the protocol.
@type headers: C{twisted.web.http_headers.Headers}
"""
pass
def write(data):
"""Write some data.
@param data: the data bytes
@type data: str
"""
pass
def finish():
"""Finish the request, and clean up the connection if necessary.
"""
pass
def abortConnection():
"""Forcibly abort the connection without cleanly closing.
Use if, for example, you can't write all the data you promised.
"""
pass
def registerProducer(producer, streaming):
"""Register a producer with the standard API."""
pass
def unregisterProducer():
"""Unregister a producer."""
pass
def getHostInfo():
"""Returns a tuple of (address, socket user connected to,
        boolean, was it secure). Note that this should not necessarily
always return the actual local socket information from
twisted. E.g. in a CGI, it should use the variables coming
from the invoking script.
"""
def getRemoteHost():
"""Returns an address of the remote host.
Like getHostInfo, this information may come from the real
socket, or may come from additional information, depending on
the transport.
"""
persistent = Attribute("""Whether this request supports HTTP connection persistence. May be set to False. Should not be set to other values.""")
class ISite(Interface):
pass
__all__ = ['ICanHandleException', 'IChanRequest', 'IChanRequestCallbacks', 'IOldNevowResource', 'IOldRequest', 'IRequest', 'IResource', 'IResponse', 'ISite']
| bsd-3-clause | 2,801,832,953,199,898,600 | 34.335092 | 157 | 0.636499 | false |
aio-libs/aiohttp_session | examples/postgres_storage.py | 1 | 3068 | import json
import uuid
from typing import Any, Callable, Dict, Optional
import psycopg2.extras
from aiohttp import web
from aiohttp_session import AbstractStorage, Session
from aiopg import Pool
class PgStorage(AbstractStorage):
"""PG storage"""
def __init__(self, pg_pool: Pool, *, cookie_name: str = "AIOHTTP_SESSION", # type: ignore[no-any-unimported]
domain: Optional[str] = None, max_age: Optional[int] = None,
path: str = '/', secure: Optional[bool] = None, httponly: bool = True,
key_factory: Callable[[], str] = lambda: uuid.uuid4().hex,
encoder: Callable[[object], str] = psycopg2.extras.Json,
decoder: Callable[[str], Any] = json.loads):
super().__init__(cookie_name=cookie_name, domain=domain,
max_age=max_age, path=path, secure=secure,
httponly=httponly,
encoder=encoder, decoder=decoder)
self._pg = pg_pool
self._key_factory = key_factory
async def load_session(self, request: web.Request) -> Session:
cookie = self.load_cookie(request)
data = {}
if cookie is None:
return Session(None, data={}, new=True, max_age=self.max_age)
else:
async with self._pg.acquire() as conn:
key = uuid.UUID(cookie)
async with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
await cur.execute("SELECT session, extract(epoch from created) FROM web.sessions WHERE uuid = %s", (key,))
data = await cur.fetchone()
if not data:
return Session(None, data={}, new=True, max_age=self.max_age)
return Session(key, data=data, new=False, max_age=self.max_age)
async def save_session(self, request: web.Request, response: web.StreamResponse,
session: Session) -> None:
key = session.identity
if key is None:
key = self._key_factory()
self.save_cookie(response, key, max_age=session.max_age)
else:
if session.empty:
self.save_cookie(response, "", max_age=session.max_age)
else:
key = str(key)
self.save_cookie(response, key, max_age=session.max_age)
data = self._get_session_data(session)
if not data:
return
data_encoded = self._encoder(data["session"])
expire = data["created"] + (session.max_age or 0)
async with self._pg.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("INSERT INTO web.sessions (uuid,session,created,expire)"
" VALUES (%s, %s, to_timestamp(%s),to_timestamp(%s))"
" ON CONFLICT (uuid)"
" DO UPDATE"
" SET (session,expire)=(EXCLUDED.session, EXCLUDED.expire)",
[key, data_encoded, data["created"], expire])
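# Editor's example (not part of the original file): wiring the storage into an
# application. The DSN default and function name are illustrative assumptions;
# ``aiopg.create_pool`` and ``aiohttp_session.setup`` are the libraries'
# standard entry points.
async def make_example_app(dsn: str = "dbname=web") -> web.Application:
    import aiopg
    from aiohttp_session import setup as setup_sessions
    pool = await aiopg.create_pool(dsn)
    app = web.Application()
    setup_sessions(app, PgStorage(pool, max_age=3600))
    return app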
| apache-2.0 | -7,523,939,616,466,870,000 | 42.211268 | 126 | 0.558344 | false |
guillaume-philippon/aquilon | lib/aquilon/aqdb/model/building.py | 1 | 1260 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Building is a subclass of Location """
from sqlalchemy import Column, ForeignKey, String
from aquilon.aqdb.model import Location, City, Campus
_TN = 'building'
class Building(Location):
""" Building is a subtype of location """
__tablename__ = _TN
__mapper_args__ = {'polymorphic_identity': _TN}
valid_parents = [City, Campus]
id = Column(ForeignKey(Location.id, ondelete='CASCADE'), primary_key=True)
address = Column(String(255), nullable=False)
__table_args__ = ({'info': {'unique_fields': ['name']}},)
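# Editor's example (not part of the original model; ``session`` is assumed to
# be an already-configured SQLAlchemy Session): a minimal lookup sketch.
def find_building_example(session, name):
    """Sketch only: fetch a Building by its location name."""
    return session.query(Building).filter_by(name=name).first()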
| apache-2.0 | 6,849,712,266,774,559,000 | 33.054054 | 78 | 0.70873 | false |
anryko/ansible | lib/ansible/modules/network/nxos/nxos_lldp_global.py | 19 | 6889 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for nxos_lldp_global
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: nxos_lldp_global
version_added: 2.9
short_description: Configure and manage Link Layer Discovery Protocol(LLDP) attributes on NX-OS platforms.
description: This module configures and manages the Link Layer Discovery Protocol(LLDP) attributes on NX-OS platforms.
author: Adharsh Srivats Rangarajan (@adharshsrivatsr)
notes:
- Tested against NxOS 7.3.(0)D1(1) on VIRL
- The LLDP feature needs to be enabled before using this module
options:
config:
description:
- A list of link layer discovery configurations
type: dict
suboptions:
holdtime:
description:
- Amount of time the receiving device should hold the information (in seconds)
type: int
port_id:
description:
- This attribute defines if the interface names should be advertised in the long(0) or short(1) form.
type: int
choices: [0, 1]
reinit:
description:
- Amount of time to delay the initialization of LLDP on any interface (in seconds)
type: int
timer:
description:
- Frequency at which LLDP updates need to be transmitted (in seconds)
type: int
tlv_select:
description:
- This attribute can be used to specify the TLVs that need to be sent and received in the LLDP packets. By default, all TLVs are advertised
type: dict
suboptions:
dcbxp:
description:
- Used to specify the Data Center Bridging Exchange Protocol TLV
type: bool
management_address:
description:
- Used to specify the management address in TLV messages
type: dict
suboptions:
v4:
description: Management address with TLV v4
type: bool
v6:
description: Management address with TLV v6
type: bool
port:
description:
- Used to manage port based attributes in TLV messages
type: dict
suboptions:
description:
description:
- Used to specify the port description TLV
type: bool
vlan:
description:
- Used to specify the port VLAN ID TLV
type: bool
power_management:
description:
- Used to specify IEEE 802.3 DTE Power via MDI TLV
type: bool
system:
description:
- Used to manage system based attributes in TLV messages
type: dict
suboptions:
capabilities:
description:
- Used to specify the system capabilities TLV
type: bool
description:
description:
- Used to specify the system description TLV
type: bool
name:
description:
- Used to specify the system name TLV
type: bool
state:
description:
- The state of the configuration after module completion
type: str
choices:
- merged
- replaced
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
# Before state:
# -------------
#
# user(config)# show running-config | include lldp
# feature lldp
- name: Merge provided configuration with device configuration
nxos_lldp_global:
config:
timer: 35
holdtime: 100
state: merged
# After state:
# ------------
#
# user(config)# show running-config | include lldp
# feature lldp
# lldp timer 35
# lldp holdtime 100
# Using replaced
# Before state:
# -------------
#
# user(config)# show running-config | include lldp
# feature lldp
# lldp holdtime 100
# lldp reinit 5
# lldp timer 35
- name: Replace device configuration of specific LLDP attributes with provided configuration
nxos_lldp_global:
config:
timer: 40
tlv_select:
system:
description: true
name: false
management_address:
v4: true
state: replaced
# After state:
# ------------
#
# user(config)# show running-config | include lldp
# feature lldp
# lldp timer 40
# no lldp tlv-select system-name
# Using deleted
# Before state:
# -------------
#
# user(config)# show running-config | include lldp
# feature lldp
# lldp holdtime 5
# lldp reinit 3
- name: Delete LLDP configuration (this will by default remove all lldp configuration)
nxos_lldp_global:
state: deleted
# After state:
# ------------
#
# user(config)# show running-config | include lldp
# feature lldp
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['lldp holdtime 125', 'lldp reinit 4', 'no lldp tlv-select system-name']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.nxos.argspec.lldp_global.lldp_global import Lldp_globalArgs
from ansible.module_utils.network.nxos.config.lldp_global.lldp_global import Lldp_global
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
module = AnsibleModule(argument_spec=Lldp_globalArgs.argument_spec,
supports_check_mode=True)
result = Lldp_global(module).execute_module()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 326,287,199,728,916,000 | 26.556 | 149 | 0.6072 | false |
chromium/chromium | third_party/blink/tools/blinkpy/web_tests/port/browser_test_driver_unittest.py | 7 | 2202 | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.web_tests.port.test import TestPort
from blinkpy.web_tests.port.browser_test_driver import BrowserTestDriver
from blinkpy.web_tests.port.server_process_mock import MockServerProcess
class BrowserTestDriverTest(unittest.TestCase):
def test_read_stdin_path(self):
port = TestPort(MockHost())
driver = BrowserTestDriver(port, 0)
driver._server_process = MockServerProcess(
lines=['StdinPath: /foo/bar', '#EOF'])
content_block = driver._read_block(0)
self.assertEqual(content_block.stdin_path, '/foo/bar')
driver._stdin_directory = None
| bsd-3-clause | -5,011,113,789,510,793,000 | 47.933333 | 72 | 0.764759 | false |
prefetchnta/questlab | bin/x64bin/python/36/Lib/distutils/command/install_lib.py | 3 | 8614 | """distutils.command.install_lib
Implements the Distutils 'install_lib' command
(install all Python modules)."""
import os
import importlib.util
import sys
from distutils.core import Command
from distutils.errors import DistutilsOptionError
# Extension for Python source files.
PYTHON_SOURCE_EXTENSION = ".py"
class install_lib(Command):
description = "install all Python modules (extensions and pure Python)"
# The byte-compilation options are a tad confusing. Here are the
# possible scenarios:
# 1) no compilation at all (--no-compile --no-optimize)
# 2) compile .pyc only (--compile --no-optimize; default)
# 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
# 4) compile "opt-1" .pyc only (--no-compile --optimize)
# 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
# 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
#
# The UI for this is two options, 'compile' and 'optimize'.
# 'compile' is strictly boolean, and only decides whether to
# generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
# decides both whether to generate .pyc files and what level of
# optimization to use.
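    # Editor's worked example (illustrative): a plain ``setup.py install`` is
    # scenario 2 above (.pyc only); passing ``--optimize=1`` to the install
    # command moves it to scenario 3 (.pyc plus the opt-1 variant).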
user_options = [
('install-dir=', 'd', "directory to install to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'compile', 'skip-build']
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
# let the 'install' command dictate our installation directory
self.install_dir = None
self.build_dir = None
self.force = 0
self.compile = None
self.optimize = None
self.skip_build = None
def finalize_options(self):
# Get all the information we need to install pure Python modules
# from the umbrella 'install' command -- build (source) directory,
# install (target) directory, and whether to compile .py files.
self.set_undefined_options('install',
('build_lib', 'build_dir'),
('install_lib', 'install_dir'),
('force', 'force'),
('compile', 'compile'),
('optimize', 'optimize'),
('skip_build', 'skip_build'),
)
if self.compile is None:
self.compile = True
if self.optimize is None:
self.optimize = False
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if self.optimize not in (0, 1, 2):
raise AssertionError
except (ValueError, AssertionError):
raise DistutilsOptionError("optimize must be 0, 1, or 2")
def run(self):
# Make sure we have built everything we need first
self.build()
# Install everything: simply dump the entire contents of the build
# directory to the installation directory (that's the beauty of
# having a build directory!)
outfiles = self.install()
# (Optionally) compile .py to .pyc
if outfiles is not None and self.distribution.has_pure_modules():
self.byte_compile(outfiles)
# -- Top-level worker functions ------------------------------------
# (called from 'run()')
def build(self):
if not self.skip_build:
if self.distribution.has_pure_modules():
self.run_command('build_py')
if self.distribution.has_ext_modules():
self.run_command('build_ext')
def install(self):
if os.path.isdir(self.build_dir):
outfiles = self.copy_tree(self.build_dir, self.install_dir)
else:
self.warn("'%s' does not exist -- no Python modules to install" %
self.build_dir)
return
return outfiles
def byte_compile(self, files):
if sys.dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
# Get the "--root" directory supplied to the "install" command,
# and use it as a prefix to strip off the purported filename
# encoded in bytecode files. This is far from complete, but it
# should at least generate usable bytecode in RPM distributions.
install_root = self.get_finalized_command('install').root
if self.compile:
byte_compile(files, optimize=0,
force=self.force, prefix=install_root,
dry_run=self.dry_run)
if self.optimize > 0:
byte_compile(files, optimize=self.optimize,
force=self.force, prefix=install_root,
verbose=self.verbose, dry_run=self.dry_run)
# -- Utility methods -----------------------------------------------
def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
if not has_any:
return []
build_cmd = self.get_finalized_command(build_cmd)
build_files = build_cmd.get_outputs()
build_dir = getattr(build_cmd, cmd_option)
prefix_len = len(build_dir) + len(os.sep)
outputs = []
for file in build_files:
outputs.append(os.path.join(output_dir, file[prefix_len:]))
return outputs
def _bytecode_filenames(self, py_filenames):
bytecode_files = []
for py_file in py_filenames:
# Since build_py handles package data installation, the
# list of outputs can contain more than just .py files.
# Make sure we only report bytecode for the .py files.
ext = os.path.splitext(os.path.normcase(py_file))[1]
if ext != PYTHON_SOURCE_EXTENSION:
continue
if self.compile:
bytecode_files.append(importlib.util.cache_from_source(
py_file, optimization=''))
if self.optimize > 0:
bytecode_files.append(importlib.util.cache_from_source(
py_file, optimization=self.optimize))
return bytecode_files
# -- External interface --------------------------------------------
# (called by outsiders)
def get_outputs(self):
"""Return the list of files that would be installed if this command
were actually run. Not affected by the "dry-run" flag or whether
modules have actually been built yet.
"""
pure_outputs = \
self._mutate_outputs(self.distribution.has_pure_modules(),
'build_py', 'build_lib',
self.install_dir)
if self.compile:
bytecode_outputs = self._bytecode_filenames(pure_outputs)
else:
bytecode_outputs = []
ext_outputs = \
self._mutate_outputs(self.distribution.has_ext_modules(),
'build_ext', 'build_lib',
self.install_dir)
return pure_outputs + bytecode_outputs + ext_outputs
def get_inputs(self):
"""Get the list of files that are input to this command, ie. the
files that get installed as they are named in the build tree.
The files in this list correspond one-to-one to the output
filenames returned by 'get_outputs()'.
"""
inputs = []
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
inputs.extend(build_py.get_outputs())
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
inputs.extend(build_ext.get_outputs())
return inputs
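# Editor's example (not part of the distutils source): the PEP 3147 cache
# names that _bytecode_filenames produces on CPython 3.6.
def _example_cache_names(py_file="pkg/mod.py"):
    """Sketch only: returns e.g. ('pkg/__pycache__/mod.cpython-36.pyc',
    'pkg/__pycache__/mod.cpython-36.opt-1.pyc')."""
    plain = importlib.util.cache_from_source(py_file, optimization='')
    opt1 = importlib.util.cache_from_source(py_file, optimization=1)
    return plain, opt1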
| lgpl-2.1 | 4,027,984,148,738,640,400 | 37.695853 | 77 | 0.550035 | false |
manipopopo/tensorflow | tensorflow/contrib/autograph/converters/decorators_test.py | 5 | 4348 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for decorators module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import wraps
from tensorflow.contrib.autograph.converters import decorators
from tensorflow.contrib.autograph.core import converter_testing
from tensorflow.contrib.autograph.pyct import compiler
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.python.platform import test
# The Python parser only briefly captures decorators into the AST.
# The interpreter desugars them on load, and the decorated function loses any
# trace of the decorator (which is normally what you would expect, since
# they are meant to be transparent).
# However, decorators are still visible when you analyze the function
# from inside a decorator, before it was applied - as is the case
# with our conversion decorators.
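# Editor's note (illustrative): this is why the `transform` callable used below
# receives the still-undecorated function object -- the decorator list is only
# visible in the function's source/AST, not on the live function object.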
def simple_decorator(f):
return lambda a: f(a) + 1
def self_transform_decorator(transform):
def decorator(f):
@wraps(f)
def wrapper(*args):
      # The wrapper-removing `transform` is supplied by the test below. This
      # setup is so intricate in order to simulate how we use the transformer
      # in practice.
transformed_f = transform(f, (self_transform_decorator,))
return transformed_f(*args) + 1
return wrapper
return decorator
class DecoratorsTest(converter_testing.TestCase):
def _transform(self, f, autograph_decorators):
namespace = {
'self_transform_decorator': self_transform_decorator,
'simple_decorator': simple_decorator,
'converter_testing': converter_testing,
}
node, ctx = self.prepare(
f,
namespace,
recursive=False,
autograph_decorators=autograph_decorators)
node = decorators.transform(node, ctx)
import_line = '\n'.join(ctx.program.additional_imports)
result, _ = compiler.ast_to_object(node, source_prefix=import_line)
return getattr(result, f.__name__)
def test_noop(self):
def test_fn(a):
return a
with self.converted(test_fn, decorators, {}) as result:
self.assertEqual(1, result.test_fn(1))
def test_function(self):
@self_transform_decorator(self._transform)
def test_fn(a):
return a
# 2 = 1 (a) + 1 (decorator applied exactly once)
self.assertEqual(2, test_fn(1))
def test_method(self):
class TestClass(object):
@self_transform_decorator(self._transform)
def test_fn(self, a):
return a
# 2 = 1 (a) + 1 (decorator applied exactly once)
self.assertEqual(2, TestClass().test_fn(1))
def test_multiple_decorators(self):
class TestClass(object):
# Note that reversing the order of this two doesn't work.
@classmethod
@self_transform_decorator(self._transform)
def test_fn(cls, a):
return a
# 2 = 1 (a) + 1 (decorator applied exactly once)
self.assertEqual(2, TestClass.test_fn(1))
def test_nested_decorators_local(self):
@self_transform_decorator(self._transform)
def test_fn(a):
@simple_decorator
def inner_fn(b):
return b + 11
return inner_fn(a)
# Expected to fail because simple_decorator could not be imported.
with self.assertRaises(transformer.AutographParseError):
test_fn(1)
def test_nested_decorators_imported(self):
@self_transform_decorator(self._transform)
def test_fn(a):
@converter_testing.imported_decorator
def inner_fn(b):
return b + 11
return inner_fn(a)
# 14 = 1 (a) + 1 (simple_decorator) + 11 (inner_fn)
self.assertEqual(14, test_fn(1))
if __name__ == '__main__':
test.main()
| apache-2.0 | 2,167,846,485,572,631,000 | 29.194444 | 80 | 0.683303 | false |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/webbrowser.py | 4 | 21759 | #! /usr/bin/env python
"""Interfaces for launching and remotely controlling Web browsers."""
# Maintained by Georg Brandl.
import os
import shlex
import sys
import stat
import subprocess
import time
__all__ = ["Error", "open", "open_new", "open_new_tab", "get", "register"]
class Error(Exception):
pass
_browsers = {} # Dictionary of available browser controllers
_tryorder = [] # Preference order of available browsers
def register(name, klass, instance=None, update_tryorder=1):
"""Register a browser connector and, optionally, connection."""
_browsers[name.lower()] = [klass, instance]
if update_tryorder > 0:
_tryorder.append(name)
elif update_tryorder < 0:
_tryorder.insert(0, name)
def get(using=None):
"""Return a browser launcher instance appropriate for the environment."""
if using is not None:
alternatives = [using]
else:
alternatives = _tryorder
for browser in alternatives:
if '%s' in browser:
# User gave us a command line, split it into name and args
browser = shlex.split(browser)
if browser[-1] == '&':
return BackgroundBrowser(browser[:-1])
else:
return GenericBrowser(browser)
else:
# User gave us a browser name or path.
try:
command = _browsers[browser.lower()]
except KeyError:
command = _synthesize(browser)
if command[1] is not None:
return command[1]
elif command[0] is not None:
return command[0]()
raise Error("could not locate runnable browser")
# Please note: the following definition hides a builtin function.
# It is recommended one does "import webbrowser" and uses webbrowser.open(url)
# instead of "from webbrowser import *".
def open(url, new=0, autoraise=1):
for name in _tryorder:
browser = get(name)
if browser.open(url, new, autoraise):
return True
return False
def open_new(url):
return open(url, 1)
def open_new_tab(url):
return open(url, 2)
def _synthesize(browser, update_tryorder=1):
"""Attempt to synthesize a controller base on existing controllers.
This is useful to create a controller when a user specifies a path to
an entry in the BROWSER environment variable -- we can copy a general
controller to operate using a specific installation of the desired
browser in this way.
If we can't create a controller in this way, or if there is no
executable for the requested browser, return [None, None].
"""
cmd = browser.split()[0]
if not _iscommand(cmd):
return [None, None]
name = os.path.basename(cmd)
try:
command = _browsers[name.lower()]
except KeyError:
return [None, None]
# now attempt to clone to fit the new name:
controller = command[1]
if controller and name.lower() == controller.basename:
import copy
controller = copy.copy(controller)
controller.name = browser
controller.basename = os.path.basename(browser)
register(browser, None, controller, update_tryorder)
return [None, controller]
return [None, None]
if sys.platform[:3] == "win":
def _isexecutable(cmd):
cmd = cmd.lower()
if os.path.isfile(cmd) and cmd.endswith((".exe", ".bat")):
return True
for ext in ".exe", ".bat":
if os.path.isfile(cmd + ext):
return True
return False
else:
def _isexecutable(cmd):
if os.path.isfile(cmd):
mode = os.stat(cmd)[stat.ST_MODE]
if mode & stat.S_IXUSR or mode & stat.S_IXGRP or mode & stat.S_IXOTH:
return True
return False
def _iscommand(cmd):
"""Return True if cmd is executable or can be found on the executable
search path."""
if _isexecutable(cmd):
return True
path = os.environ.get("PATH")
if not path:
return False
for d in path.split(os.pathsep):
exe = os.path.join(d, cmd)
if _isexecutable(exe):
return True
return False
# General parent classes
class BaseBrowser(object):
"""Parent class for all browsers. Do not use directly."""
args = ['%s']
def __init__(self, name=""):
self.name = name
self.basename = name
def open(self, url, new=0, autoraise=1):
raise NotImplementedError
def open_new(self, url):
return self.open(url, 1)
def open_new_tab(self, url):
return self.open(url, 2)
class GenericBrowser(BaseBrowser):
"""Class for all browsers started with a command
and without remote functionality."""
def __init__(self, name):
if isinstance(name, basestring):
self.name = name
self.args = ["%s"]
else:
# name should be a list with arguments
self.name = name[0]
self.args = name[1:]
self.basename = os.path.basename(self.name)
def open(self, url, new=0, autoraise=1):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
p = subprocess.Popen(cmdline, close_fds=True)
return not p.wait()
except OSError:
return False
class BackgroundBrowser(GenericBrowser):
"""Class for all browsers which are to be started in the
background."""
def open(self, url, new=0, autoraise=1):
cmdline = [self.name] + [arg.replace("%s", url)
for arg in self.args]
try:
if sys.platform[:3] == 'win':
p = subprocess.Popen(cmdline)
else:
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, preexec_fn=setsid)
return (p.poll() is None)
except OSError:
return False
class UnixBrowser(BaseBrowser):
"""Parent class for all Unix browsers with remote functionality."""
raise_opts = None
remote_args = ['%action', '%s']
remote_action = None
remote_action_newwin = None
remote_action_newtab = None
background = False
redirect_stdout = True
def _invoke(self, args, remote, autoraise):
raise_opt = []
if remote and self.raise_opts:
# use autoraise argument only for remote invocation
autoraise = int(bool(autoraise))
opt = self.raise_opts[autoraise]
if opt: raise_opt = [opt]
cmdline = [self.name] + raise_opt + args
if remote or self.background:
inout = file(os.devnull, "r+")
else:
# for TTY browsers, we need stdin/out
inout = None
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
p = subprocess.Popen(cmdline, close_fds=True, stdin=inout,
stdout=(self.redirect_stdout and inout or None),
stderr=inout, preexec_fn=setsid)
if remote:
            # wait five seconds. If the subprocess is not finished, the
# remote invocation has (hopefully) started a new instance.
time.sleep(1)
rc = p.poll()
if rc is None:
time.sleep(4)
rc = p.poll()
if rc is None:
return True
# if remote call failed, open() will try direct invocation
return not rc
elif self.background:
if p.poll() is None:
return True
else:
return False
else:
return not p.wait()
def open(self, url, new=0, autoraise=1):
if new == 0:
action = self.remote_action
elif new == 1:
action = self.remote_action_newwin
elif new == 2:
if self.remote_action_newtab is None:
action = self.remote_action_newwin
else:
action = self.remote_action_newtab
else:
raise Error("Bad 'new' parameter to open(); " +
"expected 0, 1, or 2, got %s" % new)
args = [arg.replace("%s", url).replace("%action", action)
for arg in self.remote_args]
success = self._invoke(args, True, autoraise)
if not success:
# remote invocation failed, try straight way
args = [arg.replace("%s", url) for arg in self.args]
return self._invoke(args, False, False)
else:
return True
class Mozilla(UnixBrowser):
"""Launcher class for Mozilla/Netscape browsers."""
raise_opts = ["-noraise", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = True
Netscape = Mozilla
class Galeon(UnixBrowser):
"""Launcher class for Galeon/Epiphany browsers."""
raise_opts = ["-noraise", ""]
remote_args = ['%action', '%s']
remote_action = "-n"
remote_action_newwin = "-w"
background = True
class Opera(UnixBrowser):
"Launcher class for Opera browser."
raise_opts = ["", "-raise"]
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-page"
background = True
class Elinks(UnixBrowser):
"Launcher class for Elinks browsers."
remote_args = ['-remote', 'openURL(%s%action)']
remote_action = ""
remote_action_newwin = ",new-window"
remote_action_newtab = ",new-tab"
background = False
# elinks doesn't like its stdout to be redirected -
# it uses redirected stdout as a signal to do -dump
redirect_stdout = False
class Konqueror(BaseBrowser):
"""Controller for the KDE File Manager (kfm, or Konqueror).
See the output of ``kfmclient --commands``
for more information on the Konqueror remote-control interface.
"""
def open(self, url, new=0, autoraise=1):
        # XXX Currently I know no way to prevent KFM from opening a new window.
if new == 2:
action = "newTab"
else:
action = "openURL"
devnull = file(os.devnull, "r+")
# if possible, put browser in separate process group, so
# keyboard interrupts don't affect browser as well as Python
setsid = getattr(os, 'setsid', None)
if not setsid:
setsid = getattr(os, 'setpgrp', None)
try:
p = subprocess.Popen(["kfmclient", action, url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull)
except OSError:
# fall through to next variant
pass
else:
p.wait()
# kfmclient's return code unfortunately has no meaning as it seems
return True
try:
p = subprocess.Popen(["konqueror", "--silent", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
# fall through to next variant
pass
else:
if p.poll() is None:
# Should be running now.
return True
try:
p = subprocess.Popen(["kfm", "-d", url],
close_fds=True, stdin=devnull,
stdout=devnull, stderr=devnull,
preexec_fn=setsid)
except OSError:
return False
else:
return (p.poll() is None)
class Grail(BaseBrowser):
# There should be a way to maintain a connection to Grail, but the
# Grail remote control protocol doesn't really allow that at this
# point. It probably never will!
def _find_grail_rc(self):
import glob
import pwd
import socket
import tempfile
tempdir = os.path.join(tempfile.gettempdir(),
".grail-unix")
user = pwd.getpwuid(os.getuid())[0]
filename = os.path.join(tempdir, user + "-*")
maybes = glob.glob(filename)
if not maybes:
return None
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
for fn in maybes:
# need to PING each one until we find one that's live
try:
s.connect(fn)
except socket.error:
# no good; attempt to clean it out, but don't fail:
try:
os.unlink(fn)
except IOError:
pass
else:
return s
def _remote(self, action):
s = self._find_grail_rc()
if not s:
return 0
s.send(action)
s.close()
return 1
def open(self, url, new=0, autoraise=1):
if new:
ok = self._remote("LOADNEW " + url)
else:
ok = self._remote("LOAD " + url)
return ok
#
# Platform support for Unix
#
# These are the right tests because all these Unix browsers require either
# a console terminal or an X display to run.
def register_X_browsers():
# The default GNOME browser
if "GNOME_DESKTOP_SESSION_ID" in os.environ and _iscommand("gnome-open"):
register("gnome-open", None, BackgroundBrowser("gnome-open"))
# The default KDE browser
if "KDE_FULL_SESSION" in os.environ and _iscommand("kfmclient"):
register("kfmclient", Konqueror, Konqueror("kfmclient"))
# The Mozilla/Netscape browsers
for browser in ("mozilla-firefox", "firefox",
"mozilla-firebird", "firebird",
"seamonkey", "mozilla", "netscape"):
if _iscommand(browser):
register(browser, None, Mozilla(browser))
# Konqueror/kfm, the KDE browser.
if _iscommand("kfm"):
register("kfm", Konqueror, Konqueror("kfm"))
elif _iscommand("konqueror"):
register("konqueror", Konqueror, Konqueror("konqueror"))
# Gnome's Galeon and Epiphany
for browser in ("galeon", "epiphany"):
if _iscommand(browser):
register(browser, None, Galeon(browser))
# Skipstone, another Gtk/Mozilla based browser
if _iscommand("skipstone"):
register("skipstone", None, BackgroundBrowser("skipstone"))
# Opera, quite popular
if _iscommand("opera"):
register("opera", None, Opera("opera"))
# Next, Mosaic -- old but still in use.
if _iscommand("mosaic"):
register("mosaic", None, BackgroundBrowser("mosaic"))
# Grail, the Python browser. Does anybody still use it?
if _iscommand("grail"):
register("grail", Grail, None)
# Prefer X browsers if present
if os.environ.get("DISPLAY"):
register_X_browsers()
# Also try console browsers
if os.environ.get("TERM"):
# The Links/elinks browsers <http://artax.karlin.mff.cuni.cz/~mikulas/links/>
if _iscommand("links"):
register("links", None, GenericBrowser("links"))
if _iscommand("elinks"):
register("elinks", None, Elinks("elinks"))
# The Lynx browser <http://lynx.isc.org/>, <http://lynx.browser.org/>
if _iscommand("lynx"):
register("lynx", None, GenericBrowser("lynx"))
# The w3m browser <http://w3m.sourceforge.net/>
if _iscommand("w3m"):
register("w3m", None, GenericBrowser("w3m"))
#
# Platform support for Windows
#
if sys.platform[:3] == "win":
class WindowsDefault(BaseBrowser):
def open(self, url, new=0, autoraise=1):
try:
os.startfile(url)
except WindowsError:
# [Error 22] No application is associated with the specified
# file for this operation: '<URL>'
return False
else:
return True
_tryorder = []
_browsers = {}
# First try to use the default Windows browser
register("windows-default", WindowsDefault)
# Detect some common Windows browsers, fallback to IE
iexplore = os.path.join(os.environ.get("PROGRAMFILES", "C:\\Program Files"),
"Internet Explorer\\IEXPLORE.EXE")
for browser in ("firefox", "firebird", "seamonkey", "mozilla",
"netscape", "opera", iexplore):
if _iscommand(browser):
register(browser, None, BackgroundBrowser(browser))
#
# Platform support for MacOS
#
try:
import ic
except ImportError:
pass
else:
class InternetConfig(BaseBrowser):
def open(self, url, new=0, autoraise=1):
ic.launchurl(url)
return True # Any way to get status?
register("internet-config", InternetConfig, update_tryorder=-1)
if sys.platform == 'darwin':
# Adapted from patch submitted to SourceForge by Steven J. Burr
class MacOSX(BaseBrowser):
"""Launcher class for Aqua browsers on Mac OS X
Optionally specify a browser name on instantiation. Note that this
will not work for Aqua browsers if the user has moved the application
package after installation.
If no browser is specified, the default browser, as specified in the
Internet System Preferences panel, will be used.
"""
def __init__(self, name):
self.name = name
def open(self, url, new=0, autoraise=1):
assert "'" not in url
# hack for local urls
if not ':' in url:
url = 'file:'+url
# new must be 0 or 1
new = int(bool(new))
if self.name == "default":
# User called open, open_new or get without a browser parameter
script = 'open location "%s"' % url.replace('"', '%22') # opens in default browser
else:
# User called get and chose a browser
if self.name == "OmniWeb":
toWindow = ""
else:
# Include toWindow parameter of OpenURL command for browsers
# that support it. 0 == new window; -1 == existing
toWindow = "toWindow %d" % (new - 1)
cmd = 'OpenURL "%s"' % url.replace('"', '%22')
script = '''tell application "%s"
activate
%s %s
end tell''' % (self.name, cmd, toWindow)
# Open pipe to AppleScript through osascript command
osapipe = os.popen("osascript", "w")
if osapipe is None:
return False
# Write script to osascript's stdin
osapipe.write(script)
rc = osapipe.close()
return not rc
# Don't clear _tryorder or _browsers since OS X can use above Unix support
# (but we prefer using the OS X specific stuff)
register("MacOSX", None, MacOSX('default'), -1)
#
# Platform support for OS/2
#
if sys.platform[:3] == "os2" and _iscommand("netscape"):
_tryorder = []
_browsers = {}
register("os2netscape", None,
GenericBrowser(["start", "netscape", "%s"]), -1)
# OK, now that we know what the default preference orders for each
# platform are, allow user to override them with the BROWSER variable.
if "BROWSER" in os.environ:
_userchoices = os.environ["BROWSER"].split(os.pathsep)
_userchoices.reverse()
# Treat choices in same way as if passed into get() but do register
# and prepend to _tryorder
for cmdline in _userchoices:
if cmdline != '':
cmd = _synthesize(cmdline, -1)
if cmd[1] is None:
register(cmdline, None, GenericBrowser(cmdline), -1)
cmdline = None # to make del work if _userchoices was empty
del cmdline
del _userchoices
# what to do if _tryorder is now empty?
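# Editor's note (illustrative): with the override above, running e.g.
#   BROWSER="opera:firefox" python script.py
# in a Unix shell makes open() try opera first, then firefox, before any of
# the platform defaults registered earlier.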
def main():
import getopt
usage = """Usage: %s [-n | -t] url
-n: open new window
-t: open new tab""" % sys.argv[0]
try:
opts, args = getopt.getopt(sys.argv[1:], 'ntd')
except getopt.error, msg:
print >>sys.stderr, msg
print >>sys.stderr, usage
sys.exit(1)
new_win = 0
for o, a in opts:
if o == '-n': new_win = 1
elif o == '-t': new_win = 2
if len(args) != 1:
print >>sys.stderr, usage
sys.exit(1)
url = args[0]
open(url, new_win)
print "\a"
if __name__ == "__main__":
main()
| mit | -3,437,311,423,236,437,000 | 30.819005 | 98 | 0.548601 | false |
cloudera/hue | apps/sqoop/src/sqoop/api/link.py | 2 | 5885 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import json
import logging
import socket
import sys
from django.utils.encoding import smart_str
from sqoop import client, conf
from sqoop.client.exception import SqoopException
from sqoop.api.decorators import get_link_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from sqoop.api.exception import handle_rest_exception
from sqoop.api.utils import list_to_dict
from django.views.decorators.cache import never_cache
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
__all__ = ['get_links', 'create_link', 'update_link', 'link', 'links', 'link_clone', 'link_delete']
LOG = logging.getLogger(__name__)
@never_cache
def get_links(request):
response = {
'status': 0,
'errors': None,
'links': []
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['links'] = list_to_dict(c.get_links())
except RestException as e:
response.update(handle_rest_exception(e, _('Could not get links.')))
return JsonResponse(response)
@never_cache
def create_link(request):
response = {
'status': 0,
'errors': None,
'link': None
}
if 'link' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving link'), data={'errors': 'Link is missing.'}, error_code=400)
d = json.loads(smart_str(request.POST.get('link')))
link = client.Link.from_dict(d)
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['link'] = c.create_link(link).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not create link.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
def update_link(request, link):
response = {
'status': 0,
'errors': None,
'link': None
}
if 'link' not in request.POST:
raise StructuredException(code="INVALID_REQUEST_ERROR", message=_('Error saving link'), data={'errors': 'Link is missing.'}, error_code=400)
link.update_from_dict(json.loads(smart_str(request.POST.get('link'))))
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['link'] = c.update_link(link).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not update link.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
def links(request):
if request.method == 'GET':
return get_links(request)
elif request.method == 'POST':
return create_link(request)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_link_or_exception()
def link(request, link):
response = {
'status': 0,
'errors': None,
'link': None
}
if request.method == 'GET':
response['link'] = link.to_dict()
return JsonResponse(response)
elif request.method == 'POST':
return update_link(request, link)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET or POST request required.'), error_code=405)
@never_cache
@get_link_or_exception()
def link_clone(request, link):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None,
'link': None
}
link.id = -1
link.name = '%s-copy' % link.name
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
response['link'] = c.create_link(link).to_dict()
except RestException as e:
response.update(handle_rest_exception(e, _('Could not clone link.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
@never_cache
@get_link_or_exception()
def link_delete(request, link):
if request.method != 'POST':
raise StructuredException(code="INVALID_METHOD", message=_('POST request required.'), error_code=405)
response = {
'status': 0,
'errors': None
}
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE, ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
c.delete_link(link)
except RestException as e:
response.update(handle_rest_exception(e, _('Could not delete link.')))
except SqoopException as e:
response['status'] = 100
response['errors'] = e.to_dict()
return JsonResponse(response)
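# Editor's example (not part of the original file; Hue's real routing lives in
# the app's urls.py and may differ): minimal Django URL wiring for these views.
def example_urlpatterns():
    """Sketch only: build URL patterns for the link endpoints."""
    from django.conf.urls import url
    return [
        url(r'^links/?$', links),
        url(r'^links/(?P<link>\d+)/?$', link),
        url(r'^links/(?P<link>\d+)/clone/?$', link_clone),
        url(r'^links/(?P<link>\d+)/delete/?$', link_delete),
    ]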
| apache-2.0 | -6,707,982,355,164,739,000 | 32.628571 | 145 | 0.704333 | false |
gioman/QGIS | python/plugins/processing/gui/HistoryDialog.py | 1 | 5564 | # -*- coding: utf-8 -*-
"""
***************************************************************************
HistoryDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtWidgets import QAction, QPushButton, QDialogButtonBox, QStyle, QMessageBox, QFileDialog, QMenu, QTreeWidgetItem
from qgis.PyQt.QtGui import QIcon
from processing.gui import TestTools
from processing.core.ProcessingLog import ProcessingLog
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'DlgHistory.ui'))
class HistoryDialog(BASE, WIDGET):
def __init__(self):
super(HistoryDialog, self).__init__(None)
self.setupUi(self)
self.groupIcon = QIcon()
self.groupIcon.addPixmap(self.style().standardPixmap(
QStyle.SP_DirClosedIcon), QIcon.Normal, QIcon.Off)
self.groupIcon.addPixmap(self.style().standardPixmap(
QStyle.SP_DirOpenIcon), QIcon.Normal, QIcon.On)
self.keyIcon = QIcon()
self.keyIcon.addPixmap(self.style().standardPixmap(QStyle.SP_FileIcon))
self.clearButton = QPushButton(self.tr('Clear'))
self.clearButton.setToolTip(self.tr('Clear history'))
self.buttonBox.addButton(self.clearButton, QDialogButtonBox.ActionRole)
self.saveButton = QPushButton(self.tr('Save As...'))
self.saveButton.setToolTip(self.tr('Save history'))
self.buttonBox.addButton(self.saveButton, QDialogButtonBox.ActionRole)
self.tree.doubleClicked.connect(self.executeAlgorithm)
self.tree.currentItemChanged.connect(self.changeText)
self.clearButton.clicked.connect(self.clearLog)
self.saveButton.clicked.connect(self.saveLog)
self.tree.setContextMenuPolicy(Qt.CustomContextMenu)
self.tree.customContextMenuRequested.connect(self.showPopupMenu)
self.fillTree()
def clearLog(self):
reply = QMessageBox.question(self,
self.tr('Confirmation'),
self.tr('Are you sure you want to clear the history?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No
)
if reply == QMessageBox.Yes:
ProcessingLog.clearLog()
self.fillTree()
def saveLog(self):
fileName, filter = QFileDialog.getSaveFileName(self,
self.tr('Save file'), '.', self.tr('Log files (*.log *.LOG)'))
if fileName == '':
return
if not fileName.lower().endswith('.log'):
fileName += '.log'
ProcessingLog.saveLog(fileName)
def fillTree(self):
self.tree.clear()
entries = ProcessingLog.getLogEntries()
groupItem = QTreeWidgetItem()
groupItem.setText(0, 'ALGORITHM')
groupItem.setIcon(0, self.groupIcon)
for entry in entries:
item = TreeLogEntryItem(entry, True)
item.setIcon(0, self.keyIcon)
groupItem.insertChild(0, item)
self.tree.addTopLevelItem(groupItem)
def executeAlgorithm(self):
item = self.tree.currentItem()
if isinstance(item, TreeLogEntryItem):
if item.isAlg:
script = 'import processing\n'
script += item.entry.text.replace('run(', 'runAndLoadResults(')
exec(script)
def changeText(self):
item = self.tree.currentItem()
if isinstance(item, TreeLogEntryItem):
self.text.setText(item.entry.text.replace('|', '\n'))
def createTest(self):
item = self.tree.currentItem()
if isinstance(item, TreeLogEntryItem):
if item.isAlg:
TestTools.createTest(item.entry.text)
def showPopupMenu(self, point):
item = self.tree.currentItem()
if isinstance(item, TreeLogEntryItem):
if item.isAlg:
popupmenu = QMenu()
createTestAction = QAction(self.tr('Create test'), self.tree)
createTestAction.triggered.connect(self.createTest)
popupmenu.addAction(createTestAction)
popupmenu.exec_(self.tree.mapToGlobal(point))
class TreeLogEntryItem(QTreeWidgetItem):
def __init__(self, entry, isAlg):
QTreeWidgetItem.__init__(self)
self.entry = entry
self.isAlg = isAlg
self.setText(0, '[' + entry.date + '] ' + entry.text.split('|')[0])
| gpl-2.0 | 2,202,754,153,261,598,700 | 37.109589 | 128 | 0.563623 | false |
psci2195/espresso-ffans | testsuite/python/interactions_non-bonded.py | 1 | 27466 | #
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
import numpy as np
import unittest as ut
import unittest_decorators as utx
import tests_common
class InteractionsNonBondedTest(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
box_l = 10.
start_pos = np.random.rand(3) * box_l
axis = np.random.rand(3)
axis /= np.linalg.norm(axis)
step = axis * 0.01
step_width = np.linalg.norm(step)
def setUp(self):
self.system.box_l = [self.box_l] * 3
self.system.cell_system.skin = 0.
self.system.time_step = .1
self.system.part.add(id=0, pos=self.start_pos, type=0)
self.system.part.add(id=1, pos=self.start_pos, type=0)
def tearDown(self):
self.system.non_bonded_inter.reset()
self.system.part.clear()
    # Required, since assertAlmostEqual checks decimal places, not significant figures
def assertFractionAlmostEqual(self, a, b, **args):
if abs(b) < 1E-8:
self.assertAlmostEqual(a, b, **args)
else:
self.assertAlmostEqual(a / b, 1., **args)
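    # Illustrative example: assertFractionAlmostEqual(1e6 + 0.01, 1e6) passes,
    # because the relative check compares (1e6 + 0.01) / 1e6 ~= 1. even though
    # the absolute difference (0.01) would fail assertAlmostEqual directly;
    # values with |b| < 1E-8 fall back to the absolute comparison.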
def assertItemsFractionAlmostEqual(self, a, b):
for i, ai in enumerate(a):
self.assertFractionAlmostEqual(ai, b[i])
#
# Tests
#
# Test Generic Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES_GENERIC")
def test_lj_generic(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.122
lj_off = 0.185
lj_b1 = 4.22
lj_b2 = 3.63
lj_e1 = 10.32
lj_e2 = 5.81
lj_shift = -0.13
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift)
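        # For reference (convention assumed from tests_common): the generic
        # Lennard-Jones potential evaluated below has the form
        #   V(r) = eps * (b1*(sig/(r - off))**e1 - b2*(sig/(r - off))**e2 + shift)
        # for r - off below the cutoff, and vanishes beyond it.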
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test WCA Potential
@utx.skipIfMissingFeatures("WCA")
def test_wca(self):
wca_eps = 2.12
wca_sig = 1.37
wca_cutoff = wca_sig * 2.**(1. / 6.)
wca_shift = -((wca_sig / wca_cutoff)**12 - (wca_sig / wca_cutoff)**6)
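        # WCA is the purely repulsive part of LJ: the potential is cut at its
        # minimum r = 2**(1/6) * sigma and shifted so that it is zero there.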
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=wca_eps,
sigma=wca_sig)
E_ref = tests_common.lj_generic_potential(
r=np.arange(1, 232) * self.step_width, eps=wca_eps, sig=wca_sig,
cutoff=wca_cutoff, shift=4. * wca_shift)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=wca_eps,
sig=wca_sig, cutoff=wca_cutoff)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref[i])
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].wca.set_params(epsilon=0., sigma=1.)
# Test Generic Lennard-Jones Softcore Potential
@utx.skipIfMissingFeatures("LJGEN_SOFTCORE")
def test_lj_generic_softcore(self):
lj_eps = 2.12
lj_sig = 1.37
lj_cut = 2.125
lj_off = 0.182
lj_b1 = 6.22
lj_b2 = 3.63
lj_e1 = 13.32
lj_e2 = 3.74
lj_shift = 0.13
lj_delta = 0.1
lj_lam = 0.34
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, offset=lj_off,
b1=lj_b1, b2=lj_b2, e1=lj_e1, e2=lj_e2, shift=lj_shift,
delta=lj_delta, lam=lj_lam)
for i in range(231):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_generic_potential(
r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, shift=lj_shift, delta=lj_delta, lam=lj_lam)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_generic_force(
espressomd, r=(i + 1) * self.step_width, eps=lj_eps, sig=lj_sig,
cutoff=lj_cut, offset=lj_off, b1=lj_b1, b2=lj_b2, e1=lj_e1,
e2=lj_e2, delta=lj_delta, lam=lj_lam)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].generic_lennard_jones.set_params(
epsilon=0.)
# Test Lennard-Jones Potential
@utx.skipIfMissingFeatures("LENNARD_JONES")
def test_lj(self):
lj_eps = 1.92
lj_sig = 1.03
lj_cut = 1.123
lj_shift = 0.92
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift=lj_shift)
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_potential(
(i + 1) * self.step_width, lj_eps, lj_sig, lj_cut,
shift=lj_shift)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * \
tests_common.lj_force(espressomd, r=(i + 1) * self.step_width,
eps=lj_eps, sig=lj_sig, cutoff=lj_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=0.)
# Test Lennard-Jones Cosine Potential
@utx.skipIfMissingFeatures("LJCOS")
def test_lj_cos(self):
ljcos_eps = 3.32
ljcos_sig = 0.73
ljcos_cut = 1.523
ljcos_offset = 0.223
self.system.non_bonded_inter[0, 0].lennard_jones_cos.set_params(
epsilon=ljcos_eps, sigma=ljcos_sig, cutoff=ljcos_cut,
offset=ljcos_offset)
for i in range(175):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos_potential(
(i + 1) * self.step_width, eps=ljcos_eps, sig=ljcos_sig,
cutoff=ljcos_cut, offset=ljcos_offset)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_cos_force(
espressomd, (i + 1) * self.step_width, eps=ljcos_eps,
sig=ljcos_sig, cutoff=ljcos_cut, offset=ljcos_offset)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos.set_params(epsilon=0.)
# Test Lennard-Jones Cosine^2 Potential
@utx.skipIfMissingFeatures("LJCOS2")
def test_lj_cos2(self):
ljcos2_eps = 0.31
ljcos2_sig = 0.73
ljcos2_width = 1.523
ljcos2_offset = 0.321
self.system.non_bonded_inter[0, 0].lennard_jones_cos2.set_params(
epsilon=ljcos2_eps, sigma=ljcos2_sig, offset=ljcos2_offset,
width=ljcos2_width)
for i in range(267):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.lj_cos2_potential(
(i + 1) * self.step_width, eps=ljcos2_eps, sig=ljcos2_sig,
offset=ljcos2_offset, width=ljcos2_width)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.lj_cos2_force(
espressomd, r=(i + 1) * self.step_width, eps=ljcos2_eps,
sig=ljcos2_sig, offset=ljcos2_offset, width=ljcos2_width)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].lennard_jones_cos2.set_params(epsilon=0.)
# Test Smooth-step Potential
@utx.skipIfMissingFeatures("SMOOTH_STEP")
def test_smooth_step(self):
sst_eps = 4.92
sst_sig = 3.03
sst_cut = 1.253
sst_d = 2.52
sst_n = 11
sst_k0 = 2.13
self.system.non_bonded_inter[0, 0].smooth_step.set_params(
eps=sst_eps, sig=sst_sig, cutoff=sst_cut, d=sst_d, n=sst_n,
k0=sst_k0)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.smooth_step_potential(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.smooth_step_force(
r=(i + 1) * self.step_width, eps=sst_eps, sig=sst_sig,
cutoff=sst_cut, d=sst_d, n=sst_n, k0=sst_k0)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].smooth_step.set_params(d=0., eps=0.)
# Test BMHTF Potential
@utx.skipIfMissingFeatures("BMHTF_NACL")
def test_bmhtf(self):
bmhtf_a = 3.92
bmhtf_b = 2.43
bmhtf_c = 1.23
bmhtf_d = 3.33
bmhtf_sig = 0.123
bmhtf_cut = 1.253
self.system.non_bonded_inter[0, 0].bmhtf.set_params(
a=bmhtf_a, b=bmhtf_b, c=bmhtf_c, d=bmhtf_d, sig=bmhtf_sig,
cutoff=bmhtf_cut)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.bmhtf_potential(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.bmhtf_force(
r=(i + 1) * self.step_width, a=bmhtf_a, b=bmhtf_b, c=bmhtf_c,
d=bmhtf_d, sig=bmhtf_sig, cutoff=bmhtf_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].bmhtf.set_params(a=0., c=0., d=0.)
# Test Morse Potential
@utx.skipIfMissingFeatures("MORSE")
def test_morse(self):
m_eps = 1.92
m_alpha = 3.03
m_cut = 1.253
m_rmin = 0.123
self.system.non_bonded_inter[0, 0].morse.set_params(
eps=m_eps, alpha=m_alpha, cutoff=m_cut, rmin=m_rmin)
for i in range(126):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.morse_potential(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.morse_force(
r=(i + 1) * self.step_width, eps=m_eps, alpha=m_alpha,
cutoff=m_cut, rmin=m_rmin)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].morse.set_params(eps=0.)
# Test Buckingham Potential
@utx.skipIfMissingFeatures("BUCKINGHAM")
def test_buckingham(self):
b_a = 3.71
b_b = 2.92
b_c = 5.32
b_d = 4.11
b_disc = 1.03
b_cut = 2.253
b_shift = 0.133
self.system.non_bonded_inter[0, 0].buckingham.set_params(
a=b_a, b=b_b, c=b_c, d=b_d, discont=b_disc, cutoff=b_cut,
shift=b_shift)
for i in range(226):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.buckingham_potential(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.buckingham_force(
r=(i + 1) * self.step_width, a=b_a, b=b_b, c=b_c, d=b_d,
discont=b_disc, cutoff=b_cut, shift=b_shift)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[
0, 0].buckingham.set_params(a=0., c=0., d=0., shift=0.)
# Test Soft-sphere Potential
@utx.skipIfMissingFeatures("SOFT_SPHERE")
def test_soft_sphere(self):
ss_a = 1.92
ss_n = 3.03
ss_cut = 1.123
ss_off = 0.123
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(
a=ss_a, n=ss_n, cutoff=ss_cut, offset=ss_off)
        # Advance the first twelve steps without checking (the potential is
        # extremely steep at short separations).
        for _ in range(12):
            self.system.part[1].pos = self.system.part[1].pos + self.step
for i in range(113):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.soft_sphere_potential(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.soft_sphere_force(
r=(i + 13) * self.step_width, a=ss_a, n=ss_n, cutoff=ss_cut,
offset=ss_off)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].soft_sphere.set_params(a=0.)
# Test Hertzian Potential
@utx.skipIfMissingFeatures("HERTZIAN")
def test_hertzian(self):
h_eps = 6.92
h_sig = 2.432
self.system.non_bonded_inter[0, 0].hertzian.set_params(
eps=h_eps, sig=h_sig)
for i in range(244):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.hertzian_potential(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.hertzian_force(
r=(i + 1) * self.step_width, eps=h_eps, sig=h_sig)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].hertzian.set_params(eps=0.)
# Test Gaussian Potential
@utx.skipIfMissingFeatures("GAUSSIAN")
def test_gaussian(self):
g_eps = 6.92
g_sig = 4.03
g_cut = 1.243
self.system.non_bonded_inter[0, 0].gaussian.set_params(
eps=g_eps, sig=g_sig, cutoff=g_cut)
for i in range(125):
self.system.part[1].pos = self.system.part[1].pos + self.step
self.system.integrator.run(recalc_forces=True, steps=0)
# Calculate energies
E_sim = self.system.analysis.energy()["non_bonded"]
E_ref = tests_common.gaussian_potential(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Calculate forces
f0_sim = self.system.part[0].f
f1_sim = self.system.part[1].f
f1_ref = self.axis * tests_common.gaussian_force(
r=(i + 1) * self.step_width, eps=g_eps, sig=g_sig, cutoff=g_cut)
# Check that energies match, ...
self.assertFractionAlmostEqual(E_sim, E_ref)
# force equals minus the counter-force ...
self.assertTrue((f0_sim == -f1_sim).all())
# and has correct value.
self.assertItemsFractionAlmostEqual(f1_sim, f1_ref)
self.system.non_bonded_inter[0, 0].gaussian.set_params(eps=0.)
# Test the Gay-Berne potential and the resulting force and torque
@utx.skipIfMissingFeatures("GAY_BERNE")
def test_gb(self):
# helper function definitions
def gradient(func, x0, dx=1.0e-7):
"""
Approximate the gradient of a function at a point x0
using the two-point central difference formula with spacing 2dx.
Parameters
----------
func: :obj:`function`
function for which the gradient is calculated
x0: (3,) array_like of :obj:`float`
                Point in 3-dimensional space where the derivatives are calculated
            dx: :obj:`float`, optional
                Spacing
            Returns
            -------
            (3,) array_like of :obj:`float`
                The approximated gradient of func at x0
"""
def partial_x(x):
return (func(x0 + x) - func(x0 - x)) / (
2.0 * np.linalg.norm(x))
delta = np.array([dx, 0.0, 0.0])
return np.array([partial_x(np.roll(delta, i)) for i in range(3)])
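        # Illustrative usage of gradient() (not part of the test): for
        # f(x) = x . x, gradient(lambda x: np.dot(x, x), np.array([1., 2., 3.]))
        # approximates [2., 4., 6.] up to O(dx**2) discretization error.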
def setup_system(gb_params):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
self.system.part.clear()
self.system.part.add(
id=0, pos=(1, 2, 3), rotation=(1, 1, 1), type=0)
self.system.part.add(
id=1, pos=(2.2, 2.1, 2.9), rotation=(1, 1, 1), type=0)
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=cut, eps=epsilon_0, k1=k_1, k2=k_2, mu=mu,
nu=nu)
def advance_and_rotate_part(particle):
particle.pos = particle.pos + self.step
particle.rotate(axis=(1, 2, 3), angle=0.3)
particle.rotate(axis=(1, -2, -4), angle=1.2)
def get_simulation_energy():
return self.system.analysis.energy()["non_bonded"]
def get_reference_energy(gb_params, r, director1, director2):
k_1, k_2, mu, nu, sigma_0, epsilon_0, cut = gb_params
r_cut = r * cut / np.linalg.norm(r)
E_ref = tests_common.gay_berne_potential(
r, director1, director2, epsilon_0, sigma_0, mu, nu, k_1, k_2)
E_ref -= tests_common.gay_berne_potential(
r_cut, director1, director2, epsilon_0, sigma_0, mu, nu,
k_1, k_2)
return E_ref
def get_reference_force(gb_params, r, dir1, dir2):
return -gradient(
lambda x: get_reference_energy(gb_params, x, dir1, dir2),
x0=r, dx=1.0e-7)
def get_reference_torque(gb_params, r, dir1, dir2):
force_in_dir1 = gradient(
lambda x: get_reference_energy(gb_params, r, x, dir2),
x0=dir1, dx=1.0e-7)
return np.cross(-dir1, force_in_dir1)
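        # In get_reference_torque, force_in_dir1 is the numerical derivative
        # dE/d(dir1); the reference torque thus follows the convention
        # tau = -u x (dE/du) for a particle with director u.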
# actual tests of the gb potential
k_1 = 1.2
k_2 = 2.4
mu = 2.
nu = 5.
sigma_0 = 1.2
epsilon_0 = 0.8
cut = 3.3
gb_params = (k_1, k_2, mu, nu, sigma_0, epsilon_0, cut)
setup_system(gb_params)
p1 = self.system.part[0]
p2 = self.system.part[1]
delta = 1.0e-6
for _ in range(100):
advance_and_rotate_part(p2)
self.system.integrator.run(recalc_forces=True, steps=0)
r = self.system.distance_vec(p1, p2)
director1 = p1.director
director2 = p2.director
# Calc energies
E_sim = get_simulation_energy()
E_ref = get_reference_energy(gb_params, r, director1, director2)
# Test energies
self.assertAlmostEqual(E_sim, E_ref, delta=delta)
# Calc forces
f1_sim = p1.f
f2_sim = p2.f
f2_ref = get_reference_force(gb_params, r, director1, director2)
# Test forces
# force equals minus the counter-force
self.assertTrue((f1_sim == -f2_sim).all())
# compare force to reference force
for i in range(3):
self.assertAlmostEqual(f2_sim[i], f2_ref[i], delta=delta)
# Calc torques
torque1_sim = p1.torque_lab
torque2_sim = p2.torque_lab
torque1_ref = get_reference_torque(
gb_params, r, director1, director2)
torque2_ref = get_reference_torque(
gb_params, r, director2, director1)
# Test torques
for i in range(3):
self.assertAlmostEqual(
torque1_sim[i],
torque1_ref[i],
delta=delta)
self.assertAlmostEqual(
torque2_sim[i],
torque2_ref[i],
delta=delta)
# Test zero energy
self.system.non_bonded_inter[0, 0].gay_berne.set_params(
sig=sigma_0, cut=0, eps=0, k1=k_1, k2=k_2, mu=mu, nu=nu)
self.system.integrator.run(0)
self.assertEqual(self.system.analysis.energy()["non_bonded"], 0.0)
if __name__ == '__main__':
ut.main()
| gpl-3.0 | -3,481,423,916,790,049,000 | 36.368707 | 81 | 0.552501 | false |
syphar/django | tests/modeladmin/tests.py | 7 | 62231 | from __future__ import unicode_literals
from datetime import date
from django import forms
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import (
HORIZONTAL, VERTICAL, ModelAdmin, TabularInline,
)
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.contrib.auth.models import User
from django.core.checks import Error
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import SimpleTestCase, TestCase
from django.utils import six
from .models import (
Band, Concert, ValidationTestInlineModel, ValidationTestModel,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
request = MockRequest()
request.user = MockSuperUser()
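# Several ModelAdmin code paths check request.user permissions; a permissive
# mock user satisfies any such check in these tests without hitting the
# database.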
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request)), ['name', 'bio', 'sign_date'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name', 'bio', 'sign_date'])
self.assertIsNone(ma.get_exclude(request, self.band))
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_get_fieldsets(self):
# Test that get_fieldsets is called when figuring out form fields.
# Refs #18681.
class BandAdmin(ModelAdmin):
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['name', 'bio']})]
ma = BandAdmin(Band, self.site)
form = ma.get_form(None)
self.assertEqual(form._meta.fields, ['name', 'bio'])
class InlineBandAdmin(TabularInline):
model = Concert
fk_name = 'main_band'
can_delete = False
def get_fieldsets(self, request, obj=None):
return [(None, {'fields': ['day', 'transport']})]
ma = InlineBandAdmin(Band, self.site)
form = ma.get_formset(None).form
self.assertEqual(form._meta.fields, ['day', 'transport'])
def test_lookup_allowed_allows_nonexistent_lookup(self):
"""
        Ensure that lookup_allowed() allows a parameter
whose field lookup doesn't exist.
Refs #21129.
"""
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertTrue(ma.lookup_allowed('name__nonexistent', 'test_value'))
def test_field_arguments(self):
# If we specify the fields argument, fieldsets_add and fieldsets_change should
# just stick the fields into a formsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_fields(request)), ['name'])
self.assertEqual(list(ma.get_fields(request, self.band)), ['name'])
self.assertEqual(ma.get_fieldsets(request), [(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band), [(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, the fields on the Form class should be
        # restricted to the fields specified. This may cause errors to be raised in
        # the db layer if required model fields aren't in fields/fieldsets, but
        # that's preferable to ghost errors where you have a field in your Form
        # class that isn't being displayed because you forgot to add it to
        # fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
self.assertEqual(list(ma.get_form(request, self.band).base_fields), ['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name'])
def test_custom_form_meta_exclude_with_readonly(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is respected when
used in conjunction with `ModelAdmin.readonly_fields` and when no
`ModelAdmin.exclude` is defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
readonly_fields = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
readonly_fields = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'id', 'DELETE'])
def test_custom_formfield_override_readonly(self):
class AdminBandForm(forms.ModelForm):
name = forms.CharField()
class Meta:
exclude = tuple()
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
readonly_fields = ['name']
ma = BandAdmin(Band, self.site)
# `name` shouldn't appear in base_fields because it's part of
# readonly_fields.
self.assertEqual(
list(ma.get_form(request).base_fields),
['bio', 'sign_date']
)
# But it should appear in get_fields()/fieldsets() so it can be
# displayed as read-only.
self.assertEqual(
list(ma.get_fields(request)),
['bio', 'sign_date', 'name']
)
self.assertEqual(
list(ma.get_fieldsets(request)),
[(None, {'fields': ['bio', 'sign_date', 'name']})]
)
def test_custom_form_meta_exclude(self):
"""
Ensure that the custom ModelForm's `Meta.exclude` is overridden if
`ModelAdmin.exclude` or `InlineModelAdmin.exclude` are defined.
Refs #14496.
"""
# First, with `ModelAdmin` -----------------------
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['bio']
class BandAdmin(ModelAdmin):
exclude = ['name']
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['bio', 'sign_date'])
# Then, with `InlineModelAdmin` -----------------
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'opening_band', 'day', 'id', 'DELETE']
)
def test_overriding_get_exclude(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_overrides_exclude(self):
class BandAdmin(ModelAdmin):
exclude = ['bio']
def get_exclude(self, request, obj=None):
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request).base_fields),
['bio', 'sign_date']
)
def test_get_exclude_takes_obj(self):
class BandAdmin(ModelAdmin):
def get_exclude(self, request, obj=None):
if obj:
return ['sign_date']
return ['name']
self.assertEqual(
list(BandAdmin(Band, self.site).get_form(request, self.band).base_fields),
['name', 'bio']
)
def test_custom_form_validation(self):
# If we specify a form, it should use it allowing custom validation to work
# properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(type(ma.get_form(request).base_fields['sign_date'].widget), AdminDateWidget)
def test_form_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `ModelAdmin.get_form()`
overrides all other declarations. Refs #8999.
"""
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
exclude = ['name']
class BandAdmin(ModelAdmin):
exclude = ['sign_date']
form = AdminBandForm
def get_form(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['bio']
return super(BandAdmin, self).get_form(request, obj, **kwargs)
ma = BandAdmin(Band, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['name', 'sign_date'])
def test_formset_exclude_kwarg_override(self):
"""
Ensure that the `exclude` kwarg passed to `InlineModelAdmin.get_formset()`
overrides all other declarations. Refs #8999.
"""
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
exclude = ['transport']
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_formset(self, request, obj=None, **kwargs):
kwargs['exclude'] = ['opening_band']
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE'])
def test_formset_overriding_get_exclude_with_form_fields(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['main_band', 'opening_band', 'day', 'transport']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_formset_overriding_get_exclude_with_form_exclude(self):
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ['day']
class ConcertInline(TabularInline):
form = AdminConcertForm
fk_name = 'main_band'
model = Concert
def get_exclude(self, request, obj=None):
return ['opening_band']
class BandAdmin(ModelAdmin):
inlines = [ConcertInline]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['main_band', 'day', 'transport', 'id', 'DELETE']
)
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Beatles</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % (band2.id, self.band.id)
)
class AdminConcertForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdminWithForm(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdminWithForm(Concert, self.site)
form = ma.get_form(request)()
self.assertHTMLEqual(
str(form["main_band"]),
'<div class="related-widget-wrapper">'
'<select name="main_band" id="id_main_band" required>'
'<option value="" selected>---------</option>'
'<option value="%d">The Doors</option>'
'</select></div>' % self.band.id
)
def test_regression_for_ticket_15820(self):
"""
Ensure that `obj` is passed from `InlineModelAdmin.get_fieldsets()` to
`InlineModelAdmin.get_formset()`.
"""
class CustomConcertForm(forms.ModelForm):
class Meta:
model = Concert
fields = ['day']
class ConcertInline(TabularInline):
model = Concert
fk_name = 'main_band'
def get_formset(self, request, obj=None, **kwargs):
if obj:
kwargs['form'] = CustomConcertForm
return super(ConcertInline, self).get_formset(request, obj, **kwargs)
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
Concert.objects.create(main_band=self.band, opening_band=self.band, day=1)
ma = BandAdmin(Band, self.site)
inline_instances = ma.get_inline_instances(request)
fieldsets = list(inline_instances[0].get_fieldsets(request))
self.assertEqual(fieldsets[0][1]['fields'], ['main_band', 'opening_band', 'day', 'transport'])
fieldsets = list(inline_instances[0].get_fieldsets(request, inline_instances[0].model))
self.assertEqual(fieldsets[0][1]['fields'], ['day'])
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
# and fields with choices specified ought to be a basic Select widget.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')])
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', '---------'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')]
)
self.assertEqual(type(cmafa.base_fields['transport'].widget), Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['opening_band'].widget.widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[('', 'None'), (self.band.id, 'The Doors')]
)
self.assertEqual(type(cmafa.base_fields['day'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs, {'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices), [(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget), AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs, {'class': 'radiolist inline'})
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', 'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')]
)
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(list(ma.get_form(request).base_fields), ['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(list(ma.get_formsets_with_inlines(request))[0][0]().forms[0].fields),
['extra', 'transport', 'id', 'DELETE', 'main_band']
)
def test_log_actions(self):
ma = ModelAdmin(Band, self.site)
mock_request = MockRequest()
mock_request.user = User.objects.create(username='bill')
self.assertEqual(ma.log_addition(mock_request, self.band, 'added'), LogEntry.objects.latest('id'))
self.assertEqual(ma.log_change(mock_request, self.band, 'changed'), LogEntry.objects.latest('id'))
        self.assertEqual(ma.log_deletion(mock_request, self.band, 'deleted'), LogEntry.objects.latest('id'))
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = [
Error(
msg,
hint=hint,
obj=invalid_obj,
id=id,
)
]
self.assertEqual(errors, expected)
def assertIsInvalidRegexp(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
six.assertRegex(self, error.msg, msg)
def assertIsValid(self, model_admin, model):
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
expected = []
self.assertEqual(errors, expected)
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E002')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a many-to-many field.",
'admin.E003')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007')
def test_non_iterable_item(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008')
def test_item_not_a_pair(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009')
def test_second_element_of_item_not_a_dict(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010')
def test_missing_fields_key(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011')
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005')
def test_duplicate_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class ValidationTestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006')
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016')
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E019')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E022')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition."),
'admin.E023')
def test_invalid_value(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ("slug",)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E027')
def test_missing_field_again(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('non_existent_field',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'."),
'admin.E030')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ('name',)}
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a ForeignKey, or a ManyToManyField."),
'admin.E028')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ('name',)}
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
("The value of 'list_display[0]' refers to 'non_existent_field', which is not a callable, an attribute "
"of 'ValidationTestModelAdmin', or an attribute or method on 'modeladmin.ValidationTestModel'."),
'admin.E108')
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a ManyToManyField.",
'admin.E109')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111')
def test_valid_case(self):
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112')
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', which does not refer to a Field.",
'admin.E116')
def test_not_filter(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113')
def test_not_filter_again(self):
class RandomClass(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115')
def test_not_associated_with_field_name(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114')
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'), )
def get_queryset(self, cl, qs):
return qs
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126')
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' refers to 'non_existent_field', which "
"does not refer to a Field.",
'admin.E127'
)
def test_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_related_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'band__sign_date'
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_related_invalid_field_type(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'band__name'
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128'
)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not an attribute of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102')
def test_valid_case(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103')
def test_not_model_admin(self):
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'InlineModelAdmin'\.",
'admin.E104')
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105')
def test_invalid_model_type(self):
""" Test if `model` attribute on inline model admin is a models.Model.
"""
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
ValidationTestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106')
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
ValidationTestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(ValidationTestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
class ModelAdminPermissionTests(SimpleTestCase):
class MockUser(object):
def has_module_perms(self, app_label):
if app_label == "modeladmin":
return True
return False
class MockAddUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.add_band":
return True
return False
class MockChangeUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.change_band":
return True
return False
class MockDeleteUser(MockUser):
def has_perm(self, perm):
if perm == "modeladmin.delete_band":
return True
return False
def test_has_add_permission(self):
"""
Ensure that has_add_permission returns True for users who can add
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_add_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_add_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_add_permission(request))
def test_has_change_permission(self):
"""
Ensure that has_change_permission returns True for users who can edit
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_change_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_change_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_change_permission(request))
def test_has_delete_permission(self):
"""
Ensure that has_delete_permission returns True for users who can delete
objects and False for users who can't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_delete_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_delete_permission(request))
def test_has_module_permission(self):
"""
Ensure that has_module_permission returns True for users who have any
permission for the module and False for users who don't.
"""
ma = ModelAdmin(Band, AdminSite())
request = MockRequest()
request.user = self.MockAddUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertTrue(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertTrue(ma.has_module_permission(request))
original_app_label = ma.opts.app_label
ma.opts.app_label = 'anotherapp'
try:
request.user = self.MockAddUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockChangeUser()
self.assertFalse(ma.has_module_permission(request))
request.user = self.MockDeleteUser()
self.assertFalse(ma.has_module_permission(request))
finally:
ma.opts.app_label = original_app_label
| bsd-3-clause | 4,748,427,532,780,828,000 | 34.039977 | 116 | 0.608555 | false |
jaredkoontz/leetcode | Python/optimal-account-balancing.py | 3 | 1111 | # Time: O(n * 2^n), n is the number of nonzero net balances (len(debt)).
# Space: O(n * 2^n)
import collections
class Solution(object):
def minTransfers(self, transactions):
"""
:type transactions: List[List[int]]
:rtype: int
"""
account = collections.defaultdict(int)
for transaction in transactions:
account[transaction[0]] += transaction[2]
account[transaction[1]] -= transaction[2]
debt = []
for v in account.values():
if v:
debt.append(v)
if not debt:
return 0
n = 1 << len(debt)
dp, subset = [float("inf")] * n, []
for i in xrange(1, n):
net_debt, number = 0, 0
for j in xrange(len(debt)):
if i & 1 << j:
net_debt += debt[j]
number += 1
if net_debt == 0:
dp[i] = number - 1
for s in subset:
if (i & s) == s:
dp[i] = min(dp[i], dp[s] + dp[i - s])
subset.append(i)
return dp[-1]
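# A hedged usage sketch (the LeetCode judge normally drives this class; the
# transaction triples below are illustrative (payer, payee, amount) data):
#
#   transactions = [[0, 1, 10], [1, 0, 1], [1, 2, 5], [2, 0, 5]]
#   print(Solution().minTransfers(transactions))
#
# The method nets out each account, then runs a bitmask DP over subsets of
# the nonzero balances: a zero-sum subset of size k settles with k - 1
# transfers, and dp[i] combines disjoint zero-sum subsets of mask i.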
| mit | 3,390,924,850,970,744,000 | 29.027027 | 61 | 0.422142 | false |
tiagocoutinho/bliss | bliss/controllers/mca/simulation.py | 1 | 4984 | """Provide an MCA simulator."""
import time
import numpy
import gevent
from .base import BaseMCA, PresetMode, TriggerMode, Stats
class SimulatedMCA(BaseMCA):
_init_time = 1.
_prepare_time = 0.1
_cleanup_time = 0.1
_gate_end = 0.5
_mapping_modulo = 2
# Initialization
def initialize_attributes(self):
self._running = False
self._block_size = None
self._spectrum_size = 1024
self._acquistion_number = 1
self._trigger_mode = TriggerMode.SOFTWARE
self._current_data = None
self._current_stats = None
def initialize_hardware(self):
gevent.sleep(self._init_time)
def finalize(self):
pass
# Information
@property
def detector_brand(self):
return "SIMULATION"
@property
def detector_type(self):
return "SIMULATION"
@property
def elements(self):
return (0, 1, 2, 3)
# Settings
@property
def spectrum_size(self):
return self._spectrum_size
def set_spectrum_size(self, size):
self._spectrum_size = size
@property
def supported_preset_modes(self):
        return PresetMode.REALTIME,  # the only mode set_preset_mode accepts
def set_preset_mode(self, mode, value=None):
assert mode is PresetMode.REALTIME
self._realtime = value
@property
def supported_trigger_modes(self):
return TriggerMode.SOFTWARE, TriggerMode.GATE, TriggerMode.SYNC
def set_trigger_mode(self, mode):
if mode is None:
mode = TriggerMode.SOFTWARE
assert mode in self.supported_trigger_modes
self._trigger_mode = mode
@property
def hardware_points(self):
return self._hardware_points
def set_hardware_points(self, value):
self._hardware_points = value
@property
def block_size(self):
return self._block_size or 100
def set_block_size(self, value=None):
self._block_size = value
# Acquisition control
def start_acquisition(self):
if not self._running:
gevent.sleep(self._prepare_time)
self._t0 = time.time()
self._count = -1
self._data_buffer = {}
self._stats_buffer = {}
self._running = True
def stop_acquisition(self):
if self._running:
self._delta = time.time() - self._t0
gevent.sleep(self._cleanup_time)
self._running = False
pixel = self._generate_pixel(self.delta)
self._current_data, self._current_stats = pixel
def is_acquiring(self):
return self._running and self.delta < self._realtime
@property
def delta(self):
d = time.time() - self._t0 if self._running else self._delta
if self._trigger_mode == TriggerMode.GATE:
return min(d, self._gate_end)
if self._trigger_mode == TriggerMode.SOFTWARE:
return min(d, self._realtime)
return d
# Get data
def get_acquisition_data(self):
return self._current_data
def get_acquisition_statistics(self):
return self._current_stats
def poll_data(self):
# Update
self._count += 1
current = self._count // self._mapping_modulo
# Realtime
if self._trigger_mode == TriggerMode.SYNC:
delta = 0.2 * self._mapping_modulo
else:
delta = self._gate_end
# Flags
new_pixel = self._count % self._mapping_modulo != 0
full_buffer = current and current % self.block_size == 0
finished = current == self.hardware_points
# A new pixel has been generated
if current > 0 and new_pixel:
a, b = self._generate_pixel(delta)
self._data_buffer[current-1] = a
self._stats_buffer[current-1] = b
# Available data
if new_pixel and (full_buffer or finished):
a, b = self._data_buffer, self._stats_buffer
self._data_buffer = {}
self._stats_buffer = {}
return current, a, b
# Nothing to return yet
return current, {}, {}
# Data generation
def _generate_pixel(self, delta):
realtime = delta
livetime = realtime * numpy.random.normal(0.9, 0.01)
triggers = int(10000 * numpy.random.normal(livetime, livetime*0.2))
events = triggers // 2
icr = triggers / realtime if realtime else 0.
ocr = events / livetime if livetime else 0.
deadtime = 1 - ocr / icr if icr else 0.
st = Stats(realtime, livetime, triggers, events, icr, ocr, deadtime)
stats = dict((i, st) for i in self.elements)
size = self._spectrum_size
data = dict((i, numpy.zeros(size)) for i in self.elements)
for _ in range(events):
loc = numpy.random.normal(size//2, size//16)
for i in self.elements:
e = int(numpy.random.normal(loc, size//16))
data[i][e] += 1
return data, stats
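# A hedged usage sketch; the BaseMCA constructor signature lives in
# bliss.controllers.mca.base and is assumed here to take a name and a
# config mapping -- adjust to the actual base class:
#
#   mca = SimulatedMCA('simu1', {})
#   mca.set_preset_mode(PresetMode.REALTIME, 1.0)
#   mca.start_acquisition()
#   gevent.sleep(1.1)
#   mca.stop_acquisition()
#   spectra = mca.get_acquisition_data()   # dict: element -> numpy spectrum
#   stats = mca.get_acquisition_statistics()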
| lgpl-3.0 | 3,883,550,063,179,060,700 | 27.318182 | 76 | 0.58427 | false |
frouty/odoo_oph | addons/lunch/report/report_lunch_order.py | 52 | 2799 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
class report_lunch_order(osv.osv):
_name = "report.lunch.order.line"
_description = "Lunch Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.date('Date Order', readonly=True, select=True),
'year': fields.char('Year', size=4, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month',readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'user_id': fields.many2one('res.users', 'User Name'),
'price_total':fields.float('Total Price', readonly=True),
'note' : fields.text('Note',size=256,readonly=True),
}
_order = 'date desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_lunch_order_line')
cr.execute("""
create or replace view report_lunch_order_line as (
select
min(lo.id) as id,
lo.user_id as user_id,
lo.date as date,
to_char(lo.date, 'YYYY') as year,
to_char(lo.date, 'MM') as month,
to_char(lo.date, 'YYYY-MM-DD') as day,
lo.note as note,
sum(lp.price) as price_total
from
lunch_order_line as lo
left join lunch_product as lp on (lo.product_id = lp.id)
group by
lo.date,lo.user_id,lo.note
)
""")
report_lunch_order()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 2,054,504,855,772,869,400 | 42.061538 | 102 | 0.549482 | false |
prontotools/zendesk-tickets-machine | zendesk_tickets_machine/tickets/tests/test_services.py | 1 | 5515 | import datetime
from django.test import TestCase
from django.utils.timezone import utc
from ..models import Ticket
from ..services import TicketServices
from agents.models import Agent
from agent_groups.models import AgentGroup
from boards.models import Board
from requesters.models import Requester
class TicketServicesTest(TestCase):
def setUp(self):
agent = Agent.objects.create(name='Kan', zendesk_user_id='123')
agent_group = AgentGroup.objects.create(
name='Development',
zendesk_group_id='123'
)
self.board = Board.objects.create(name='Pre-Production')
self.first_ticket = Ticket.objects.create(
subject='Ticket 1',
comment='Comment 1',
requester='[email protected]',
created_by=agent,
assignee=agent,
group=agent_group,
ticket_type='question',
priority='urgent',
tags='welcome',
private_comment='Private comment',
zendesk_ticket_id='24328',
board=self.board
)
self.second_ticket = Ticket.objects.create(
subject='Ticket 2',
comment='Comment 2',
requester='[email protected]',
created_by=agent,
assignee=agent,
group=agent_group,
ticket_type='question',
priority='high',
tags='welcome internal',
private_comment='Private comment',
board=self.board
)
def test_edit_ticket_once_edit_subject_and_tags_if_select_all(self):
agent = Agent.objects.create(name='Natty', zendesk_user_id='456')
requester = Requester.objects.create(
email='[email protected]', zendesk_user_id='123'
)
        ticket_services = TicketServices()
        ticket_services.edit_ticket_once(
id_list=[self.first_ticket.id, self.second_ticket.id],
edit_tags='aa bb',
edit_requester=requester.email,
edit_subject='New Subject',
edit_due_at='01/31/2017',
edit_assignee=agent
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).tags,
'aa bb'
)
self.assertEqual(
Ticket.objects.get(id=self.second_ticket.id).tags,
'aa bb'
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).requester,
requester.email
)
self.assertEqual(
Ticket.objects.get(id=self.second_ticket.id).requester,
requester.email
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).subject,
'New Subject'
)
self.assertEqual(
Ticket.objects.get(id=self.second_ticket.id).subject,
'New Subject'
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).due_at,
datetime.datetime.strptime(
'01/31/2017', "%m/%d/%Y"
).replace(tzinfo=utc)
)
self.assertEqual(
Ticket.objects.get(id=self.second_ticket.id).due_at,
datetime.datetime(2017, 1, 31, tzinfo=utc)
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).assignee,
agent
)
self.assertEqual(
Ticket.objects.get(id=self.second_ticket.id).assignee,
agent
)
def test_edit_ticket_once_if_select_one(self):
agent = Agent.objects.create(name='Natty', zendesk_user_id='456')
requester = Requester.objects.create(
email='[email protected]', zendesk_user_id='123'
)
        ticket_services = TicketServices()
        ticket_services.edit_ticket_once(
id_list=[self.first_ticket.id],
edit_tags='aa bb',
edit_requester=requester.email,
edit_subject='New Subject',
edit_due_at='01/31/2017',
edit_assignee=agent
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).tags,
'aa bb'
)
self.assertNotEqual(
Ticket.objects.get(id=self.second_ticket.id).tags,
'aa bb'
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).requester,
requester.email
)
self.assertNotEqual(
Ticket.objects.get(id=self.second_ticket.id).requester,
requester.email
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).subject,
'New Subject'
)
self.assertNotEqual(
Ticket.objects.get(id=self.second_ticket.id).subject,
'New Subject'
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).due_at,
datetime.datetime.strptime(
'01/31/2017', "%m/%d/%Y"
).replace(tzinfo=utc)
)
self.assertNotEqual(
Ticket.objects.get(id=self.second_ticket.id).due_at,
datetime.datetime(2017, 1, 31, tzinfo=utc)
)
self.assertEqual(
Ticket.objects.get(id=self.first_ticket.id).assignee,
agent
)
self.assertNotEqual(
Ticket.objects.get(id=self.second_ticket.id).assignee,
agent
)
| mit | 3,247,515,967,635,474,000 | 32.628049 | 73 | 0.554306 | false |
evancich/apm_motor | modules/PX4Firmware/integrationtests/demo_tests/flight_path_assertion.py | 15 | 7439 | #!/usr/bin/env python
#***************************************************************************
#
# Copyright (c) 2015 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#***************************************************************************/
#
# @author Andreas Antener <[email protected]>
#
import rospy
import threading
from px4.msg import vehicle_local_position
from gazebo_msgs.srv import SpawnModel
from gazebo_msgs.srv import SetModelState
from gazebo_msgs.srv import DeleteModel
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from numpy import linalg
import numpy as np
#
# Helper to test if vehicle stays on expected flight path.
#
class FlightPathAssertion(threading.Thread):
#
# Arguments
# - positions: tuple of tuples in the form (x, y, z, heading)
#
# TODO: yaw validation
# TODO: fail main test thread
#
    def __init__(self, positions, tunnel_radius=1, yaw_offset=0.2):
threading.Thread.__init__(self)
rospy.Subscriber("vehicle_local_position", vehicle_local_position, self.position_callback)
self.spawn_model = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
self.set_model_state = rospy.ServiceProxy('/gazebo/set_model_state', SetModelState)
self.delete_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
self.positions = positions
        self.tunnel_radius = tunnel_radius
self.yaw_offset = yaw_offset
self.has_pos = False
self.should_stop = False
self.center = positions[0]
self.end_of_segment = False
self.failed = False
self.local_position = vehicle_local_position
def position_callback(self, data):
self.has_pos = True
self.local_position = data
def spawn_indicator(self):
self.delete_model("indicator")
xml = (
"<?xml version='1.0'?>" +
"<sdf version='1.4'>" +
"<model name='indicator'>" +
"<static>true</static>" +
"<link name='link'>" +
"<visual name='visual'>" +
"<transparency>0.7</transparency>" +
"<geometry>" +
"<sphere>" +
"<radius>%f</radius>" +
"</sphere>" +
"</geometry>" +
"<material>" +
"<ambient>1 0 0 0.5</ambient>" +
"<diffuse>1 0 0 0.5</diffuse>" +
"</material>" +
"</visual>" +
"</link>" +
"</model>" +
"</sdf>") % self.tunnel_radius
self.spawn_model("indicator", xml, "", Pose(), "")
def position_indicator(self):
state = SetModelState()
state.model_name = "indicator"
pose = Pose()
pose.position.x = self.center[0]
pose.position.y = (-1) * self.center[1]
pose.position.z = (-1) * self.center[2]
state.pose = pose
state.twist = Twist()
state.reference_frame = ""
self.set_model_state(state)
def distance_to_line(self, a, b, pos):
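        # Point-to-segment distance: project pos onto the segment a->b,
        # clamp the projection to the endpoints, remember the closest point
        # in self.center, and set end_of_segment once the projection falls
        # past b.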
v = b - a
w = pos - a
c1 = np.dot(w, v)
if c1 <= 0: # before a
self.center = a
return linalg.norm(pos - a)
c2 = np.dot(v, v)
if c2 <= c1: # after b
self.center = b
self.end_of_segment = True
return linalg.norm(pos - b)
x = c1 / c2
l = a + x * v
self.center = l
return linalg.norm(pos - l)
def stop(self):
self.should_stop = True
def run(self):
rate = rospy.Rate(10) # 10hz
self.spawn_indicator()
current = 0
count = 0
while not self.should_stop:
if self.has_pos:
                # calculate the distance to the current line segment;
                # exit with an error if it exceeds tunnel_radius
                # advance to the next segment once past the end of this one
                # or within tunnel_radius of the next point
                # exit when the last position has been reached
self.position_indicator()
pos = np.array((self.local_position.x,
self.local_position.y,
self.local_position.z))
a_pos = np.array((self.positions[current][0],
self.positions[current][1],
self.positions[current][2]))
b_pos = np.array((self.positions[current + 1][0],
self.positions[current + 1][1],
self.positions[current + 1][2]))
dist = self.distance_to_line(a_pos, b_pos, pos)
b_dist = linalg.norm(pos - b_pos)
rospy.logdebug("distance to line: %f, distance to end: %f" % (dist, b_dist))
if dist > self.tunnel_radius:
msg = "left tunnel at position (%f, %f, %f)" % (self.local_position.x, self.local_position.y, self.local_position.z)
rospy.logerr(msg)
self.failed = True
break
if self.end_of_segment or b_dist < self.tunnel_radius:
rospy.loginfo("next segment")
self.end_of_segment = False
current = current + 1
if current == len(self.positions) - 1:
rospy.loginfo("no more positions")
break
rate.sleep()
count = count + 1
if count > 10 and not self.has_pos: # no position after 1 sec
rospy.logerr("no position")
self.failed = True
break
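# Hedged usage sketch (needs a running ROS master plus the Gazebo services
# used above; the waypoints are illustrative (x, y, z, heading) tuples in
# the NED-style local frame the callbacks assume):
#
#   assertion = FlightPathAssertion([(0, 0, -2, 0), (5, 0, -2, 0)], 1)
#   assertion.start()
#   # ...fly the mission under test...
#   assertion.stop()
#   assert not assertion.failed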
| gpl-3.0 | 196,801,837,651,360,700 | 36.761421 | 136 | 0.546848 | false |
clusto/clusto | src/clusto/commands/console.py | 3 | 2031 | #!/usr/bin/env python
# -*- mode: python; sh-basic-offset: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# vim: tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8
#
# Shell command
# Copyright 2010, Jeremy Grosser <[email protected]>
import os
import sys
import clusto
from clusto import script_helper
class Console(script_helper.Script):
'''
Use clusto's hardware port mappings to console to a remote server
using the serial console.
'''
def __init__(self):
script_helper.Script.__init__(self)
def _add_arguments(self, parser):
parser.add_argument('--user', '-u',
help='SSH User (you can also set this in clusto.conf '
'in console.user: --user > clusto.conf:console.user')
parser.add_argument('--force', '-f', action='store_true',
default=False, help='Force taking over the console session')
parser.add_argument('server', nargs=1,
help='Object to console to (IP or name)')
def run(self, args):
try:
server = clusto.get(args.server[0])
if not server:
                raise LookupError('Object "%s" does not exist' % args.server[0])
except Exception as e:
self.debug(e)
            self.error('No object like "%s" was found' % args.server[0])
return 1
server = server[0]
if not hasattr(server, 'console'):
self.error('The object %s lacks a console method' % server.name)
return 2
user = os.environ.get('USER')
if args.user:
self.debug('Grabbing user from parameter')
user = args.user
else:
self.debug('Grabbing user from config file or default')
user = self.get_conf('console.user', user)
self.debug('User is "%s"' % user)
return(server.console(ssh_user=user))
def main():
console, args = script_helper.init_arguments(Console)
return(console.run(args))
if __name__ == '__main__':
sys.exit(main())
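# Hedged CLI sketch, assuming this module is wired up as a clusto
# subcommand:
#
#   clusto console --user admin server01
#
# "server01" is resolved with clusto.get() and the session is opened via
# the entity's console() method.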
| bsd-3-clause | 399,416,895,033,877,760 | 30.246154 | 80 | 0.593796 | false |
alexander-yu/nycodex | nycodex/scrape/dataset.py | 1 | 5707 | import os
import tempfile
import typing
import geoalchemy2
import geopandas as gpd
import pandas as pd
import sqlalchemy as sa
from nycodex import db
from nycodex.logging import get_logger
from . import exceptions, utils
BASE = "https://data.cityofnewyork.us/api"
RAW_SCHEMA = 'raw'
logger = get_logger(__name__)
def scrape_geojson(trans: sa.engine.base.Connection, dataset_id: str) -> None:
log = logger.bind(dataset_id=dataset_id, method="scrape_geojson")
params = {"method": "export", "format": "GeoJSON"}
url = f"{BASE}/geospatial/{dataset_id}"
with utils.download_file(url, params=params) as fname:
try:
df = gpd.read_file(fname)
except ValueError as e:
raise exceptions.SocrataParseError from e
for column in df.columns:
if column == 'geometry':
continue
# Bad type inference
try:
df[column] = df[column].astype(int)
continue
except (ValueError, TypeError):
pass
try:
df[column] = df[column].astype(float)
continue
except (ValueError, TypeError):
pass
try:
df[column] = pd.to_datetime(df[column])
continue
except (ValueError, TypeError):
pass
log.info("Inserting")
    # TODO: use ogr2ogr instead?
# srid 4326 for latitude/longitude coordinates
ty = df.geometry.map(lambda x: x.geometryType()).unique()
if len(ty) != 1:
msg = f"Too many geometry types detected: {ty}"
raise exceptions.SocrataParseError(msg)
ty = ty[0]
df['geometry'] = df['geometry'].map(
lambda x: geoalchemy2.WKTElement(x.wkt, srid=4326))
df.to_sql(
f"{dataset_id}-new",
db.engine,
schema=RAW_SCHEMA,
if_exists='replace',
index=False,
dtype={"geometry": geoalchemy2.Geometry(geometry_type=ty, srid=4326)})
trans.execute(f"DROP TABLE IF EXISTS\"{RAW_SCHEMA}.{dataset_id}\"")
trans.execute(f"""
ALTER TABLE \"{RAW_SCHEMA}.{dataset_id}-new\"
RENAME TO "{RAW_SCHEMA}.{dataset_id}"
""")
trans.execute(f"""
UPDATE dataset
SET scraped_at = NOW()
WHERE id = '{dataset_id}'
""")
log.info("Successfully inserted")
def scrape_dataset(trans, dataset_id, names, fields, types) -> None:
log = logger.bind(dataset_id=dataset_id)
assert all(len(f) <= 63 for f in fields)
url = f"{BASE}/views/{dataset_id}/rows.csv"
with utils.download_file(url, params={"accessType": "DOWNLOAD"}) as fname:
try:
df = pd.read_csv(fname, dtype={
name: str
for name, ty in zip(names, types)
if ty not in {db.DataType.NUMBER, db.DataType.CHECKBOX}
}) # yapf: disable
except pd.errors.ParserError as e:
raise exceptions.SocrataParseError from e
df = df[names] # Reorder columns
df.columns = fields # replace with normalized names
columns, df = dataset_columns(df, types)
columns = ", ".join(f"\"{name}\" {ty}"
for name, ty in zip(df.columns, columns))
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.abspath(os.path.join(tmpdir, "data.csv"))
df.to_csv(path, header=False, index=False)
# Handle Postgresql permission denied errors
os.chmod(tmpdir, 0o775)
os.chmod(path, 0o775)
log.info("Inserting dataset")
trans.execute(f"DROP TABLE IF EXISTS \"{RAW_SCHEMA}.{dataset_id}\"")
trans.execute(
f"CREATE TABLE \"{RAW_SCHEMA}.{dataset_id}\" ({columns})")
trans.execute(f"""
COPY "{RAW_SCHEMA}.{dataset_id}"
FROM '{path}'
WITH CSV NULL AS ''
""")
log.info("Insert Sucessful!")
def dataset_columns(df: pd.DataFrame, types: typing.Iterable[str]
) -> typing.Tuple[typing.List[str], pd.DataFrame]:
columns = []
for field, ty in zip(df.columns, types):
if ty == db.DataType.CALENDAR_DATE:
ty = "TIMESTAMP WITHOUT TIME ZONE"
elif ty == db.DataType.CHECKBOX:
ty = "BOOLEAN"
elif ty == db.DataType.DATE:
ty = "TIMESTAMP WITH TIME ZONE"
elif ty in {
db.DataType.EMAIL, db.DataType.HTML, db.DataType.LOCATION,
db.DataType.PHONE, db.DataType.TEXT, db.DataType.URL
}:
ty = "TEXT"
elif ty == db.DataType.MONEY:
ty = "MONEY"
elif ty == db.DataType.NUMBER:
if not pd.api.types.is_numeric_dtype(df[field]):
raise exceptions.SocrataTypeError(field, ty, df[field].dtype)
elif pd.api.types.is_integer_dtype(df[field]):
# TODO(alan): Handle nullable integers
min, max = df[field].min(), df[field].max()
if -32768 < min and max < 32767:
ty = "SMALLINT"
elif -2147483648 < min and max < 2147483647:
ty = "INTEGER"
else:
ty = "BIGINT"
else:
ty = "DOUBLE PRECISION"
elif ty == db.DataType.PERCENT:
ty = "NUMERIC(6, 3)"
if (df[field].dropna().str[-1] != "%").any():
raise exceptions.SocrataTypeError(field, ty, df[field].dtype)
try:
df[field] = df[field].str[:-1].astype(float)
            except (ValueError, TypeError) as e:
                raise exceptions.SocrataTypeError(
                    field, ty, df[field].dtype) from e
else:
raise RuntimeError(f"Unknown datatype {ty}")
columns.append(ty)
return columns, df
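# Hedged illustration of the type mapping (values are made up):
#
#   df = pd.DataFrame({"pct": ["12.5%", "7%"]})
#   cols, out = dataset_columns(df, [db.DataType.PERCENT])
#   # cols == ["NUMERIC(6, 3)"]; out["pct"] now holds the floats 12.5, 7.0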
| apache-2.0 | -2,777,829,309,018,861,600 | 33.587879 | 78 | 0.566497 | false |
drwyrm/Flexget | flexget/tests/test_validator.py | 9 | 4255 | """
These validate methods are never run by FlexGet anymore, but these tests serve as a sanity check that the
old validators will get converted to new schemas properly for plugins still using the `validator` method.
"""
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flexget import validator
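# A hedged illustration of the pattern exercised below: the old validator
# API builds trees such as
#
#   root = validator.factory('dict')
#   root.accept('text', key='title')
#
# which FlexGet converts to (roughly) the JSON schema
# {'type': 'object', 'properties': {'title': {'type': 'string'}}}.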
class TestValidator(object):
def test_default(self):
root = validator.factory()
assert root.name == 'root', 'expected root'
dv = root.accept('dict')
assert dv.name == 'dict', 'expected dict'
dv.accept('text', key='text')
def test_dict(self):
dv = validator.factory('dict')
dv.accept('dict', key='foo')
result = dv.validate({'foo': {}})
assert not dv.errors.messages, 'should have passed foo'
assert result, 'invalid result for foo'
result = dv.validate({'bar': {}})
assert dv.errors.messages, 'should not have passed bar'
assert not result, 'should have an invalid result for bar'
# Test validation of dictionary keys
dv = validator.factory('dict')
dv.accept_valid_keys('dict', key_type='number')
result = dv.validate({3: {}})
assert not dv.errors.messages, 'should have passed 3'
assert result, 'invalid result for key 3'
def test_regexp_match(self):
re_match = validator.factory('regexp_match')
re_match.accept('abc.*')
assert not re_match.validate('foobar'), 'foobar should not have passed'
assert re_match.validate('abcdefg'), 'abcdefg should have passed'
def test_interval(self):
interval = validator.factory('interval')
assert interval.validate('3 days')
assert interval.validate('12 hours')
assert interval.validate('1 minute')
assert not interval.validate('aoeu')
assert not interval.validate('14')
assert not interval.validate('3 dayz')
assert not interval.validate('about 5 minutes')
def test_choice(self):
choice = validator.factory('choice')
choice.accept('foo')
choice.accept('Bar', ignore_case=True)
choice.accept(120)
choice.validate('foo')
assert not choice.errors.messages, 'foo should be valid'
choice.validate(120)
assert not choice.errors.messages, '120 should be valid'
choice.validate('bAR')
assert not choice.errors.messages, 'bAR should be valid'
choice.validate('xxx')
assert choice.errors.messages, 'xxx should be invalid'
choice.errors.messages = []
choice.validate(300)
assert choice.errors.messages, '300 should be invalid'
choice.errors.messages = []
choice.validate('fOO')
assert choice.errors.messages, 'fOO should be invalid'
    # This validator is not supported with JSON schema; the leading
    # underscore keeps the test from being collected.
def _lazy(self):
"""Test lazy validators by making a recursive one."""
def recursive_validator():
root = validator.factory('dict')
root.accept('integer', key='int')
root.accept(recursive_validator, key='recurse')
return root
test_config = {'int': 1,
'recurse': {
'int': 2,
'recurse': {
'int': 3}}}
assert recursive_validator().validate(test_config), 'Config should pass validation'
test_config['recurse']['badkey'] = 4
assert not recursive_validator().validate(test_config), 'Config should not be valid'
def test_path(self, tmpdir):
path = validator.factory('path')
path_allow_missing = validator.factory('path', allow_missing=True)
path.validate(tmpdir.strpath)
assert not path.errors.messages, '%s should be valid' % tmpdir.strpath
path_allow_missing.validate('missing_directory')
assert not path_allow_missing.errors.messages, 'missing_directory should be valid with allow_missing'
path.validate('missing_directory')
assert path.errors.messages, 'missing_directory should be invalid'
path_allow_missing.errors.messages = []
| mit | -6,372,121,253,035,960,000 | 38.036697 | 109 | 0.627732 | false |
oliverlee/sympy | sympy/sets/tests/test_sets.py | 7 | 36780 | from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan,
GreaterThan, LessThan, Max, Min, And, Or, Eq, Ge, Le, Gt, Lt, Float,
FiniteSet, Intersection, imageset, I, true, false, ProductSet, E,
sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi,
    Pow, Contains, Sum, rootof, SymmetricDifference, Piecewise,
Matrix)
from mpmath import mpi
from sympy.core.compatibility import range
from sympy.utilities.pytest import raises
from sympy.utilities.pytest import raises, XFAIL
def test_interval_arguments():
assert Interval(0, oo) == Interval(0, oo, False, True)
assert Interval(0, oo).right_open is true
assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
assert Interval(-oo, 0).left_open is true
assert Interval(oo, -oo) == S.EmptySet
assert isinstance(Interval(1, 1), FiniteSet)
e = Sum(x, (x, 1, 3))
assert isinstance(Interval(e, e), FiniteSet)
assert Interval(1, 0) == S.EmptySet
assert Interval(1, 1).measure == 0
assert Interval(1, 1, False, True) == S.EmptySet
assert Interval(1, 1, True, False) == S.EmptySet
assert Interval(1, 1, True, True) == S.EmptySet
assert isinstance(Interval(0, Symbol('a')), Interval)
assert Interval(Symbol('a', real=True, positive=True), 0) == S.EmptySet
raises(ValueError, lambda: Interval(0, S.ImaginaryUnit))
raises(ValueError, lambda: Interval(0, Symbol('z', real=False)))
raises(NotImplementedError, lambda: Interval(0, 1, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y)))
raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y)))
def test_interval_symbolic_end_points():
a = Symbol('a', real=True)
assert Union(Interval(0, a), Interval(0, 3)).sup == Max(a, 3)
assert Union(Interval(a, 0), Interval(-3, 0)).inf == Min(-3, a)
assert Interval(0, a).contains(1) == LessThan(1, a)
def test_union():
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
Interval(1, 3, False, True)
assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
Interval(1, 3, True, True)
assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
Interval(1, 3, True)
assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
Interval(1, 3)
assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
Interval(1, 3)
assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2)
assert Union(S.EmptySet) == S.EmptySet
assert Union(Interval(0, 1), [FiniteSet(1.0/n) for n in range(1, 10)]) == \
Interval(0, 1)
assert Interval(1, 2).union(Interval(2, 3)) == \
Interval(1, 2) + Interval(2, 3)
assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
assert Union(Set()) == Set()
assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3)
assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs')
assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3)
assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3)
assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4)
x = Symbol("x")
y = Symbol("y")
z = Symbol("z")
assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \
FiniteSet(x, FiniteSet(y, z))
# Test that Intervals and FiniteSets play nicely
assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3)
assert Interval(1, 3, True, True) + FiniteSet(3) == \
Interval(1, 3, True, False)
X = Interval(1, 3) + FiniteSet(5)
Y = Interval(1, 2) + FiniteSet(3)
XandY = X.intersect(Y)
assert 2 in X and 3 in X and 3 in XandY
assert XandY.is_subset(X) and XandY.is_subset(Y)
raises(TypeError, lambda: Union(1, 2, 3))
assert X.is_iterable is False
# issue 7843
assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \
FiniteSet(-sqrt(-I), sqrt(-I))
def test_difference():
assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True)
assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True)
assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True)
assert Interval(1, 3, True) - Interval(2, 3, True) == \
Interval(1, 2, True, False)
assert Interval(0, 2) - FiniteSet(1) == \
Union(Interval(0, 1, False, True), Interval(1, 2, True, False))
assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3)
assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \
FiniteSet(1, 2)
assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4)
assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
assert -1 in S.Reals - S.Naturals
def test_Complement():
assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
assert Complement(Union(Interval(0, 2),
FiniteSet(2, 3, 4)), Interval(1, 3)) == \
Union(Interval(0, 1, False, True), FiniteSet(4))
assert not 3 in Complement(Interval(0, 5), Interval(1, 4), evaluate=False)
assert -1 in Complement(S.Reals, S.Naturals, evaluate=False)
assert not 1 in Complement(S.Reals, S.Naturals, evaluate=False)
assert Complement(S.Integers, S.UniversalSet) == EmptySet()
assert S.UniversalSet.complement(S.Integers) == EmptySet()
assert (not 0 in S.Reals.intersect(S.Integers - FiniteSet(0)))
assert S.EmptySet - S.Integers == S.EmptySet
assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1)
assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \
Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi))
def test_complement():
assert Interval(0, 1).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True))
assert Interval(0, 1, True, False).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True))
assert Interval(0, 1, False, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True))
assert Interval(0, 1, True, True).complement(S.Reals) == \
Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True))
assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet
assert S.UniversalSet.complement(S.Reals) == S.EmptySet
assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet
assert S.EmptySet.complement(S.Reals) == S.Reals
assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True),
Interval(3, oo, True, True))
assert FiniteSet(0).complement(S.Reals) == \
Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True))
assert (FiniteSet(5) + Interval(S.NegativeInfinity,
0)).complement(S.Reals) == \
Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True)
assert FiniteSet(1, 2, 3).complement(S.Reals) == \
Interval(S.NegativeInfinity, 1, True, True) + \
Interval(1, 2, True, True) + Interval(2, 3, True, True) +\
Interval(3, S.Infinity, True, True)
assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x))
assert FiniteSet(0, x).complement(S.Reals) == Complement(Interval(-oo, 0, True, True) +
Interval(0, oo, True, True)
,FiniteSet(x), evaluate=False)
square = Interval(0, 1) * Interval(0, 1)
notsquare = square.complement(S.Reals*S.Reals)
assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(
pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)])
assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)])
def test_intersect():
x = Symbol('x')
assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2)
assert Interval(0, 2).intersect(Interval(1, 2, True)) == \
Interval(1, 2, True)
assert Interval(0, 2, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, False)
assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \
Interval(1, 2, False, True)
assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \
Union(Interval(0, 1), Interval(2, 2))
assert FiniteSet(1, 2)._intersect((1, 2, 3)) == FiniteSet(1, 2)
assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x)
assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \
FiniteSet('ham')
assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet
assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \
Union(Interval(0, 1), Interval(2, 2))
assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \
S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \
S.EmptySet
assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \
Union(FiniteSet(2, 3, 4, 5), Intersection(FiniteSet(6), Union(Interval(0, 5), FiniteSet('ham'))))
# issue 8217
assert Intersection(FiniteSet(x), FiniteSet(y)) == \
Intersection(FiniteSet(x), FiniteSet(y), evaluate=False)
assert FiniteSet(x).intersect(S.Reals) == \
Intersection(S.Reals, FiniteSet(x), evaluate=False)
# tests for the intersection alias
assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3)
assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet
assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \
Union(Interval(1, 1), Interval(2, 2))
def test_intersection():
# iterable
i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False)
assert i.is_iterable
assert set(i) == set([S(2), S(3)])
# challenging intervals
x = Symbol('x', real=True)
i = Intersection(Interval(0, 3), Interval(x, 6))
assert (5 in i) is False
raises(TypeError, lambda: 2 in i)
# Singleton special cases
assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet
assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x)
# Products
line = Interval(0, 5)
i = Intersection(line**2, line**3, evaluate=False)
assert (2, 2) not in i
assert (2, 2, 2) not in i
raises(ValueError, lambda: list(i))
assert Intersection(Intersection(S.Integers, S.Naturals, evaluate=False),
S.Reals, evaluate=False) == \
Intersection(S.Integers, S.Naturals, S.Reals, evaluate=False)
def test_issue_9623():
n = Symbol('n')
a = S.Reals
b = Interval(0, oo)
c = FiniteSet(n)
assert Intersection(a, b, c) == Intersection(b, c)
assert Intersection(Interval(1, 2), Interval(3, 4), FiniteSet(n)) == EmptySet()
def test_is_disjoint():
assert Interval(0, 2).is_disjoint(Interval(1, 2)) == False
assert Interval(0, 2).is_disjoint(Interval(3, 4)) == True
def test_ProductSet_of_single_arg_is_arg():
assert ProductSet(Interval(0, 1)) == Interval(0, 1)
def test_interval_subs():
a = Symbol('a', real=True)
assert Interval(0, a).subs(a, 2) == Interval(0, 2)
assert Interval(a, 0).subs(a, 2) == S.EmptySet
def test_interval_to_mpi():
assert Interval(0, 1).to_mpi() == mpi(0, 1)
assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1)
assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1))
def test_measure():
a = Symbol('a', real=True)
assert Interval(1, 3).measure == 2
assert Interval(0, a).measure == a
assert Interval(1, a).measure == a - 1
assert Union(Interval(1, 2), Interval(3, 4)).measure == 2
assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \
== 2
assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0
assert S.EmptySet.measure == 0
square = Interval(0, 10) * Interval(0, 10)
offsetsquare = Interval(5, 15) * Interval(5, 15)
band = Interval(-oo, oo) * Interval(2, 4)
assert square.measure == offsetsquare.measure == 100
assert (square + offsetsquare).measure == 175 # there is some overlap
assert (square - offsetsquare).measure == 75
assert (square * FiniteSet(1, 2, 3)).measure == 0
assert (square.intersect(band)).measure == 20
assert (square + band).measure == oo
assert (band * FiniteSet(1, 2, 3)).measure == nan
def test_is_subset():
assert Interval(0, 1).is_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_subset(Interval(0, 2)) is False
assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4))
assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False
assert FiniteSet(1).is_subset(Interval(0, 2))
assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False
assert (Interval(1, 2) + FiniteSet(3)).is_subset(
(Interval(0, 2, False, True) + FiniteSet(2, 3)))
assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True
assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False
assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True
assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True
assert Interval(0, 1).is_subset(S.EmptySet) is False
assert S.EmptySet.is_subset(S.EmptySet) is True
raises(ValueError, lambda: S.EmptySet.is_subset(1))
# tests for the issubset alias
assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True
assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True
def test_is_proper_subset():
assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True
assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False
assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0))
def test_is_superset():
assert Interval(0, 1).is_superset(Interval(0, 2)) == False
assert Interval(0, 3).is_superset(Interval(0, 2))
assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False
assert FiniteSet(1).is_superset(Interval(0, 2)) == False
assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False
assert (Interval(1, 2) + FiniteSet(3)).is_superset(
(Interval(0, 2, False, True) + FiniteSet(2, 3))) == False
assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False
assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False
assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False
assert Interval(0, 1).is_superset(S.EmptySet) == True
assert S.EmptySet.is_superset(S.EmptySet) == True
raises(ValueError, lambda: S.EmptySet.is_superset(1))
# tests for the issuperset alias
assert Interval(0, 1).issuperset(S.EmptySet) == True
assert S.EmptySet.issuperset(S.EmptySet) == True
def test_is_proper_superset():
assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False
assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True
assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True
raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0))
def test_contains():
assert Interval(0, 2).contains(1) is S.true
assert Interval(0, 2).contains(3) is S.false
assert Interval(0, 2, True, False).contains(0) is S.false
assert Interval(0, 2, True, False).contains(2) is S.true
assert Interval(0, 2, False, True).contains(0) is S.true
assert Interval(0, 2, False, True).contains(2) is S.false
assert Interval(0, 2, True, True).contains(0) is S.false
assert Interval(0, 2, True, True).contains(2) is S.false
assert FiniteSet(1, 2, 3).contains(2) is S.true
assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true
# issue 8197
from sympy.abc import a, b
assert isinstance(FiniteSet(b).contains(-a), Contains)
assert isinstance(FiniteSet(b).contains(a), Contains)
assert isinstance(FiniteSet(a).contains(1), Contains)
raises(TypeError, lambda: 1 in FiniteSet(a))
# issue 8209
rad1 = Pow(Pow(2, S(1)/3) - 1, S(1)/3)
rad2 = Pow(S(1)/9, S(1)/3) - Pow(S(2)/9, S(1)/3) + Pow(S(4)/9, S(1)/3)
s1 = FiniteSet(rad1)
s2 = FiniteSet(rad2)
assert s1 - s2 == S.EmptySet
items = [1, 2, S.Infinity, S('ham'), -1.1]
fset = FiniteSet(*items)
assert all(item in fset for item in items)
assert all(fset.contains(item) is S.true for item in items)
assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true
assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false
assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false
assert S.EmptySet.contains(1) is S.false
assert FiniteSet(rootof(x**3 + x - 1, 0)).contains(S.Infinity) is S.false
assert rootof(x**5 + x**3 + 1, 0) in S.Reals
assert rootof(x**5 + x**3 + 1, 1) not in S.Reals
# non-bool results
assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \
Or(And(x <= 2, x >= 1), And(x <= 4, x >= 3))
assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \
And(y <= 3, y <= x, y >= 1, y >= 2)
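# Editorial note: with symbolic arguments, .contains() stays symbolic -- it
# returns Contains objects or relational expressions (as in the asserts just
# above) rather than S.true/S.false, which is also why the ``in`` operator
# raises TypeError for symbols: it must produce a plain bool.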
def test_interval_symbolic():
x = Symbol('x')
e = Interval(0, 1)
assert e.contains(x) == And(0 <= x, x <= 1)
raises(TypeError, lambda: x in e)
e = Interval(0, 1, True, True)
assert e.contains(x) == And(0 < x, x < 1)
def test_union_contains():
x = Symbol('x')
i1 = Interval(0, 1)
i2 = Interval(2, 3)
i3 = Union(i1, i2)
raises(TypeError, lambda: x in i3)
e = i3.contains(x)
assert e == Or(And(0 <= x, x <= 1), And(2 <= x, x <= 3))
assert e.subs(x, -0.5) is false
assert e.subs(x, 0.5) is true
assert e.subs(x, 1.5) is false
assert e.subs(x, 2.5) is true
assert e.subs(x, 3.5) is false
U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6)
assert all(el not in U for el in [0, 4, -oo])
assert all(el in U for el in [2, 5, 10])
def test_is_number():
assert Interval(0, 1).is_number is False
assert Set().is_number is False
def test_Interval_is_left_unbounded():
assert Interval(3, 4).is_left_unbounded is False
assert Interval(-oo, 3).is_left_unbounded is True
assert Interval(Float("-inf"), 3).is_left_unbounded is True
def test_Interval_is_right_unbounded():
assert Interval(3, 4).is_right_unbounded is False
assert Interval(3, oo).is_right_unbounded is True
assert Interval(3, Float("+inf")).is_right_unbounded is True
def test_Interval_as_relational():
x = Symbol('x')
assert Interval(-1, 2, False, False).as_relational(x) == \
And(Le(-1, x), Le(x, 2))
assert Interval(-1, 2, True, False).as_relational(x) == \
And(Lt(-1, x), Le(x, 2))
assert Interval(-1, 2, False, True).as_relational(x) == \
And(Le(-1, x), Lt(x, 2))
assert Interval(-1, 2, True, True).as_relational(x) == \
And(Lt(-1, x), Lt(x, 2))
assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2))
assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2))
assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo))
assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo))
assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo))
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert Interval(x, y).as_relational(x) == (x <= y)
assert Interval(y, x).as_relational(x) == (y <= x)
def test_Finite_as_relational():
x = Symbol('x')
y = Symbol('y')
assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2))
assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5))
def test_Union_as_relational():
x = Symbol('x')
assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \
Or(And(Le(0, x), Le(x, 1)), Eq(x, 2))
assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \
And(Lt(0, x), Le(x, 1))
def test_Intersection_as_relational():
x = Symbol('x')
assert (Intersection(Interval(0, 1), FiniteSet(2),
evaluate=False).as_relational(x)
== And(And(Le(0, x), Le(x, 1)), Eq(x, 2)))
def test_EmptySet():
assert S.EmptySet.as_relational(Symbol('x')) is False
assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet
assert S.EmptySet.boundary == S.EmptySet
def test_finite_basic():
x = Symbol('x')
A = FiniteSet(1, 2, 3)
B = FiniteSet(3, 4, 5)
AorB = Union(A, B)
AandB = A.intersect(B)
assert A.is_subset(AorB) and B.is_subset(AorB)
assert AandB.is_subset(A)
assert AandB == FiniteSet(3)
assert A.inf == 1 and A.sup == 3
assert AorB.inf == 1 and AorB.sup == 5
assert FiniteSet(x, 1, 5).sup == Max(x, 5)
assert FiniteSet(x, 1, 5).inf == Min(x, 1)
# issue 7335
assert FiniteSet(S.EmptySet) != S.EmptySet
assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3)
# Ensure a variety of types can exist in a FiniteSet
s = FiniteSet((1, 2), Float, A, -5, x, 'eggs', x**2, Interval)
assert (A > B) is False
assert (A >= B) is False
assert (A < B) is False
assert (A <= B) is False
assert AorB > A and AorB > B
assert AorB >= A and AorB >= B
assert A >= A and A <= A
assert A >= AandB and B >= AandB
assert A > AandB and B > AandB
def test_powerset():
# EmptySet
A = FiniteSet()
pset = A.powerset()
assert len(pset) == 1
assert pset == FiniteSet(S.EmptySet)
# FiniteSets
A = FiniteSet(1, 2)
pset = A.powerset()
assert len(pset) == 2**len(A)
assert pset == FiniteSet(FiniteSet(), FiniteSet(1),
FiniteSet(2), A)
# Not finite sets
I = Interval(0, 1)
raises(NotImplementedError, I.powerset)
def test_product_basic():
H, T = 'H', 'T'
unit_line = Interval(0, 1)
d6 = FiniteSet(1, 2, 3, 4, 5, 6)
d4 = FiniteSet(1, 2, 3, 4)
coin = FiniteSet(H, T)
square = unit_line * unit_line
assert (0, 0) in square
assert 0 not in square
assert (H, T) in coin ** 2
assert (.5, .5, .5) in square * unit_line
assert (H, 3, 3) in coin * d6 * d6
HH, TT = sympify(H), sympify(T)
assert set(coin**2) == set(((HH, HH), (HH, TT), (TT, HH), (TT, TT)))
assert (d4*d4).is_subset(d6*d6)
assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union(
(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True))*Interval(-oo, oo),
Interval(-oo, oo)*(Interval(-oo, 0, True, True) +
Interval(1, oo, True, True)))
assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3)
assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3)
assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3)
assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square
assert len(coin*coin*coin) == 8
assert len(S.EmptySet*S.EmptySet) == 0
assert len(S.EmptySet*coin) == 0
raises(TypeError, lambda: len(coin*Interval(0, 2)))
def test_real():
x = Symbol('x', real=True, finite=True)
I = Interval(0, 5)
J = Interval(10, 20)
A = FiniteSet(1, 2, 30, x, S.Pi)
B = FiniteSet(-4, 0)
C = FiniteSet(100)
D = FiniteSet('Ham', 'Eggs')
assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C])
assert not D.is_subset(S.Reals)
assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C])
assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D])
assert not (I + A + D).is_subset(S.Reals)
def test_supinf():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert (Interval(0, 1) + FiniteSet(2)).sup == 2
assert (Interval(0, 1) + FiniteSet(2)).inf == 0
assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x)
assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x)
assert FiniteSet(5, 1, x).sup == Max(5, x)
assert FiniteSet(5, 1, x).inf == Min(1, x)
assert FiniteSet(5, 1, x, y).sup == Max(5, x, y)
assert FiniteSet(5, 1, x, y).inf == Min(1, x, y)
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \
S.Infinity
assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \
S.NegativeInfinity
assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs')
def test_universalset():
U = S.UniversalSet
x = Symbol('x')
assert U.as_relational(x) is True
assert U.union(Interval(2, 4)) == U
assert U.intersect(Interval(2, 4)) == Interval(2, 4)
assert U.measure == S.Infinity
assert U.boundary == S.EmptySet
assert U.contains(0) is S.true
def test_Union_of_ProductSets_shares():
line = Interval(0, 2)
points = FiniteSet(0, 1, 2)
assert Union(line * line, line * points) == line * line
def test_Interval_free_symbols():
# issue 6211
assert Interval(0, 1).free_symbols == set()
x = Symbol('x', real=True)
assert Interval(0, x).free_symbols == set([x])
def test_image_interval():
from sympy.core.numbers import Rational
x = Symbol('x', real=True)
a = Symbol('a', real=True)
assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \
Interval(-4, 2, True, False)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4)
assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
Interval(0, 4, False, True)
assert imageset(x, x**2, Interval(-2, 1, True, True)) == \
Interval(0, 4, False, True)
assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1)
assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \
Interval(-35, 0) # Multiple Maxima
assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \
+ Interval(2, oo) # Single Infinite discontinuity
assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \
Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities
# Test for Python lambda
assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2)
assert imageset(Lambda(x, a*x), Interval(0, 1)) == \
ImageSet(Lambda(x, a*x), Interval(0, 1))
assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \
ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1))
def test_image_piecewise():
f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True))
f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True))
assert imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(S(1)/25, oo))
assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1)
@XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826
def test_image_Intersection():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \
Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2)))
def test_image_FiniteSet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6)
def test_image_Union():
x = Symbol('x', real=True)
assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \
(Interval(0, 4) + FiniteSet(9))
def test_image_EmptySet():
x = Symbol('x', real=True)
assert imageset(x, 2*x, S.EmptySet) == S.EmptySet
def test_issue_5724_7680():
assert I not in S.Reals # issue 7680
assert Interval(-oo, oo).contains(I) is S.false
def test_boundary():
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert FiniteSet(1).boundary == FiniteSet(1)
assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1)
for left_open in (true, false) for right_open in (true, false))
def test_boundary_Union():
assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3)
assert ((Interval(0, 1, False, True)
+ Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2))
assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2)
assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \
== FiniteSet(0, 10)
assert Union(Interval(0, 10, True, True),
Interval(10, 15, True, True), evaluate=False).boundary \
== FiniteSet(0, 10, 15)
@XFAIL
def test_union_boundary_of_joining_sets():
""" Testing the boundary of unions is a hard problem """
assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \
== FiniteSet(0, 15)
def test_boundary_ProductSet():
open_square = Interval(0, 1, True, True) ** 2
assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1))
second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True)
assert (open_square + second_square).boundary == (
FiniteSet(0, 1) * Interval(0, 1)
+ FiniteSet(1, 2) * Interval(0, 1)
+ Interval(0, 1) * FiniteSet(0, 1)
+ Interval(1, 2) * FiniteSet(0, 1))
def test_boundary_ProductSet_line():
line_in_r2 = Interval(0, 1) * FiniteSet(0)
assert line_in_r2.boundary == line_in_r2
def test_is_open():
assert not Interval(0, 1, False, False).is_open
assert not Interval(0, 1, True, False).is_open
assert Interval(0, 1, True, True).is_open
assert not FiniteSet(1, 2, 3).is_open
def test_is_closed():
assert Interval(0, 1, False, False).is_closed
assert not Interval(0, 1, True, False).is_closed
assert FiniteSet(1, 2, 3).is_closed
def test_closure():
assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False)
def test_interior():
assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True)
def test_issue_7841():
raises(TypeError, lambda: x in S.Reals)
def test_Eq():
assert Eq(Interval(0, 1), Interval(0, 1))
assert Eq(Interval(0, 1), Interval(0, 2)) == False
s1 = FiniteSet(0, 1)
s2 = FiniteSet(1, 2)
assert Eq(s1, s1)
assert Eq(s1, s2) == False
assert Eq(s1*s2, s1*s2)
assert Eq(s1*s2, s2*s1) == False
def test_SymmetricDifference():
assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \
FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10)
assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3, 4, 5)) \
== FiniteSet(5)
assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \
FiniteSet(3, 4, 6)
assert Set(1, 2, 3) ^ Set(2, 3, 4) == Union(Set(1, 2, 3) - Set(2, 3, 4), \
Set(2, 3, 4) - Set(1, 2, 3))
assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \
Interval(2, 5), Interval(2, 5) - Interval(0, 4))
def test_issue_9536():
from sympy.functions.elementary.exponential import log
a = Symbol('a', real=True)
assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a)))
def test_issue_9637():
n = Symbol('n')
a = FiniteSet(n)
b = FiniteSet(2, n)
assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False)
assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False)
assert Complement(Interval(1, 3), b) == \
Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a)
assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False)
assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False)
def test_issue_9808():
assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False)
assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \
Complement(FiniteSet(1), FiniteSet(y), evaluate=False)
def test_issue_9956():
assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo)
assert Interval(-oo, oo).contains(1) is S.true
def test_issue_Symbol_inter():
i = Interval(0, oo)
r = S.Reals
mat = Matrix([0, 0, 0])
assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \
Intersection(i, FiniteSet(m))
assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \
Intersection(i, FiniteSet(m, n))
assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \
Intersection(r, FiniteSet(m, z), FiniteSet(n, x))
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \
Intersection(r, FiniteSet(3, m, n), evaluate=False)
assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \
Union(FiniteSet(3), Intersection(r, FiniteSet(m, n)))
assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \
Intersection(r, FiniteSet(n))
assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \
Intersection(r, FiniteSet(sin(x), cos(x)))
assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \
Intersection(r, FiniteSet(x**2, sin(x)))
def test_issue_10113():
f = x**2/(x**2 - 4)
assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True))
assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0)
assert imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(S(9)/5, oo))
def test_issue_10248():
assert list(Intersection(S.Reals, FiniteSet(x))) == [
And(x < oo, x > -oo)]
def test_issue_9447():
a = Interval(0, 1) + Interval(2, 3)
assert Complement(S.UniversalSet, a) == Complement(
S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
assert Complement(S.Naturals, a) == Complement(
S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False)
def test_issue_10337():
assert (FiniteSet(2) == 3) is False
assert (FiniteSet(2) != 3) is True
raises(TypeError, lambda: FiniteSet(2) < 3)
raises(TypeError, lambda: FiniteSet(2) <= 3)
raises(TypeError, lambda: FiniteSet(2) > 3)
raises(TypeError, lambda: FiniteSet(2) >= 3)
def test_issue_10326():
bad = [
EmptySet(),
FiniteSet(1),
Interval(1, 2),
S.ComplexInfinity,
S.ImaginaryUnit,
S.Infinity,
S.NaN,
S.NegativeInfinity,
]
interval = Interval(0, 5)
for i in bad:
assert i not in interval
x = Symbol('x', real=True)
nr = Symbol('nr', real=False)
assert x + 1 in Interval(x, x + 4)
assert nr not in Interval(x, x + 4)
assert Interval(1, 2) in FiniteSet(Interval(0, 5), Interval(1, 2))
assert Interval(-oo, oo).contains(oo) is S.false
assert Interval(-oo, oo).contains(-oo) is S.false
| bsd-3-clause | 6,755,135,656,937,935,000 | 36.378049 | 107 | 0.604051 | false |
skosukhin/spack | lib/spack/spack/version.py | 1 | 26634 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module implements Version and version-ish objects. These are:
Version
A single version of a package.
VersionRange
A range of versions of a package.
VersionList
A list of Versions and VersionRanges.
All of these types support the following operations, which can
be called on any of the types::
__eq__, __ne__, __lt__, __gt__, __ge__, __le__, __hash__
__contains__
satisfies
overlaps
union
intersection
concrete
"""
import re
import numbers
from bisect import bisect_left
from functools import wraps
from six import string_types
from spack.util.spack_yaml import syaml_dict
__all__ = ['Version', 'VersionRange', 'VersionList', 'ver']
# Valid version characters
VALID_VERSION = r'[A-Za-z0-9_.-]'
def int_if_int(string):
"""Convert a string to int if possible. Otherwise, return a string."""
try:
return int(string)
except ValueError:
return string
def coerce_versions(a, b):
"""
Convert both a and b to the 'greatest' type between them, in this order:
Version < VersionRange < VersionList
This is used to simplify comparison operations below so that we're always
comparing things that are of the same type.
"""
order = (Version, VersionRange, VersionList)
ta, tb = type(a), type(b)
def check_type(t):
if t not in order:
raise TypeError("coerce_versions cannot be called on %s" % t)
check_type(ta)
check_type(tb)
if ta == tb:
return (a, b)
elif order.index(ta) > order.index(tb):
if ta == VersionRange:
return (a, VersionRange(b, b))
else:
return (a, VersionList([b]))
else:
if tb == VersionRange:
return (VersionRange(a, a), b)
else:
return (VersionList([a]), b)
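# For illustration (editorial comment): coercion always promotes the "lesser"
# type, so coerce_versions(Version('1.0'), VersionRange('1.0', '2.0')) yields
# (VersionRange('1.0', '1.0'), VersionRange('1.0', '2.0')), letting the
# comparison methods below assume both operands share a type.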
def coerced(method):
"""Decorator that ensures that argument types of a method are coerced."""
@wraps(method)
def coercing_method(a, b, *args, **kwargs):
if type(a) == type(b) or a is None or b is None:
return method(a, b, *args, **kwargs)
else:
ca, cb = coerce_versions(a, b)
return getattr(ca, method.__name__)(cb, *args, **kwargs)
return coercing_method
class Version(object):
"""Class to represent versions"""
def __init__(self, string):
string = str(string)
if not re.match(VALID_VERSION, string):
raise ValueError("Bad characters in version string: %s" % string)
# preserve the original string, but trimmed.
string = string.strip()
self.string = string
# Split version into alphabetical and numeric segments
segment_regex = r'[a-zA-Z]+|[0-9]+'
segments = re.findall(segment_regex, string)
self.version = tuple(int_if_int(seg) for seg in segments)
# Store the separators from the original version string as well.
self.separators = tuple(re.split(segment_regex, string)[1:])
@property
def dotted(self):
"""The dotted representation of the version.
Example:
>>> version = Version('1-2-3b')
>>> version.dotted
Version('1.2.3b')
Returns:
Version: The version with separator characters replaced by dots
"""
return Version(self.string.replace('-', '.').replace('_', '.'))
@property
def underscored(self):
"""The underscored representation of the version.
Example:
>>> version = Version('1.2.3b')
>>> version.underscored
Version('1_2_3b')
Returns:
Version: The version with separator characters replaced by
underscores
"""
return Version(self.string.replace('.', '_').replace('-', '_'))
@property
def dashed(self):
"""The dashed representation of the version.
Example:
>>> version = Version('1.2.3b')
>>> version.dashed
Version('1-2-3b')
Returns:
Version: The version with separator characters replaced by dashes
"""
return Version(self.string.replace('.', '-').replace('_', '-'))
@property
def joined(self):
"""The joined representation of the version.
Example:
>>> version = Version('1.2.3b')
>>> version.joined
Version('123b')
Returns:
Version: The version with separator characters removed
"""
return Version(
self.string.replace('.', '').replace('-', '').replace('_', ''))
def up_to(self, index):
"""The version up to the specified component.
Examples:
>>> version = Version('1.23-4b')
>>> version.up_to(1)
Version('1')
>>> version.up_to(2)
Version('1.23')
>>> version.up_to(3)
Version('1.23-4')
>>> version.up_to(4)
Version('1.23-4b')
>>> version.up_to(-1)
Version('1.23-4')
>>> version.up_to(-2)
Version('1.23')
>>> version.up_to(-3)
Version('1')
Returns:
Version: The first index components of the version
"""
return self[:index]
def lowest(self):
return self
def highest(self):
return self
def isnumeric(self):
"""Tells if this version is numeric (vs. a non-numeric version). A
version will be numeric as long as the first section of it is,
even if it contains non-numeric portions.
Some numeric versions:
1
1.1
1.1a
1.a.1b
Some non-numeric versions:
develop
system
myfavoritebranch
"""
return isinstance(self.version[0], numbers.Integral)
def isdevelop(self):
"""Triggers on the special case of the `@develop` version."""
return self.string == 'develop'
@coerced
def satisfies(self, other):
"""A Version 'satisfies' another if it is at least as specific and has
a common prefix. e.g., we want [email protected] to satisfy a request for
[email protected] so that when a user asks to build with [email protected], we can find
a suitable compiler.
"""
nself = len(self.version)
nother = len(other.version)
return nother <= nself and self.version[:nother] == other.version
def __iter__(self):
return iter(self.version)
def __getitem__(self, idx):
cls = type(self)
if isinstance(idx, numbers.Integral):
return self.version[idx]
elif isinstance(idx, slice):
string_arg = []
pairs = zip(self.version[idx], self.separators[idx])
for token, sep in pairs:
string_arg.append(str(token))
string_arg.append(str(sep))
string_arg.pop() # We don't need the last separator
string_arg = ''.join(string_arg)
return cls(string_arg)
message = '{cls.__name__} indices must be integers'
raise TypeError(message.format(cls=cls))
def __repr__(self):
return 'Version(' + repr(self.string) + ')'
def __str__(self):
return self.string
def __format__(self, format_spec):
return self.string.format(format_spec)
@property
def concrete(self):
return self
def _numeric_lt(self, other):
"""Compares two versions, knowing they're both numeric"""
# Standard comparison of two numeric versions
for a, b in zip(self.version, other.version):
if a == b:
continue
else:
# Numbers are always "newer" than letters.
# This is for consistency with RPM. See patch
# #60884 (and details) from bugzilla #50977 in
# the RPM project at rpm.org. Or look at
# rpmvercmp.c if you want to see how this is
# implemented there.
if type(a) != type(b):
return type(b) == int
else:
return a < b
# If the common prefix is equal, the one
# with more segments is bigger.
return len(self.version) < len(other.version)
@coerced
def __lt__(self, other):
"""Version comparison is designed for consistency with the way RPM
does things. If you need more complicated versions in installed
packages, you should override your package's version string to
express it more sensibly.
"""
if other is None:
return False
# Coerce if other is not a Version
# simple equality test first.
if self.version == other.version:
return False
# First priority: anything < develop
sdev = self.isdevelop()
if sdev:
return False # source = develop, it can't be < anything
# Now we know !sdev
odev = other.isdevelop()
if odev:
return True # src < dst
# now we know neither self nor other isdevelop().
# Principle: Non-numeric is less than numeric
# (so numeric will always be preferred by default)
if self.isnumeric():
if other.isnumeric():
return self._numeric_lt(other)
else: # self = numeric; other = non-numeric
# Numeric > Non-numeric (always)
return False
else:
if other.isnumeric(): # self = non-numeric, other = numeric
# non-numeric < numeric (always)
return True
else: # Both non-numeric
# Maybe consider other ways to compare here...
return self.string < other.string
@coerced
def __eq__(self, other):
return (other is not None and
type(other) == Version and self.version == other.version)
@coerced
def __ne__(self, other):
return not (self == other)
@coerced
def __le__(self, other):
return self == other or self < other
@coerced
def __ge__(self, other):
return not (self < other)
@coerced
def __gt__(self, other):
return not (self == other) and not (self < other)
def __hash__(self):
return hash(self.version)
@coerced
def __contains__(self, other):
if other is None:
return False
return other.version[:len(self.version)] == self.version
def is_predecessor(self, other):
"""True if the other version is the immediate predecessor of this one.
That is, NO versions v exist such that:
(self < v < other and v not in self).
"""
if len(self.version) != len(other.version):
return False
sl = self.version[-1]
ol = other.version[-1]
return type(sl) == int and type(ol) == int and (ol - sl == 1)
def is_successor(self, other):
return other.is_predecessor(self)
@coerced
def overlaps(self, other):
return self in other or other in self
@coerced
def union(self, other):
if self == other or other in self:
return self
elif self in other:
return other
else:
return VersionList([self, other])
@coerced
def intersection(self, other):
if self == other:
return self
else:
return VersionList()
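# Ordering examples for the class above (editorial comment; the values follow
# the RPM-style rules implemented in _numeric_lt and __lt__):
#
#   >>> Version('1.a') < Version('1.1')      # letters sort before numbers
#   True
#   >>> Version('1.2') < Version('1.2.1')    # longer wins on an equal prefix
#   True
#   >>> Version('develop') < Version('1.0')  # develop is newer than anything
#   False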
class VersionRange(object):
def __init__(self, start, end):
if isinstance(start, string_types):
start = Version(start)
if isinstance(end, string_types):
end = Version(end)
self.start = start
self.end = end
if start and end and end < start:
raise ValueError("Invalid Version range: %s" % self)
def lowest(self):
return self.start
def highest(self):
return self.end
@coerced
def __lt__(self, other):
"""Sort VersionRanges lexicographically so that they are ordered first
by start and then by end. None denotes an open range, so None in
the start position is less than everything except None, and None in
the end position is greater than everything but None.
"""
if other is None:
return False
s, o = self, other
if s.start != o.start:
return s.start is None or (
o.start is not None and s.start < o.start)
return ((s.end != o.end and o.end is None) or
(s.end is not None and s.end < o.end))
@coerced
def __eq__(self, other):
return (other is not None and
type(other) == VersionRange and
self.start == other.start and self.end == other.end)
@coerced
def __ne__(self, other):
return not (self == other)
@coerced
def __le__(self, other):
return self == other or self < other
@coerced
def __ge__(self, other):
return not (self < other)
@coerced
def __gt__(self, other):
return not (self == other) and not (self < other)
@property
def concrete(self):
return self.start if self.start == self.end else None
@coerced
def __contains__(self, other):
if other is None:
return False
in_lower = (self.start == other.start or
self.start is None or
(other.start is not None and (
self.start < other.start or
other.start in self.start)))
if not in_lower:
return False
in_upper = (self.end == other.end or
self.end is None or
(other.end is not None and (
self.end > other.end or
other.end in self.end)))
return in_upper
@coerced
def satisfies(self, other):
"""A VersionRange satisfies another if some version in this range
would satisfy some version in the other range. To do this it must
either:
a) Overlap with the other range
b) The start of this range satisfies the end of the other range.
This is essentially the same as overlaps(), but overlaps assumes
that its arguments are specific. That is, 4.7 is interpreted as
4.7.0.0.0.0... . This function assumes that 4.7 would be satisfied
by 4.7.3.5, etc.
Rationale:
If a user asks for [email protected]:4.7, and a package is only compatible with
[email protected]:4.8, then that package should be able to build under the
constraints. Just using overlaps() would not work here.
Note that we don't need to check whether the end of this range
would satisfy the start of the other range, because overlaps()
already covers that case.
Note further that overlaps() is a symmetric operation, while
satisfies() is not.
"""
return (self.overlaps(other) or
# if either self.start or other.end are None, then this can't
# satisfy, or overlaps() would've taken care of it.
self.start and other.end and self.start.satisfies(other.end))
@coerced
def overlaps(self, other):
return ((self.start is None or other.end is None or
self.start <= other.end or
other.end in self.start or self.start in other.end) and
(other.start is None or self.end is None or
other.start <= self.end or
other.start in self.end or self.end in other.start))
@coerced
def union(self, other):
if not self.overlaps(other):
if (self.end is not None and other.start is not None and
self.end.is_predecessor(other.start)):
return VersionRange(self.start, other.end)
if (other.end is not None and self.start is not None and
other.end.is_predecessor(self.start)):
return VersionRange(other.start, self.end)
return VersionList([self, other])
# if we're here, then we know the ranges overlap.
if self.start is None or other.start is None:
start = None
else:
start = self.start
# TODO: See note in intersection() about < and in discrepancy.
if self.start in other.start or other.start < self.start:
start = other.start
if self.end is None or other.end is None:
end = None
else:
end = self.end
# TODO: See note in intersection() about < and in discrepancy.
if other.end not in self.end:
if end in other.end or other.end > self.end:
end = other.end
return VersionRange(start, end)
@coerced
def intersection(self, other):
if self.overlaps(other):
if self.start is None:
start = other.start
else:
start = self.start
if other.start is not None:
if other.start > start or other.start in start:
start = other.start
if self.end is None:
end = other.end
else:
end = self.end
# TODO: does this make sense?
# This is tricky:
# 1.6.5 in 1.6 = True (1.6.5 is more specific)
# 1.6 < 1.6.5 = True (lexicographic)
# Should 1.6 NOT be less than 1.6.5? Hm.
# Here we test (not end in other.end) first to avoid paradox.
if other.end is not None and end not in other.end:
if other.end < end or other.end in end:
end = other.end
return VersionRange(start, end)
else:
return VersionList()
def __hash__(self):
return hash((self.start, self.end))
def __repr__(self):
return self.__str__()
def __str__(self):
out = ""
if self.start:
out += str(self.start)
out += ":"
if self.end:
out += str(self.end)
return out
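# Range examples (editorial comment): an open end is written as an empty side
# of the colon, and union/intersection keep results as tight as the semantics
# allow:
#
#   >>> VersionRange('1.0', '2.0').intersection(VersionRange('1.5', '3.0'))
#   1.5:2.0
#   >>> VersionRange('1.0', '2.0').union(VersionRange('3.0', '4.0'))
#   [1.0:2.0, 3.0:4.0]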
class VersionList(object):
"""Sorted, non-redundant list of Versions and VersionRanges."""
def __init__(self, vlist=None):
self.versions = []
if vlist is not None:
if isinstance(vlist, string_types):
vlist = _string_to_version(vlist)
if type(vlist) == VersionList:
self.versions = vlist.versions
else:
self.versions = [vlist]
else:
vlist = list(vlist)
for v in vlist:
self.add(ver(v))
def add(self, version):
if type(version) in (Version, VersionRange):
# This normalizes single-value version ranges.
if version.concrete:
version = version.concrete
i = bisect_left(self, version)
while i - 1 >= 0 and version.overlaps(self[i - 1]):
version = version.union(self[i - 1])
del self.versions[i - 1]
i -= 1
while i < len(self) and version.overlaps(self[i]):
version = version.union(self[i])
del self.versions[i]
self.versions.insert(i, version)
elif type(version) == VersionList:
for v in version:
self.add(v)
else:
raise TypeError("Can't add %s to VersionList" % type(version))
@property
def concrete(self):
if len(self) == 1:
return self[0].concrete
else:
return None
def copy(self):
return VersionList(self)
def lowest(self):
"""Get the lowest version in the list."""
if not self:
return None
else:
return self[0].lowest()
def highest(self):
"""Get the highest version in the list."""
if not self:
return None
else:
return self[-1].highest()
@coerced
def overlaps(self, other):
if not other or not self:
return False
s = o = 0
while s < len(self) and o < len(other):
if self[s].overlaps(other[o]):
return True
elif self[s] < other[o]:
s += 1
else:
o += 1
return False
def to_dict(self):
"""Generate human-readable dict for YAML."""
if self.concrete:
return syaml_dict([
('version', str(self[0]))
])
else:
return syaml_dict([
('versions', [str(v) for v in self])
])
@staticmethod
def from_dict(dictionary):
"""Parse dict from to_dict."""
if 'versions' in dictionary:
return VersionList(dictionary['versions'])
elif 'version' in dictionary:
return VersionList([dictionary['version']])
else:
raise ValueError("Dict must have 'version' or 'versions' in it.")
@coerced
def satisfies(self, other, strict=False):
"""A VersionList satisfies another if some version in the list
would satisfy some version in the other list. This uses
essentially the same algorithm as overlaps() does for
VersionList, but it calls satisfies() on member Versions
and VersionRanges.
If strict is specified, this version list must lie entirely
*within* the other in order to satisfy it.
"""
if not other or not self:
return False
if strict:
return self in other
s = o = 0
while s < len(self) and o < len(other):
if self[s].satisfies(other[o]):
return True
elif self[s] < other[o]:
s += 1
else:
o += 1
return False
@coerced
def update(self, other):
for v in other.versions:
self.add(v)
@coerced
def union(self, other):
result = self.copy()
result.update(other)
return result
@coerced
def intersection(self, other):
# TODO: make this faster. This is O(n^2).
result = VersionList()
for s in self:
for o in other:
result.add(s.intersection(o))
return result
@coerced
def intersect(self, other):
"""Intersect this spec's list with other.
Return True if the spec changed as a result; False otherwise
"""
isection = self.intersection(other)
changed = (isection.versions != self.versions)
self.versions = isection.versions
return changed
@coerced
def __contains__(self, other):
if len(self) == 0:
return False
for version in other:
i = bisect_left(self, version)
if i == 0:
if version not in self[0]:
return False
elif all(version not in v for v in self[i - 1:]):
return False
return True
def __getitem__(self, index):
return self.versions[index]
def __iter__(self):
return iter(self.versions)
def __reversed__(self):
return reversed(self.versions)
def __len__(self):
return len(self.versions)
@coerced
def __eq__(self, other):
return other is not None and self.versions == other.versions
@coerced
def __ne__(self, other):
return not (self == other)
@coerced
def __lt__(self, other):
return other is not None and self.versions < other.versions
@coerced
def __le__(self, other):
return self == other or self < other
@coerced
def __ge__(self, other):
return not (self < other)
@coerced
def __gt__(self, other):
return not (self == other) and not (self < other)
def __hash__(self):
return hash(tuple(self.versions))
def __str__(self):
return ",".join(str(v) for v in self.versions)
def __repr__(self):
return str(self.versions)
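# add() keeps the list sorted and merges overlapping entries as it inserts
# (editorial example):
#
#   >>> VersionList(['1.0:1.5', '1.4:2.0'])
#   [1.0:2.0]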
def _string_to_version(string):
"""Converts a string to a Version, VersionList, or VersionRange.
This is private. Client code should use ver().
"""
string = string.replace(' ', '')
if ',' in string:
return VersionList(string.split(','))
elif ':' in string:
s, e = string.split(':')
start = Version(s) if s else None
end = Version(e) if e else None
return VersionRange(start, end)
else:
return Version(string)
def ver(obj):
"""Parses a Version, VersionRange, or VersionList from a string
or list of strings.
"""
if isinstance(obj, (list, tuple)):
return VersionList(obj)
elif isinstance(obj, string_types):
return _string_to_version(obj)
elif isinstance(obj, (int, float)):
return _string_to_version(str(obj))
elif type(obj) in (Version, VersionRange, VersionList):
return obj
else:
raise TypeError("ver() can't convert %s to version!" % type(obj))
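# Dispatch summary for ver() (editorial comment): strings containing ',' parse
# to a VersionList, strings containing ':' to a VersionRange, anything else to
# a single Version; lists, tuples and numbers are converted accordingly.
#
#   >>> type(ver('1.2:1.4')).__name__
#   'VersionRange'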
| lgpl-2.1 | -6,174,181,543,707,250,000 | 29.40411 | 78 | 0.551551 | false |
acsone/website | website_blog_facebook_comment/models/website.py | 27 | 1338 | # -*- coding: utf-8 -*-
# Python source code encoding : https://www.python.org/dev/peps/pep-0263/
##############################################################################
#
# This module copyright :
# (c) 2015 Antiun Ingenieria, SL (Madrid, Spain, http://www.antiun.com)
# Endika Iglesias <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class Website(models.Model):
_inherit = 'website'
facebook_appid = fields.Char(string="Facebook AppID")
facebook_numposts = fields.Integer(string="Number of Posts", default=5)
| agpl-3.0 | -6,454,213,489,108,411,000 | 42.16129 | 78 | 0.622571 | false |
filipp/py-gsxws | tests/test_gsxws.py | 1 | 21335 | # -*- coding: utf-8 -*-
import os
import sys
import logging
from datetime import date, datetime
from unittest import TestCase, main, skip
sys.path.append(os.path.abspath('..'))
from gsxws.core import validate, GsxCache, connect
from gsxws.objectify import parse, gsx_diags_timestamp
from gsxws.products import Product
from gsxws import (repairs, escalations, lookups, returns,
GsxError, diagnostics, comptia, products,
comms, parts,)
def empty(a):
return a in [None, '', ' ']
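# These tests talk to live GSX endpoints and read their configuration from the
# environment; the names below are the variables the test cases actually use
# (set them to values valid for your own GSX account):
#
#   GSX_USER, GSX_SOLDTO, GSX_ENV       # credentials passed to connect()
#   GSX_SN, GSX_IMEI                    # serial/IMEI of devices to look up
#   GSX_SHIPTO, GSX_TECHID, GSX_EMAIL   # account and contact details
#   GSX_PHONE, GSX_PART, GSX_FILE, GSX_DISPATCH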
class CommsTestCase(TestCase):
def setUp(self):
self.priority = 'HIGH'
self.article_id = 'SN3133'
connect(os.getenv('GSX_USER'), os.getenv('GSX_SOLDTO'), os.getenv('GSX_ENV'))
self.articles = comms.fetch(priority=self.priority, readStatus=False)
def test_priority(self):
for a in self.articles:
self.assertEqual(a.priority, self.priority)
def test_date(self):
for a in self.articles:
self.assertIsInstance(a.createdDate, date)
def test_content(self):
content = comms.content(self.article_id)
self.assertEqual(content.languageCode, 'en')
def test_ack(self):
result = comms.ack(self.article_id, 'UNREAD')
self.assertEqual(result.acknowledgeType, 'UNREAD')
class RemoteTestCase(TestCase):
def setUp(self):
connect(os.getenv('GSX_USER'), os.getenv('GSX_SOLDTO'), os.getenv('GSX_ENV'))
self.sn = os.getenv('GSX_SN')
class RemoteDeviceTestCase(RemoteTestCase):
def setUp(self):
super(RemoteDeviceTestCase, self).setUp()
device = Product(sn=self.sn)
comptia_codes = comptia.fetch()
# pick the first part with a component code
self.first_part = [x for x in device.parts() if not empty(x.componentCode)][0]
self.part = repairs.RepairOrderLine()
self.part.partNumber = os.getenv('GSX_PART', self.first_part.partNumber)
self.comptia_code = comptia_codes.get(
self.first_part.componentCode,
('X01', 'Memory Module (RAM) - Kernel Panic',)
)
self.part.comptiaCode = self.comptia_code[0][0]
self.part.comptiaModifier = 'A'
def assertUnicodeOrInt(self, val):
try:
self.assertIsInstance(val, str)
except AssertionError:
self.assertIsInstance(val, int)
class ComptiaTestCase(RemoteTestCase):
def test_fetch_comptia(self):
data = comptia.fetch()
self.assertIsInstance(data['E'], list)
class DiagnosticsTestCase(TestCase):
def setUp(self):
connect(os.getenv('GSX_USER'), os.getenv('GSX_SOLDTO'), os.getenv('GSX_ENV'))
self.sn = os.getenv('GSX_SN')
device = Product(sn=self.sn)
self.diag = diagnostics.Diagnostics(serialNumber=self.sn)
self.diag.shipTo = os.getenv('GSX_SHIPTO')
suites = self.diag.fetch_suites()
self.suite = suites[0]
def test_fetch(self):
res = self.diag.fetch()
for r in res.diagnosticTestData.testResult.result:
self.assertIsInstance(r.name, str)
self.assertUnicodeOrInt(r.value)
for r in res.diagnosticProfileData.profile.unit.key:
self.assertIsInstance(r.name, str)
self.assertUnicodeOrInt(r.value)
for r in res.diagnosticProfileData.report.reportData.key:
self.assertUnicodeOrInt(r.value)
def test_fetch_suites(self):
self.assertIsInstance(self.suite[0], int)
def test_run_test(self):
self.diag.diagnosticSuiteId = self.suite[0]
self.diag.run_test()
def test_fetch_dc_url(self):
url = self.diag.fetch_dc_url()
self.assertRegex(url, r'^https://')
def test_initiate_email(self):
self.diag.emailAddress = os.getenv('GSX_EMAIL')
res = self.diag.initiate()
self.assertRegex(str(res), r'\d+')
def test_initiate_phone(self):
self.diag.phoneNumber = os.getenv('GSX_PHONE')
with self.assertRaisesRegex(GsxError, 'SMS sending is not supported'):
self.diag.initiate()
class RepairTestCase(RemoteDeviceTestCase):
def setUp(self):
from datetime import datetime, timedelta
super(RepairTestCase, self).setUp()
customer = repairs.Customer(emailAddress='[email protected]')
customer.firstName = 'First Name'
customer.lastName = 'Last Name'
customer.addressLine1 = 'Address Line 1'
customer.primaryPhone = '0123456789'
customer.city = 'Helsinki'
customer.zipCode = '12345'
customer.state = 'ZZ'
customer.country = 'FI'
self.customer = customer
d = datetime.now() - timedelta(days=7)
self.date = d.strftime('%m/%d/%y')
self.time = d.strftime('%I:%M AM')
cdata = comptia.fetch()
gcode = str(self.first_part.componentCode)
self.symptom = '26094'
self.issue = 'Apps'
try:
self._symptoms = repairs.SymptomIssue(serialNumber=self.sn).fetch()
self.symptom = self._symptoms[0][0]
self._issues = repairs.SymptomIssue(reportedSymptomCode=self.symptom).fetch()
self.issue = self._issues[0][0]
except GsxError as e:
logging.debug(e)
class CoreFunctionTestCase(TestCase):
def test_dump(self):
rep = repairs.Repair(blaa='ääöö')
part = repairs.RepairOrderLine()
part.partNumber = '661-5571'
rep.orderLines = [part]
self.assertRegex(str(rep.dumps()), '<GsxObject><blaa>ääöö</blaa><orderLines>')
def test_cache(self):
"""Make sure the cache is working."""
c = GsxCache('test').set('spam', 'eggs')
self.assertEqual(c.get('spam'), 'eggs')
class TestTypes(TestCase):
def setUp(self):
with open('tests/fixtures/escalation_details_lookup.xml', 'rb') as xml:
self.data = parse(xml.read(), 'lookupResponseData')
def test_unicode(self):
self.assertIsInstance(self.data.lastModifiedBy, str)
def test_timestamp(self):
self.assertIsInstance(self.data.createTimestamp, datetime)
def test_ts_comp(self):
self.assertGreater(datetime.now(), self.data.createTimestamp)
def test_list(self):
for x in self.data.escalationNotes.iterchildren():
self.assertIsInstance(x.text, str)
class TestErrorFunctions(TestCase):
def setUp(self):
with open('tests/fixtures/multierror.xml', 'rb') as xml:
self.data = GsxError(xml=xml.read())
def test_code(self):
self.assertEqual(self.data.errors['RPR.ONS.025'],
'This unit is not eligible for an Onsite repair from GSX.')
def test_message(self):
self.assertRegex(self.data.message, 'Multiple error messages exist.')
def test_exception(self):
msg = 'Connection failed'
e = GsxError(msg)
self.assertEqual(e.message, msg)
@skip
def test_error_ca_fmip(self):
from gsxws.core import GsxResponse
xml = open('tests/fixtures/error_ca_fmip.xml', 'r').read()
with self.assertRaisesRegex(GsxError, 'A repair cannot be created'):
GsxResponse(xml=xml, el_method='CreateCarryInResponse',
el_response='repairConfirmation')
class TestLookupFunctions(RemoteDeviceTestCase):
def test_component_check(self):
l = lookups.Lookup(serialNumber=os.getenv('GSX_SN'))
l.repairStrategy = "CA"
l.shipTo = os.getenv('GSX_SHIPTO', os.getenv('GSX_SOLDTO'))
r = l.component_check()
self.assertIsInstance(r.eligibility, bool)
def test_component_check_with_parts(self):
l = lookups.Lookup(serialNumber=os.getenv('GSX_SN'))
l.repairStrategy = "CA"
l.shipTo = os.getenv('GSX_SHIPTO')
r = l.component_check([self.part])
self.assertIsInstance(r.eligibility, bool)
class TestEscalationFunctions(RemoteTestCase):
def setUp(self):
super(TestEscalationFunctions, self).setUp()
esc = escalations.Escalation()
esc.shipTo = os.getenv('GSX_SHIPTO')
esc.issueTypeCode = 'WS'
esc.notes = 'This is a test'
c1 = escalations.Context(1, self.sn)
c2 = escalations.Context(12, '2404776')
esc.escalationContext = [c1, c2]
self.escalation = esc.create()
def test_create_general_escalation(self):
self.assertTrue(self.escalation.escalationId)
def test_update_general_escalation(self):
esc = escalations.Escalation()
esc.escalationId = self.escalation.escalationId
esc.status = escalations.STATUS_CLOSED
result = esc.update()
self.assertEqual(result.updateStatus, 'SUCCESS')
def test_attach_general_escalation(self):
esc = escalations.Escalation()
esc.escalationId = self.escalation.escalationId
esc.attachment = escalations.FileAttachment(os.getenv('GSX_FILE'))
result = esc.update()
self.assertEqual(result.updateStatus, 'SUCCESS')
def test_lookup_general_escalation(self):
esc = escalations.Escalation()
esc.escalationId = self.escalation.escalationId
result = esc.lookup()
self.assertEqual(result.escalationType, 'GSX Help')
class TestSympomIssueFunctions(RemoteTestCase):
def setUp(self):
super(TestSympomIssueFunctions, self).setUp()
self._symptoms = repairs.SymptomIssue(serialNumber=self.sn).fetch()
self.symptom = self._symptoms[0][0]
def test_symptom_code(self):
self.assertIsInstance(self.symptom, int)
def test_issue_code(self):
self._issues = repairs.SymptomIssue(reportedSymptomCode=self.symptom).fetch()
self.issue = self._issues[0][0]
self.assertRegex(self.issue, r'[A-Z]+')
class TestRepairFunctions(RepairTestCase):
def test_create_carryin(self):
rep = repairs.CarryInRepair()
rep.serialNumber = self.sn
rep.unitReceivedDate = self.date
rep.unitReceivedTime = self.time
rep.orderLines = [self.part]
rep.shipTo = os.getenv('GSX_SHIPTO')
rep.poNumber = '123456'
rep.symptom = 'This is a test symptom'
rep.diagnosis = 'This is a test diagnosis'
rep.customerAddress = self.customer
rep.reportedSymptomCode = self.symptom
rep.reportedIssueCode = self.issue
rep.create()
self.assertTrue(validate(rep.dispatchId, 'dispatchId'))
def test_repair_or_replace(self):
rep = repairs.RepairOrReplace()
rep.serialNumber = os.getenv('GSX_SN')
rep.unitReceivedDate = self.date
rep.unitReceivedTime = self.time
rep.shipTo = os.getenv('GSX_SHIPTO')
rep.purchaseOrderNumber = '123456'
rep.coverageOptions = 'A1'
rep.symptom = 'This is a test symptom'
rep.diagnosis = 'This is a test diagnosis'
rep.shipper = 'XUPSN'
rep.trackingNumber = '123456'
rep.customerAddress = self.customer
rep.orderLines = [self.part]
rep.reportedSymptomCode = self.symptom
rep.reportedIssueCode = self.issue
rep.reportedSymptomCode = ''
rep.reportedIssueCode = ''
rep.create()
def test_mail_in(self):
rep = repairs.MailInRepair()
rep.serialNumber = self.sn
rep.unitReceivedDate = self.date
rep.unitReceivedTime = self.time
rep.orderLines = [self.part]
rep.shipTo = os.getenv('GSX_SHIPTO')
rep.diagnosedByTechId = os.getenv('GSX_TECHID')
rep.symptom = 'This is a test symptom'
rep.diagnosis = 'This is a test diagnosis'
rep.customerAddress = self.customer
rep.reportedSymptomCode = self.symptom
rep.reportedIssueCode = self.issue
rep.addressCosmeticDamage = False
rep.purchaseOrderNumber = '123456'
rep.soldToContact = 'Firstname Lastname'
rep.soldToContactPhone = '123456'
rep.comptia = [self.comptia_code]
rep.shipper = returns.CARRIERS[25][0]
rep.trackingNumber = '12345678'
rep.create()
def test_whole_unit_exchange(self):
rep = repairs.WholeUnitExchange()
rep.serialNumber = self.sn
rep.unitReceivedDate = self.date
rep.unitReceivedTime = self.time
rep.shipTo = os.getenv('GSX_SHIPTO')
rep.purchaseOrderNumber = '123456'
rep.symptom = 'This is a test symptom'
rep.diagnosis = 'This is a test diagnosis'
rep.poNumber = '123456'
rep.reportedSymptomCode = self.symptom
rep.reportedIssueCode = self.issue
rep.customerAddress = self.customer
rep.orderLines = [self.part]
rep.create()
def test_mark_complete(self):
rep = repairs.Repair(os.getenv('GSX_DISPATCH'))
r = rep.mark_complete(os.getenv('GSX_DISPATCH'))
result = r.repairConfirmationNumbers.confirmationNumber
self.assertEqual(result, os.getenv('GSX_DISPATCH'))
class TestPartFunction(RemoteTestCase):
def test_product_parts(self):
parts = Product(os.getenv('GSX_SN')).parts()
self.assertIsInstance(parts[0].partNumber, str)
class TestProductData(TestCase):
def test_models(self):
models = products.models()
@skip
def test_product_image(self):
product = Product(os.getenv('GSX_SN', '123456789'))
product.description = 'MacBook Air (13-inch min 2013)'
img = product.fetch_image('https://static.servoapp.com/images/products/macbook-air-13-inchmid-2013.jpg')
self.assertTrue(os.path.exists(img))
@skip
def test_part_image(self):
part = parts.Part(partNumber='661-1234')
img = part.fetch_image()
self.assertTrue(os.path.exists(img))
class TestRemoteWarrantyFunctions(TestCase):
@classmethod
def setUpClass(cls):
connect(os.getenv('GSX_USER'), os.getenv('GSX_SOLDTO'), os.getenv('GSX_ENV'))
def setUp(self):
super(TestRemoteWarrantyFunctions, self).setUp()
self.sn = os.getenv('GSX_SN')
device = Product(sn=self.sn)
self.product = Product(os.getenv('GSX_SN'))
self.wty = self.product.warranty(ship_to=os.getenv('GSX_SHIPTO'))
def test_repair_strategies(self):
self.assertEqual(self.product.repair_strategies[0], 'Carry-in')
def test_acplus_status(self):
self.assertTrue(self.wty.acPlusFlag)
def test_warranty_lookup(self):
self.assertEqual(self.wty.warrantyStatus, 'Out Of Warranty (No Coverage)')
def test_warranty_lookup_imei(self):
wty = Product(os.getenv('GSX_IMEI')).warranty()
self.assertEqual(wty.warrantyStatus, 'Out Of Warranty (No Coverage)')
def test_fmip_active(self):
self.assertTrue(self.product.fmip_is_active)
class TestLocalWarrantyFunctions(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/warranty_status.xml',
'warrantyDetailInfo')
def test_product_type(self):
product = Product('DGKFL06JDHJP')
product.description='MacBook Pro (17-inch, Mid 2009)'
self.assertTrue(product.is_mac)
product.description='iMac (27-inch, Late 2013)'
self.assertTrue(product.is_mac)
product.description='iPhone 5'
self.assertTrue(product.is_iphone)
product.description = 'iPad 2 3G'
self.assertTrue(product.is_ipad)
self.assertTrue(product.is_ios)
def test_purchase_date(self):
self.assertIsInstance(self.data.estimatedPurchaseDate, date)
def test_config_description(self):
self.assertEqual(self.data.configDescription, 'IPHONE 4,16GB BLACK')
def test_limited_warranty(self):
self.assertTrue(self.data.limitedWarranty)
def test_parts_covered(self):
self.assertIsInstance(self.data.partCovered, bool)
self.assertTrue(self.data.partCovered)
class TestRepairDiagnostics(RemoteTestCase):
def setUp(self):
super(TestRepairDiagnostics, self).setUp()
self.results = diagnostics.Diagnostics(serialNumber=os.getenv('GSX_SN')).fetch()
def test_diag_result(self):
self.assertEqual(self.results.eventHeader.serialNumber, os.getenv('GSX_SN'))
def test_result_timestamp(self):
ts = gsx_diags_timestamp(self.results.eventHeader.startTimeStamp)
self.assertIsInstance(ts, datetime)
class TestIosDiagnostics(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/ios_diagnostics.xml',
'lookupResponseData')
def test_sn(self):
self.assertEqual(self.data.diagnosticTestData.testContext.serialNumber,
"XXXXXXXXXXXX")
def test_result(self):
data = self.data.diagnosticTestData.testResult
for i in data.result:
logging.debug("%s: %s" % (i.name, i.value))
self.assertEqual(data.result[1].name, "FULLY_CHARGED")
def test_profile(self):
data = self.data.diagnosticProfileData.profile
for i in data.unit.key:
logging.debug("%s: %s" % (i.name, i.value))
self.assertEqual(data.unit.key[1].value, "fliPhone")
def test_report(self):
data = self.data.diagnosticProfileData.report
for i in data.reportData.key:
logging.debug("%s: %s" % (i.name, i.value))
self.assertEqual(data.reportData.key[0].name, "LAST_USAGE_LENGTH")
class TestOnsiteCoverage(RemoteTestCase):
def setUp(self):
super(TestOnsiteCoverage, self).setUp()
self.product = Product(os.getenv('GSX_SN'))
self.product.warranty()
def test_has_onsite(self):
self.assertTrue(self.product.has_onsite)
def test_coverage(self):
self.assertTrue(self.product.parts_and_labor_covered)
def test_is_vintage(self):
self.assertFalse(self.product.is_vintage)
class TestActivation(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/ios_activation.xml',
'activationDetailsInfo')
def test_unlock_date(self):
self.assertIsInstance(self.data.unlockDate, date)
def test_unlocked(self):
self.assertIs(type(self.data.unlocked), bool)
self.assertTrue(self.data.unlocked)
p = Product(os.getenv('GSX_SN'))
self.assertTrue(p.is_unlocked(self.data))
def test_imei(self):
self.assertEqual(self.data.imeiNumber, '010648001526755')
class TestPartsLookup(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/parts_lookup.xml',
'PartsLookupResponse')
self.part = self.data.parts[0]
def test_parts(self):
self.assertEqual(len(self.data.parts), 3)
def test_exchange_price(self):
self.assertEqual(self.part.exchangePrice, 14.4)
def test_stock_price(self):
self.assertEqual(self.part.stockPrice, 17.1)
def test_serialized(self):
self.assertIsInstance(self.part.isSerialized, bool)
self.assertTrue(self.part.isSerialized)
def test_description(self):
self.assertEqual(self.part.partDescription, 'SVC,REMOTE')
class TestOnsiteDispatchDetail(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/onsite_dispatch_detail.xml',
'onsiteDispatchDetails')
def test_details(self):
self.assertEqual(self.data.dispatchId, 'G101260028')
def test_address(self):
self.assertEqual(self.data.primaryAddress.zipCode, 85024)
self.assertEqual(self.data.primaryAddress.firstName, 'Christopher')
def test_orderlines(self):
self.assertIsInstance(self.data.dispatchOrderLines.isSerialized, bool)
class RepairUpdateTestCase(RemoteTestCase):
def setUp(self):
super(RepairUpdateTestCase, self).setUp()
self.dispatchId = 'G210427158'
self.repair = repairs.CarryInRepair(self.dispatchId)
def test_set_status_open(self):
result = self.repair.set_status('BEGR')
self.assertEqual(result.confirmationNumber, self.dispatchId)
def test_set_status_ready(self):
result = self.repair.set_status('RFPU')
self.assertEqual(result.confirmationNumber, self.dispatchId)
def test_set_repair_techid(self):
result = self.repair.set_techid(os.getenv('GSX_TECHID'))
self.assertEqual(result.confirmationNumber, self.dispatchId)
class TestCarryinRepairDetail(TestCase):
def setUp(self):
self.data = parse('tests/fixtures/repair_details_ca.xml',
'lookupResponseData')
def test_details(self):
self.assertEqual(self.data.dispatchId, 'G2093174681')
def test_unicode_name(self):
self.assertEqual(self.data.primaryAddress.firstName, u'Ääkköset')
class ConnectionTestCase(TestCase):
"""Basic connection tests."""
def test_access_denied(self):
"""Make sure we fail with 403 when connecting from non-whitelisted IP."""
with self.assertRaisesRegex(GsxError, 'Access denied'):
connect(os.getenv('GSX_USER'), os.getenv('GSX_SOLDTO'),
os.getenv('GSX_ENV'))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
GsxCache.nukeall() # to avoid pickle errors between different Python versions
main()
| bsd-2-clause | -4,900,335,449,283,007,000 | 33.393548 | 112 | 0.646173 | false |
freakboy3742/django-push-notifications | push_notifications/fields.py | 12 | 2892 | import re
import struct
from django import forms
from django.core.validators import RegexValidator
from django.db import models, connection
from django.utils import six
from django.utils.translation import ugettext_lazy as _
__all__ = ["HexadecimalField", "HexIntegerField"]
hex_re = re.compile(r"^(([0-9A-Fa-f])|(0x[0-9A-Fa-f]))+$")
postgres_engines = [
"django.db.backends.postgresql_psycopg2",
"django.contrib.gis.db.backends.postgis",
]
class HexadecimalField(forms.CharField):
"""
A form field that accepts only hexadecimal numbers
"""
def __init__(self, *args, **kwargs):
self.default_validators = [RegexValidator(hex_re, _("Enter a valid hexadecimal number"), "invalid")]
super(HexadecimalField, self).__init__(*args, **kwargs)
def prepare_value(self, value):
# converts bigint from db to hex before it is displayed in admin
if value and not isinstance(value, six.string_types) \
and connection.vendor in ("mysql", "sqlite"):
value = hex(value).rstrip("L")
return super(forms.CharField, self).prepare_value(value)
class HexIntegerField(six.with_metaclass(models.SubfieldBase, models.BigIntegerField)):
"""
This field stores a hexadecimal *string* of up to 64 bits as an unsigned integer
on *all* backends including postgres.
Reasoning: Postgres only supports signed bigints. Since we don't care about
signedness, we store it as signed, and cast it to unsigned when we deal with
the actual value (with struct)
On sqlite and mysql, native unsigned bigint types are used. In all cases, the
value we deal with in python is always in hex.
"""
def db_type(self, connection):
engine = connection.settings_dict["ENGINE"]
if "mysql" in engine:
return "bigint unsigned"
elif "sqlite" in engine:
return "UNSIGNED BIG INT"
else:
return super(HexIntegerField, self).db_type(connection=connection)
def get_prep_value(self, value):
if value is None or value == "":
return None
if isinstance(value, six.string_types):
value = int(value, 16)
# on postgres only, interpret as signed
if connection.settings_dict["ENGINE"] in postgres_engines:
value = struct.unpack("q", struct.pack("Q", value))[0]
return value
def to_python(self, value):
if isinstance(value, six.string_types):
return value
if value is None:
return ""
# on postgres only, re-interpret from signed to unsigned
if connection.settings_dict["ENGINE"] in postgres_engines:
value = hex(struct.unpack("Q", struct.pack("q", value))[0])
return value
def formfield(self, **kwargs):
defaults = {"form_class": HexadecimalField}
defaults.update(kwargs)
# yes, that super call is right
return super(models.IntegerField, self).formfield(**defaults)
def run_validators(self, value):
# make sure validation is performed on integer value not string value
return super(models.BigIntegerField, self).run_validators(self.get_prep_value(value))
| mit | 553,495,842,151,306,100 | 33.023529 | 102 | 0.728216 | false |
Portugol-Studio-TTC/Portugol-GoGoBoard-Plugin | Portugol-GoGoBoard-Plugin/src/main/resources/br/univali/portugol/plugin/gogoboard/compilador/logoc.py | 1 | 56708 | #
# Tinker Logo Compiler - A lexical analyzer and parser for a
# Logo language for Robotics
#
# Copyright (C) 2014 Chiang Mai University
# Contact Arnan (Roger) Sipiatkiat [[email protected]]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
LOGO_VERSION = 1.3
import sys
import ply.lex as lex
import ply.yacc as yacc
import re
#import tkMessageBox
class tinkerLogo:
def __init__(self):
pass
def version(self):
return LOGO_VERSION
def compile(self, LogoCodeString):
# ======================================
# Run the program through a first-pass
# parser, which determines the name and
# parameters of procedures
# ======================================
self.fpp = firstPassParser()
self.logoProcedures = self.fpp.parseProcedures(LogoCodeString)
self.autoRunState = self.fpp.autoRunState
#print fpp.procedures_dict
# ======================================
# Run the Logo parser
# ======================================
self.logo_parser = tinkerParser(self.fpp.procedures_dict, self.fpp.ordered_procedures_list)
self.logo_parser.parse(LogoCodeString)
#self.autoRunState = self.logo_parser.autoRunState
# =====================================================================
# Post-Processing - Replace ALL symbols with actual procedure addresses
# =====================================================================
#self.logo_parser.logoByteCodeString = '0,' + self.logo_parser.logoByteCodeString
#print "Byte Code String: \r\n" + self.logo_parser.logoByteCodeString + "\r\n"
#self.logo_parser.logoByteCodeString = self.logo_parser.logoByteCodeString.strip(',')
# convert the program string into list items by splitting commas and stripping whitespaces
# note that we remove the trailing ', ' (comma and space)
self.logo_bytecode_list = [x.strip() for x in self.logo_parser.logoByteCodeString[:-2].split(',')]
for item_index in range(len(self.logo_bytecode_list)):
# locate the CALL commands
# Note that CALL will follow by two address bytes, which are now given placeholders using the procedure name
# For example, "CALL, test, test" means we want to call a procedure named 'test'
# we need to replace 'test, test' with two bytes representing a 16-bit address
if self.logo_bytecode_list[item_index] == 'CALL':
self.procedure_name = self.logo_bytecode_list[item_index+1]
self.call_address = self.logo_parser.procedures_address_dict[self.procedure_name] # retrieve the procedure address
self.logo_bytecode_list[item_index + 1] = str(self.call_address >> 8) # high byte
self.logo_bytecode_list[item_index + 2] = str(self.call_address & 0xff) # low byte
item_index += 2
# if a compile error occurred, produce no byte code
if self.logo_bytecode_list[0] == '':
self.logo_bytecode = ""
return
#print self.logo_bytecode_list
# ==============================================
# Translate symbols to byte code
# ==============================================
# The order of the items is important. It determines the binary code number of the commands
self.logo_bytecode_lookup = {
'CODE_END': 0,
'NUM8': 1,
'NUM16': 2,
'LIST': 3,
'EOL': 4,
'EOLR': 5,
'INPUT': 6,
'STOP': 7,
'OUTPUT': 8,
'REPEAT': 9,
'IF': 10,
'IFELSE': 11,
'BEEP': 12,
'NOTE': 13,
'WAITUNTIL': 14,
'FOREVER': 15,
'WAIT': 16,
'TIMER': 17,
'RESETT': 18,
'SEND': 19,
'IR': 20,
'NEWIR': 21,
'RANDOM': 22,
'OP_PLUS': 23,
'OP_MINUS': 24,
'OP_MULTIPLY': 25,
'OP_DIVISION': 26,
'OP_MODULO': 27, # i.e. 5 % 3 = 2
'OP_EQUAL': 28,
'OP_GREATER': 29,
'OP_LESS': 30,
'OP_AND': 31,
'OP_OR': 32,
'OP_XOR': 33,
'OP_NOT': 34,
'SETGLOBAL': 35,
'GETGLOBAL': 36,
'ASET': 37,
'AGET': 38,
'RECORD': 39,
'RECALL': 40,
'RESETDP': 41,
'SETDP': 42,
'ERASE': 43,
'WHEN': 44,
'WHENOFF': 45,
'M_A': 46,
'M_B': 47,
'IF_STATE_CHANGE': 48, # executes only when the condition's state changes from false to true
'ON': 49,
'ONFOR': 50,
'OFF': 51,
'THISWAY': 52,
'THATWAY': 53,
'RD': 54,
'READSENSOR': 55,
# 'SENSOR2': 56,
'READSWITCH': 56,
# 'SWITCH2': 58,
'SETPOWER': 59,
'BRAKE': 60,
'OP_GREATER_OR_EQUAL': 61, # >=
'OP_LESS_OR_EQUAL': 62, # <=
'TALK_TO_NODE': 63, # set the target remote node ID
'ISON': 64, # returns true if specified motor is on
'ISOFF': 65, # true if specified motor is off
'ISTHISWAY': 66, # returns true if direction is thisway
'ISTHATWAY': 67, # returns true if direction is thatway
'STOPALL': 68,
'EB': 69,
'DB': 70,
'LOWBYTE': 71,
'HIGHBYTE': 72,
# 'SENSOR3': 73,
# 'SENSOR4': 74,
# 'SENSOR5': 75,
# 'SENSOR6': 76,
# 'SENSOR7': 77,
# 'SENSOR8': 78,
# 'SWITCH3': 79,
# 'SWITCH4': 80,
# 'SWITCH5': 81,
# 'SWITCH6': 82,
# 'SWITCH7': 83,
# 'SWITCH8': 84,
'LEDON': 85,
'LEDOFF': 86,
'SETH': 87,
'LT': 88,
'RT': 89,
'TALKTO': 90,
'GETPOWER': 91, # returns current power level
'CL_I2C_STOP': 92,
'CL_I2C_WRITE': 93,
'CL_I2C_READ': 94,
'SERIAL': 95,
'NEWSERIAL': 96,
'RTC_INIT': 97,
'RTC_GET_ITEM': 98,
'SHOW': 99,
'CLS': 100,
'GETPOS': 101,
'SETPOS': 102,
'TALK_TO_7SEG_1': 103,
'TALK_TO_7SEG_2': 104,
'TALK_TO_LCD_1': 105,
'TALK_TO_LCD_2': 106,
'I2C_WRITE_REGISTER': 107,
'I2C_READ_REGISTER': 108,
'LONG_LIST': 109,
'CALL': 110 ,
'STRING': 111,
'SETTICKRATE': 112,
'TICKCOUNT': 113,
'CLEARTICK': 114,
# Raspberry Pi commands
# ==========================================
'USECAMERA': 200,
'CLOSECAMERA': 201,
'STARTFINDFACE': 202,
'STOPFINDFACE': 203,
'FACEFOUND': 204,
'TAKESNAPSHOT': 205,
'CAMERAISON': 206,
'ISFINDINGFACE': 207,
'USESMS': 210,
'SENDSMS': 211,
'SENDMAIL': 212,
'SENDSNAPSHOT': 213,
'PLAYSOUND': 214,
'STOPSOUND': 215,
'SHOWIMAGE': 216,
'SCREENTAPPED': 217,
'WIFICONNECT': 218,
'WIFIDISCONNECT': 219,
'REBOOT': 220,
'SHUTDOWN': 221,
# creates a new data record file for the given variable
'NEWRECORDFILE': 222,
'RECORD_TO_RPI': 223,
'SHOWLOGPLOT': 224,
# RFID byte codes
'USERFID': 227,
'CLOSERFID': 228,
'RFIDBEEP': 229,
'READRFID': 230,
'WRITERFID': 231,
'RFIDTAGFOUND': 232,
'RFIDREADERFOUND': 233,
#text to voice
'SAY': 234,
# key-value lookup and compare
'KEY': 235,
'OP_KEY_COMPARE': 236,
'INTKEY': 237,
'CLEARKEYS': 238,
'SEND_MESSAGE': 239,
}
self.logo_bytecode = ""
for item in self.logo_bytecode_list:
if item.isdigit():
self.logo_bytecode += chr(int(item))
else:
self.logo_bytecode += chr(self.logo_bytecode_lookup[item.upper()])
self.bin_code = ""
for c in self.logo_bytecode:
self.bin_code += str(ord(c)) + ", "
print ("Raw byte code: \r\n" + self.bin_code)
def byteCode(self):
' Returns a binary code string created by the compiler'
return self.logo_bytecode
class firstPassParser:
def __init__(self):
pass
def parseProcedures(self, sourceText):
''' identifies the Logo procedures in sourceText and records each
procedure's name and parameter list in self.procedures_dict and
self.ordered_procedures_list. It also strips the "to ... end" wrapper
and detects an "autorun" first procedure (setting self.autoRunState).
'''
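# Illustration (assumed Logo source handled by this parser):
#
#   to blink :n
#   repeat :n [beep wait 5]
#   end
#
# leaves self.procedures_dict == {'blink': ['n']} and
# self.ordered_procedures_list == ['blink'].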
logoProcedureList = []
# -----------------------------
# identify procedures
# -----------------------------
#
# this search will split the procedures into a list. The syntax "to" and "end" are
# stripped away
r = re.compile('^[ \t]*to[ \t]+(.*?)^[ \t]*end[ \t]*$', re.MULTILINE | re.IGNORECASE | re.DOTALL )
procedures = r.findall(sourceText)
self.procedures_dict = {}
self.ordered_procedures_list = [] # holds a list of procedure name ordered by the sequence in the code.
# we need it since the procedures_dict by definition does not store
# the items in the same order it was inserted. This causes errors
# in the parser when looking up local variables.
# --------------------------------------
# identify procedure name and parameters
# --------------------------------------
self.isFirstProcedure = True
self.autoRunState = False
for procedureText in procedures:
print ("Procedure:")
print ("--------------------------")
print ("Name: " + procedureText)
firstLine = re.search('.*?\n', procedureText, re.MULTILINE).group()
# remove the first line from the procedure
# make sure the count=1 option is set to prevent accidental
# removal of lines that are the same as the first line i.e.
#
# to tail :i
# make "i :i + 1
# tail :i
# end
#
# when to/end are stripped by the first pass parser, we have
#
# tail :i
# make "i :i + 1
# tail :i
#
# this causes us to lose the last command if count=1 is not set
procedureText = re.sub(firstLine, '', procedureText, count=1)
# parse the first line
tokens = re.findall('([:"]*.*?)[ \t\n]+', firstLine)
if tokens is None:
# raise an error -> or maybe just skip it
# there is no procedure name. the procedure probably looks like this
#
# to
# end
print ("Warning: procedure without a name was found")
continue
# first token is the procedure name
procName = tokens[0]
if procName.lower() == "autorun" and self.isFirstProcedure:
self.autoRunState = True
self.isFirstProcedure = False
# check if the procedure name is valid
if not self.nameIsValid(procName):
# Invalid procedure name
print ("Error: procedure name %s is invalid" % procName)
continue
tokens = tokens[1:]
parameter_list = []
# check for parameters
if len(tokens) > 0:
for i, parameter in enumerate(tokens):
tokens[i] = parameter.lstrip('":') # remove any preceding quotes (") or colons (:)
parameter_list.append(tokens[i])
self.procedures_dict[procName] = parameter_list
self.ordered_procedures_list.append(procName)
## do this checking at runtime instead
##
## if not self.nameIsValid(tokens[i]):
## print "Error: invalid parameter name %s in procedure %s" % (parameter, procName)
## # should raise an error
# put the parsed procedure information into a LogoProcedure object
# LogoProcedureObj = cmdClasses.LogoProcedure(procName)
# LogoProcedureObj.parameterNames = tokens
# LogoProcedureObj.statementsSource = procedureText
#
# # add the procedure to the procedure list
# logoProcedureList.append(LogoProcedureObj)
#
# return logoProcedureList
def nameIsValid(self, nameString):
''' verifies if the nameString is a valid name
That is, it must begin with a letter or underscore, followed by
zero or more word characters (letters, digits, or underscores)
'''
return (re.match(r'^[a-zA-Z_]\w*$', nameString) is not None)
class tinkerGoGoLex:
#Reserved words
reserved = {
## 'SYMBOL' ; 'SYMBOL for use in the gramma definition'
# 'to' : 'TO',
'end' : 'END',
# Core program statements
# 'aset' : 'ASET',
# 'display_show' : 'DISPLAY_SHOW',
'forever' : 'FOREVER',
'repeat' : 'REPEAT',
'if' : 'IF',
'ifelse' : 'IFELSE',
'ifstatechange' : 'IF_STATE_CHANGE',
'waituntil' : 'WAITUNTIL',
'set' : 'SET',
# statements with no arguments
'stop' : 'STOP',
'beep' : 'BEEP',
'resett' : 'RESETT',
'resetdp' : 'RESETDP',
'on' : 'M_ON',
'off' : 'M_OFF',
'thisway' : 'M_THISWAY',
'cw' : 'M_CW',
'thatway' : 'M_THATWAY',
'ccw' : 'M_CCW',
'rd' : 'M_RD',
'stopall' : 'STOPALL',
'ledon' : 'ULED_ON',
'ledoff' : 'ULED_OFF',
'rtc_init' : 'RTC_INIT',
'cls' : 'DISPLAY_CLS',
'talk_to_7seg_1': 'TALK_TO_7SEG_1',
'talk_to_7seg_2': 'TALK_TO_7SEG_2',
'talk_to_lcd_1' : 'TALK_TO_LCD_1',
'talk_to_lcd_2' : 'TALK_TO_LCD_2',
# statements with one number (or expression) argument
'output' : 'OUTPUT',
'wait' : 'WAIT',
# 'send' : 'SEND',
'onfor' : 'ONFOR',
'record' : 'RECORD',
'setdp' : 'SETDP',
'erase' : 'ERASE',
'setpower' : 'SETPOWER',
'seth' : 'SERVO_SET_H',
'lt' : 'SERVO_LT',
'rt' : 'SERVO_RT',
'talk_to_motor' : 'TALK_TO_MOTOR',
'setpos' : 'DISPLAY_SET_POS',
'show' : 'DISPLAY_SHOW',
'i2cwrite' : 'I2C_WRITE',
# voice recorder/player module
'play' : 'PLAY',
'nexttrack' : 'NEXT_TRACK',
'prevtrack' : 'PREV_TRACK',
'gototrack' : 'GOTO_TRACK',
'erasetracks' : 'ERASE_TRACKS',
# statements with two number arguments
# 'note' : 'NOTE',
# statements with three number arguments
# 'i2c_write_register' : 'I2C_WRITE_REGISTER',
# Expressions with no parameters
'timer' : 'TIMER',
'ir' : 'IR',
'recall' : 'RECALL',
'serial' : 'SERIAL',
'getpos' : 'DISPLAY_GET_POS',
# sensor and switch aliases
'sensor1' : 'SENSOR1',
'sensor2' : 'SENSOR2',
'sensor3' : 'SENSOR3',
'sensor4' : 'SENSOR4',
'sensor5' : 'SENSOR5',
'sensor6' : 'SENSOR6',
'sensor7' : 'SENSOR7',
'sensor8' : 'SENSOR8',
'switch1' : 'SWITCH1',
'switch2' : 'SWITCH2',
'switch3' : 'SWITCH3',
'switch4' : 'SWITCH4',
'switch5' : 'SWITCH5',
'switch6' : 'SWITCH6',
'switch7' : 'SWITCH7',
'switch8' : 'SWITCH8',
# real-time-clock commands
'seconds' : 'SECONDS',
'minutes' : 'MINUTES',
'hours' : 'HOURS',
'dow' : 'DOW', # Day of week
'day' : 'DAY',
'month' : 'MONTH',
'year' : 'YEAR', # 0 = the year 2000
# expression with one parameter
'readsensor' : 'READ_SENSOR',
'readswitch' : 'READ_SWITCH',
'random' : 'RANDOM',
'lowbyte' : 'LOWBYTE',
'highbyte' : 'HIGHBYTE',
'i2cread' : 'I2C_READ',
'settickrate' : 'SET_TICK_RATE',
'tickcount' : 'TICK_COUNT',
'cleartick' : 'CLEAR_TICK',
# Raspberry Pi commands
'usecamera' : 'USE_CAMERA',
'closecamera' : 'CLOSE_CAMERA',
'startfindface' : 'START_FIND_FACE',
'stopfindface' : 'STOP_FIND_FACE',
'takesnapshot' : 'TAKE_SNAP_SHOT',
'cameraison' : 'CAMERA_IS_ON',
'isfindingface' : 'IS_FINDING_FACE',
'sendmail' : 'SEND_MAIL',
'usesms' : 'USE_SMS',
'sendsms' : 'SEND_SMS',
# 'SENDSNAPSHOT' : 'SEND_SNAPSHOT',
'playsound' : 'PLAY_SOUND',
'stopsound' : 'STOP_SOUND',
'showimage' : 'SHOW_IMAGE',
'newrecordfile' : 'NEW_RECORD_FILE',
'showlogplot' : 'SHOW_LOG_PLOT',
'userfid' : 'USE_RFID',
'closerfid' : 'CLOSE_RFID',
'rfidbeep' : 'RFID_BEEP',
'readrfid' : 'RFID_READ',
'writerfid' : 'RFID_WRITE',
'say' : 'SAY',
'ison' : 'IS_ON',
'isoff' : 'IS_OFF',
'isthisway' : 'IS_THISWAY',
'isthatway' : 'IS_THATWAY',
'getpower' : 'GET_POWER',
'key' : 'GET_KEY_VALUE',
'intkey': 'GET_KEY_INT_VALUE',
'clearkeys': 'CLEARKEYS',
'sendmessage': 'SEND_MESSAGE',
}
tokens = \
[
'NAME', 'NUMBER',
# 'LIST_BLOCK',
'OP_PLUS', 'OP_MINUS', 'OP_MULTIPLY', 'OP_DIVISION', 'OP_MODULO',
'LPAREN', 'RPAREN', 'LBRACKET','RBRACKET',
'OP_LESS', 'OP_LESS_OR_EQUAL','OP_GREATER', 'OP_GREATER_OR_EQUAL', 'OP_EQUAL', 'OP_AND', 'OP_OR', 'OP_XOR','OP_NOT',
'NEWIR', 'NEWSERIAL',
'FACE_FOUND', 'SCREEN_TAPPED',
'RFID_TAG_FOUND', 'RFID_READER_FOUND',
'TALKTO',
'TALK_TO_NODE', 'NODE_ID',
'PROCEDURE_NAME_DECLARATION',
'PARAMETER',
'STRING',
# 'COMMA',
# 'NEWLINE',
#'DELIMITER',
'PROCEDURE_CALL_0_PARAM',
'PROCEDURE_CALL_1_PARAM',
'PROCEDURE_CALL_2_PARAM',
'PROCEDURE_CALL_3_PARAM',
'PROCEDURE_CALL_4_PARAM',
'PROCEDURE_CALL_5_PARAM',
'PROCEDURE_CALL_6_PARAM',
] + list(reserved.values())
# Tokens
# t_OP_PLUS = r'\+'
# t_OP_MINUS = r'-'
# t_OP_MULTIPLY= r'\*'
# t_OP_DIVISION = r'/'
# t_OP_EQUAL = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
# t_OP_LESS = r'<'
# t_OP_GREATER = r'>'
def __init__(self, procedures_dict):
self.lexer = lex.lex(module=self)
self.procedures_dict = procedures_dict
def t_COMMENT(self,t):
r';.*'
pass
def t_OP_PLUS(self,t):
r'\+'
t.value = "OP_PLUS"
return t
def t_OP_MINUS(self,t):
r'-'
t.value = "OP_MINUS"
return t
def t_OP_MULTIPLY(self,t):
r'\*'
t.value = "OP_MULTIPLY"
return t
def t_OP_DIVISION(self,t):
r'/'
t.value = "OP_DIVISION"
return t
def t_OP_MODULO(self,t):
r'%'
t.value = "OP_MODULO"
return t
def t_OP_GREATER_OR_EQUAL(self, t):
r'>='
t.value = "OP_GREATER_OR_EQUAL"
return t
def t_OP_LESS_OR_EQUAL(self, t):
r'<='
t.value = "OP_LESS_OR_EQUAL"
return t
def t_OP_LESS(self,t):
r'<'
t.value = "OP_LESS"
return t
def t_OP_GREATER(self,t):
r'>'
t.value = "OP_GREATER"
return t
def t_OP_EQUAL(self,t):
r'='
t.value = "OP_EQUAL"
return t
def t_OP_AND(self,t):
r'and'
t.value = "OP_AND"
return t
def t_OP_OR(self,t):
r'or'
t.value = "OP_OR"
return t
def t_OP_XOR(self,t):
r'xor'
t.value = "OP_XOR"
return t
def t_OP_NOT(self,t):
r'not'
t.value = "OP_NOT"
return t
def t_PARAMETER(self, t):
r'\:[a-zA-Z_][a-zA-Z0-9_]*'
return t
def t_NUMBER(self, t):
r'\d+'
try:
t.value = int(t.value)
if t.value < 256:
t.value = 'NUM8, '+str(t.value)
else:
t.value = 'NUM16, '+str(t.value >> 8)+', '+str(t.value & 0xff)
except ValueError:
print("Integer value too large %d", t.value)
t.value = 0
return t
# Ignored characters
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
def t_TALKTO(self, t):
r'[abcd]+\,'
t.type = "TALKTO"
return t
def t_TALK_TO_NODE(self,t):
r'talkto'
t.type = "TALK_TO_NODE"
t.value = "TALK_TO_NODE"
return t
def t_ISON(self, t):
r'[abcd]+on\?'
t.type = "IS_ON"
return t
def t_ISOFF(self, t):
r'[abcd]+off\?'
t.type = "IS_OFF"
return t
def t_ISTHISWAY(self, t):
r'[abcd]+thisway\?'
t.type = "IS_THISWAY"
return t
def t_ISCCW(self, t):
r'[abcd]+ccw\?'
t.type = "IS_THATWAY"
t.value = t.value[:-4] + 'thatway?'
#print "TEST " + t.value
return t
def t_ISCW(self, t):
r'[abcd]+cw\?'
t.type = "IS_THISWAY"
t.value = t.value[:-3] + 'thisway?'
return t
def t_ISTHATWAY(self, t):
r'[abcd]+thatway\?'
t.type = "IS_THATWAY"
return t
def t_GETPOWER(self, t):
r'[abcd]power'
t.type = "GET_POWER"
return t
def t_NODE_ID(self, t):
r'n[0-9][0-9]*'
t.type = "NODE_ID"
t.value = int(t.value[1:]) # remove the preceeding 'n'
return t
def t_NEWIR(self, t):
r'newir\?'
t.type = "NEWIR"
t.value = "NEWIR"
return t
def t_NEWSERIAL(self, t):
r'newserial\?'
t.type = "NEWSERIAL"
t.value = "NEWSERIAL"
return t
def t_FACE_FOUND(self, t):
r'facefound\?'
t.type = "FACE_FOUND"
t.value = "FACEFOUND"
return t
def t_SCREEN_TAPPED(self, t):
r'screentapped\?'
t.type = "SCREEN_TAPPED"
t.value = "SCREENTAPPED"
return t
def t_RFID_TAG_FOUND(self, t):
r'rfidtagfound\?'
t.type = "RFID_TAG_FOUND"
t.value = "RFIDTAGFOUND"
return t
def t_RFID_READER_FOUND(self, t):
r'rfidreaderfound\?'
t.type = "RFID_READER_FOUND"
t.value = "RFIDREADERFOUND"
return t
def t_PROCEDURE_NAME_DECLARATION(self,t):
r'to(\s)+[a-zA-Z_][a-zA-Z_0-9]*'
t.value = t.value[2:].lstrip()
return t
def t_STRING(self, t):
r'".*?"'
# the '?' makes the match non-greedy. This helps when more than one string is on the same line
t.type = "STRING"
t.value = t.value[1:-1] # return the string without quotes
return t
def t_ID(self, t):
r'[a-zA-Z_][a-zA-Z_0-9]*'
if t.value in self.reserved:
t.type = self.reserved.get(t.value,'ID') # Check for reserved words
# if the name is a procedure call -> set the type to PROCEDURE_CALL_X_PARAM
# where X is the number of parameters. The number of parameters is kept
# in the procedures_dict
elif t.value in self.procedures_dict:
t.type = "PROCECURE_CALL_" + str(len(self.procedures_dict[t.value])) + "_PARAM"
self.showLog ("Found procedure call. Type = " + t.type + ", value = " + t.value)
else:
t.type = "NAME"
return t
def showLog(self, message):
pass
# print message
def t_error(self, t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# lex.lex()
# lex.input(logoString)
# print logoString
#
# while True:
# tok = lex.token()
# if not tok: break
# print tok.type + "," + tok.value
class tinkerParser:
# =========================================================
# Parsing rules
# =========================================================
precedence = (
('left','OP_AND', 'OP_OR', 'OP_XOR'),
('left','OP_LESS', 'OP_LESS_OR_EQUAL', 'OP_GREATER','OP_GREATER_OR_EQUAL', 'OP_EQUAL'),
('right', 'OP_NOT'),
('left','OP_PLUS','OP_MINUS'),
('left','OP_MULTIPLY','OP_DIVISION', 'OP_MODULO'),
# ('right','UMINUS'),
('right', 'RANDOM', 'LOWBYTE', 'HIGHBYTE', 'I2C_READ', 'READ_SENSOR', 'READ_SWITCH'),
('right', 'NAME'),
('right', 'PROCEDURE_CALL_0_PARAM', 'PROCEDURE_CALL_1_PARAM', 'PROCEDURE_CALL_2_PARAM',
'PROCEDURE_CALL_3_PARAM', 'PROCEDURE_CALL_4_PARAM', 'PROCEDURE_CALL_5_PARAM', 'PROCEDURE_CALL_6_PARAM',
'I2C_WRITE'
)
)
def __init__(self, procedures_dict, ordered_procedures_list):
self.lexer = tinkerGoGoLex(procedures_dict)
self.tokens = self.lexer.tokens
self.parser = yacc.yacc(module=self, debug=True)
self.procedures_dict = procedures_dict
self.ordered_procedures_list = ordered_procedures_list
self.procedures_address_dict = {} # stores the byte address of each procedure
self.current_procedure_index = 0 # current procedure index of the procedures_dict
self.logoByteCodeString = "" # stores the compiled procedure string. It will be later translated into
# the binary code.
self.procedure_len_counter = 0 # tracks the procedure length. Used to calculate the procedure call address
self.lineno = 0
self.procedures = []
self.autoRunState = False
self.if_state_change_counter = 0 # used to assign a unique id for each if-state-change command. This id
# is used in the logo vm to track the state of the condition
def parse(self, data):
if data:
return self.parser.parse(data,self.lexer.lexer,0,0,None)
else:
return []
def p_procedures(self, t):
''' procedures : procedure
| procedure procedures
'''
if len(t) == 2:
t[0] = t[1]
elif len(t) == 3:
t[0] = t[1]
def p_procedure(self, t):
''' procedure : PROCEDURE_NAME_DECLARATION statements END
| PROCEDURE_NAME_DECLARATION parameters statements END
'''
self.showLog("-> Procedure: " + t[1])
if t[1] in self.procedures:
self.showLog("Error: Procedure " + t[1] + " already defined.")
else:
self.procedures.append(t[1])
# if this is the first procedure -> it will be run when the 'run' button is pressed.
# make sure we stop the program when this procedure ends
main_procedure = True if self.procedures[0] == t[1] else False
if len(t) == 4:
t[0] = t[2]
elif len(t) == 5:
t[0] = t[3]
self.showLog("Prameters = " + t[2])
# ----------------------------------------------------
# Clean up
# ----------------------------------------------------
# Remove any preceding commas
#if t[0][0] == ',':
# t[0] = t[0][1:]
t[0] = t[0].lstrip(',')
# Remove any empty commands ", ,"
t[0] = t[0].replace(", ,",",")
# ----------------------------------------------------
# Add procedure parameter count
# ----------------------------------------------------
# The procedure body starts with a number indicating how many parameters it has.
# We look up this value from the procedures_dict.
if not main_procedure:
t[0] = str(len(self.procedures_dict[t[1]])) + ", " + t[0]
# ----------------------------------------------------
# Add code end
# ----------------------------------------------------
if main_procedure == True:
t[0] += ", CODE_END, "
else:
t[0] += ", STOP, "
self.procedures_address_dict[t[1]]= self.procedure_len_counter
self.procedure_len_counter += len(t[0][:-2].split(','))
self.showLog("Procedure Content = " + t[0])
self.logoByteCodeString += t[0]
self.current_procedure_index += 1 # point to the next procedure. Used in processing variables
def p_statements(self, t):
''' statements : statement
| statement statements
'''
# self.showLog("----------")
# for c in t:
# print c
if len(t) == 3:
# self.showLog("--> statement statements")
t[0] = t[1].rstrip(',') + ", " + t[2].lstrip(',')
elif len(t) == 2:
# self.showLog("-->statement")
t[0] = t[1]
else:
# self.showLog("--> not matched")
pass
# def p_statement_expr(self, t):
# 'statement : expression'
# t[0] = t[1]
def p_expression_statement(self, t):
'expression : procedure_call'
t[0] = t[1]
def p_parameters(self, t):
''' parameters : PARAMETER
| PARAMETER parameters
'''
if len(t) == 2:
t[0] = t[1]
elif len(t) == 3:
t[0] = t[1].rstrip(',') + ", " + t[2].lstrip(',')
# def p_statement_assign(self, t):
# 'statement : NAME EQUALS expression'
# names[t[1]] = t[3]
# list of names
global_variables = []
def p_statement_assign(self, t):
'statement : SET NAME expression'
if not t[2] in self.global_variables:
self.global_variables.append(t[2])
t[0] = "NUM8, " + str( self.global_variables.index(t[2])) + ", " + t[3] + ", SETGLOBAL"
def p_statement_talkto_node(self, t):
''' statement : TALK_TO_NODE expression_node_name
| TALK_TO_NODE expression
'''
t[0] = t[2] + "," + t[1]
# def p_statement_talkto_node(self, t):
# ' statement : TALK_TO_NODE NODE_ID'
#
# t[0] = 'NUM16, ' + t[2] + ', ' + t[1]
def p_statement_procedure_call(self,t):
'statement : procedure_call'
t[0] = t[1]
# def p_statement_error(self,t):
# 'p_eror'
# print "I don't know how to " + t.value
# self.stopParser()
def p_procedure_call(self,t):
'''
procedure_call : PROCEDURE_CALL_0_PARAM
| PROCEDURE_CALL_1_PARAM expression
| PROCEDURE_CALL_2_PARAM expression expression
| PROCEDURE_CALL_3_PARAM expression expression expression
| PROCEDURE_CALL_4_PARAM expression expression expression expression
| PROCEDURE_CALL_5_PARAM expression expression expression expression expression
| PROCEDURE_CALL_6_PARAM expression expression expression expression expression expression
'''
# this is just a placeholder for a procedure call.
# it will be replaced with CALL, Address-highbyte, Address-lowbyte
# in post-processing
call_phrase = ", CALL, "+t[1]+", "+t[1]
if len(t) == 2:
t[0] = "CALL, "+t[1]+", "+t[1]
if len(t) == 3:
t[0] = t[2] + call_phrase
if len(t) == 4:
t[0] = t[3] + ', ' + t[2] + call_phrase
if len(t) == 5:
t[0] = t[4] + ', ' + t[3] + ', ' + t[2] + call_phrase
if len(t) == 6:
t[0] = t[5] + ', ' + t[4] + ', ' + t[3] + ', ' + t[2] + call_phrase
if len(t) == 7:
t[0] = t[6] + ', ' + t[5] + ', ' + t[4] + ', ' + t[3] + ', ' + t[2] + call_phrase
if len(t) == 8:
t[0] = t[7] + ', ' + t[6] + ', ' + t[5] + ', ' + t[4] + ', ' + t[3] + ', ' + t[2] + call_phrase
def p_statement_unary(self, t):
''' statement : STOP
| BEEP
| RESETT
| RESETDP
| M_ON
| M_OFF
| M_THISWAY
| M_CW
| M_THATWAY
| M_CCW
| M_RD
| STOPALL
| ULED_ON
| ULED_OFF
| RTC_INIT
| DISPLAY_CLS
| TALK_TO_7SEG_1
| TALK_TO_7SEG_2
| TALK_TO_LCD_1
| TALK_TO_LCD_2
| CLEAR_TICK
'''
if t[1].lower() == 'cw':
t[0] = 'thisway'
elif t[1].lower() == 'ccw':
t[0] = 'thatway'
else:
t[0] = t[1]
def p_statement_one_parameter(self, t):
'''statement : OUTPUT expression
| ONFOR expression
| WAIT expression
| RECORD expression
| SETDP expression
| ERASE expression
| SETPOWER expression
| SERVO_SET_H expression
| SERVO_LT expression
| SERVO_RT expression
| TALK_TO_MOTOR expression
| DISPLAY_SET_POS expression
| DISPLAY_SHOW expression
| SET_TICK_RATE expression
'''
if t[1] == 'show':
t[0] = t[2] + ', NUM8, 2, ' + t[1] # 2 tells the display module to show a number (not text)
else:
t[0] = t[2] + ', ' + t[1]
def p_statement_record_to_raspberrypi(self,t):
'''statement : RECORD expression string_expression
'''
t[0] = t[2] + ', ' + t[3] + ', ' + 'RECORD_TO_RPI'
def p_statement_send_int_message_to_raspberrypi(self,t):
'''statement : SEND_MESSAGE string_expression expression
'''
t[0] = t[3] + ', NUM8, 1,' + t[2] + ', ' + 'SEND_MESSAGE'
def p_statement_send_string_message_to_raspberrypi(self,t):
'''statement : SEND_MESSAGE string_expression string_expression
'''
t[0] = t[3] + ', NUM8, 2,' + t[2] + ', ' + 'SEND_MESSAGE'
def p_statement_i2c(self,t):
''' statement : I2C_WRITE expression expression expression'''
t[0] = t[4] + ', ' + t[3] + ', ' + t[2] + ', I2C_WRITE_REGISTER'
def p_statement_show_string(self, t):
''' statement : DISPLAY_SHOW string_expression
'''
string_length = len(t[2].split(',')) - 3 # string format is 'STRING','len','char1','char2',...,'char len'
# so we subtract 2 for the headers and another 1 for the
# trailing comma
if string_length <= 4:
t[0] = t[2] + ', NUM8, 3, ' + t[1] # 3 tells the display module to show a short 4 character text
else:
# display long text (for the 16x2 module only)
t[0] = t[2] + ', NUM8, 5, ' + t[1] # 5 tells the display module to show a long text
def p_statement_rpi_send_mail(self,t):
''' statement : SEND_MAIL string_expression string_expression string_expression
'''
# send_mail address, title, body
string_length = len(t[2].split(',')) - 3
string_length += len(t[3].split(',')) - 3
string_length += len(t[4].split(',')) - 3
if string_length > 100:
print ("Warning. Text Length for SEND_MAIL is long. This could cause the program to fail.")
t[0] = t[4] + ',' + t[3] + ',' + t[2] + ',' + t[1]
#t[0] = t[2] + t[1]
def p_statement_rpi_send_sms(self,t):
''' statement : SEND_SMS string_expression string_expression
'''
# t[0] = mail subject + mail-to address + send_mail
string_length = len(t[3])
if string_length > 50:
print ("Warning. Text Length for SEND_SMS is long. This could cause the program to fail.")
t[0] = t[3] + ',' + t[2] + ',' + t[1]
def p_statement_rpi_one_string_arg(self,t):
''' statement : PLAY_SOUND string_expression
| SHOW_IMAGE string_expression
| NEW_RECORD_FILE string_expression
| SAY string_expression
'''
t[0] = t[2] + ',' + t[1]
def p_statement_rpi_show_log_plot(self,t):
''' statement : SHOW_LOG_PLOT expression string_expression
| SHOW_LOG_PLOT string_expression
'''
# plot N latest values
if len(t) == 4:
t[0] = t[2] + ',' + t[3] + ',' + t[1]
# plot all values ( 0 = All)
else:
t[0] = 'NUM8, 0,' + t[2] + ',' + t[1]
def p_statement_voice_player(self,t):
''' statement : PLAY
| NEXT_TRACK
| PREV_TRACK
| GOTO_TRACK expression
| ERASE_TRACKS
'''
if t[1] == 'play':
t[0] = 'NUM8, 6, NUM8, 1, NUM8, 184, I2C_WRITE_REGISTER'
elif t[1] == 'nexttrack':
t[0] = 'NUM8, 9, NUM8, 1, NUM8, 184, I2C_WRITE_REGISTER'
elif t[1] == 'prevtrack':
t[0] = 'NUM8, 18, NUM8, 1, NUM8, 184, I2C_WRITE_REGISTER'
elif t[1] == 'gototrack':
t[0] = t[2] + ', NUM8, 48, OP_PLUS, NUM8, 3, NUM8, 184, I2C_WRITE_REGISTER'
elif t[1] == 'erasetracks':
t[0] = 'NUM8, 12, NUM8, 1, NUM8, 184, I2C_WRITE_REGISTER'
# add a delay to allow time for the voice recorder to execute
t[0] = t[0] + ', NUM8, 10, WAIT'
def p_statement_rpi_rfid(self, t):
''' statement : RFID_WRITE expression
'''
t[0] = t[2] + "," + t[1]
def p_statement_rpi_unary(self, t):
''' statement : USE_CAMERA
| CLOSE_CAMERA
| START_FIND_FACE
| STOP_FIND_FACE
| TAKE_SNAP_SHOT
| STOP_SOUND
| USE_RFID
| CLOSE_RFID
| RFID_BEEP
| USE_SMS
| CLEARKEYS
'''
t[0] = t[1]
def p_statement_forever(self, t):
'statement : FOREVER list'
t[0] = t[2] + ", FOREVER"
def p_statement_repeat(self, t):
'statement : REPEAT expression list'
t[0] = t[2] + t[3] + ", REPEAT"
def p_statement_waituntil(self, t):
'statement : WAITUNTIL expression_list'
t[0] = t[2] + ", WAITUNTIL"
def p_statement_if(self, t):
'statement : IF expression list'
t[0] = t[2] + t[3] + ", IF"
def p_statement_if_state_change(self, t):
'statement : IF_STATE_CHANGE expression list'
# this if only executes the list when the condition state changes from false to true
# the firmware tracks the current state using the unique id assigned by self.if_state_change_counter
t[0] = 'NUM8, '+ str(self.if_state_change_counter) + ", " + t[2] + t[3] + ", IF_STATE_CHANGE"
self.if_state_change_counter += 1
def p_statement_ifelse(self, t):
''' statement : IFELSE expression list list
'''
if len(t) == 7:
t[0] = t[2] + t[3] + t[6] + ", IFELSE"
elif len(t) == 5:
t[0] = t[2] + t[3] + t[4] + ", IFELSE"
def p_statement_talkto(self, t):
' statement : TALKTO '
t[0] = self.create_talkto_bytecode(t[1])
def p_expression_ison_isoff(self, t):
''' expression : IS_ON
| IS_OFF
'''
t[0] = self.create_motor_state_reporter_bytecode(t[1])
def p_expression_isthisway(self,t):
' expression : IS_THISWAY'
t[0] = self.create_motor_state_reporter_bytecode(t[1])
def p_expression_isthatway(self,t):
' expression : IS_THATWAY'
t[0] = self.create_motor_state_reporter_bytecode(t[1])
def p_expression_getpower(self,t):
' expression : GET_POWER'
t[0] = self.create_motor_state_reporter_bytecode(t[1])
if t[0] == "ERROR":
print ("Error in Get Power: cannot read power from more than one port")
self.stopParser()
def p_list(self, t):
' list : list_open statements list_close '
# strip(',') removes any empty tokens
list_len = len(t[2].strip(',').split(','))+1
#print "statement list = " + t[2]
#print "len = " + str(list_len)
# LONG_LIST is used for lists that are longer than 256 bytes
# Note that the original Cricket Logo does not have LONG_LISTs
if list_len < 256:
t[0] = ", LIST, " + str(list_len) + ", " + t[2] + ", EOL"
else:
t[0] = ", LONG_LIST, " + str(list_len >> 8) + ", " + str(list_len & 0xff) + ", " + t[2] + ", EOL"
def p_expression_list(self,t):
' expression_list : list_open expression list_close'
# strip(',') removes any empty tokens
list_len = len(t[2].strip(',').split(','))+1
#print "expression list = " + t[2]
#print "len = " + str(list_len)
# LONG_LIST is used for lists that are longer than 256 bytes
# Note that the original Cricket Logo does not have LONG_LISTs
if list_len < 256:
t[0] = ", LIST, " + str(list_len) + ", " + t[2] + ", EOLR"
else:
t[0] = ", LONG_LIST, " + str(list_len >> 8) + ", " + str(list_len & 0xff) + ", " + t[2] + ", EOLR"
def p_list_open(self, t):
''' list_open : LBRACKET
'''
t[0] = "["
def p_list_close(self, t):
''' list_close : RBRACKET
'''
# we don't declare
# 'delimiters RBRACKET' because statements have already been declared to be followed by delimiters
# 'RBRACKET delimiters' because it is possible for a list to terminate a statement, and statements
# have already been declared to accept preceding delimiters
t[0] = "]"
# # this procedure cannot be moved in front of p_statemets(t)
# # it will cause an error.
# def p_delimiters(self, t):
# ''' delimiters : DELIMITER
# | DELIMITER delimiters
# '''
# t[0] = "DELIMITER"
# =======================================
# Expressions
# =======================================
def p_expression_binop(self, t):
'''expression : expression OP_PLUS expression
| expression OP_MINUS expression
| expression OP_MULTIPLY expression
| expression OP_DIVISION expression
| expression OP_MODULO expression
| expression OP_LESS expression
| expression OP_LESS_OR_EQUAL expression
| expression OP_GREATER expression
| expression OP_GREATER_OR_EQUAL expression
| expression OP_EQUAL expression
| expression OP_AND expression
| expression OP_OR expression
| expression OP_XOR expression
'''
t[0] = t[1] + ', ' + t[3] + ', ' + t[2]
# def p_expression_uminus(self, t):
# 'expression : MINUS expression %prec UMINUS'
# # t[0] = -t[2]
# t[0] = '-' + t[2]
def p_key_compare_expression(self, t):
'''expression : GET_KEY_EXPRESSION OP_EQUAL string_expression
'''
t[0] = t[1] + ', ' + t[3] + ',' + 'OP_KEY_COMPARE'
def p_expression_node_name(self, t):
'expression_node_name : NODE_ID'
t[0] = 'NUM16, ' + str(t[1] >> 8) + ', ' + str(t[1] & 0xff)
def p_expression_group(self, t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(self, t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(self, t):
'expression : NAME'
#print "=-=-=-"
# print self.procedures_dict.keys()
# print self.current_procedure_index
#current_procedure_name = self.procedures_dict.keys()[self.current_procedure_index]
current_procedure_name = self.ordered_procedures_list[self.current_procedure_index]
# Check if NAME is a local variable
if t[1] in self.procedures_dict[current_procedure_name]:
t[0] = 'INPUT, ' + str(self.procedures_dict[current_procedure_name].index(t[1]))
self.showLog("Found LOCAL variable " + t[1] + " in procedure " + current_procedure_name)
# Check if NAME is a global variable
elif t[1] in self.global_variables:
t[0] = "NUM8, " + str( self.global_variables.index(t[1])) + ", GETGLOBAL"
self.showLog("Found GLOBAL variable " + t[1] + " in procedure " + current_procedure_name)
else:
print ("I don't know the expression '%s' at line %s" % (t[1], t.lineno(1)))
#print ("This procedure's parameters = " + self.procedures_dict[current_procedure_name])
tkMessageBox.showerror ("Compile Error", "I don't know the expression '%s' at line %s" % (t[1], t.lineno(1)))
# should raise an error here
t[0] = "NAME-ERROR"
# raise SyntaxError
self.stopParser()
def p_expression_string(self, t):
'string_expression : STRING'
t[0] = "STRING, " + str(len(t[1])) + ", "
for char in t[1]:
t[0] = t[0] + str(ord(char)) + ', '
t[0] = t[0][:-2] # remove the trailing ', '
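# e.g. the literal "hi" becomes: STRING, 2, 104, 105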
def p_expression_rpi_unary(self,t):
''' expression : FACE_FOUND
| CAMERA_IS_ON
| IS_FINDING_FACE
| SCREEN_TAPPED
| RFID_READ
| RFID_TAG_FOUND
| RFID_READER_FOUND
'''
t[0] = t[1]
def p_expression_no_argument(self, t):
'''expression : TIMER
| IR
| RECALL
| SERIAL
| DISPLAY_GET_POS
| NEWIR
| NEWSERIAL
| TICK_COUNT
'''
t[0]=t[1]
def p_expression_sensor_aliases(self, t):
'''expression : SENSOR1
| SENSOR2
| SENSOR3
| SENSOR4
| SENSOR5
| SENSOR6
| SENSOR7
| SENSOR8
'''
t[0]= "NUM8, " + t[1][6:] + ", readsensor"
def p_expression_switch_aliases(self, t):
'''expression : SWITCH1
| SWITCH2
| SWITCH3
| SWITCH4
| SWITCH5
| SWITCH6
| SWITCH7
| SWITCH8
'''
t[0]= "NUM8, " + t[1][6:] + ", readswitch"
def p_expression_key_compare(self, t):
''' GET_KEY_EXPRESSION : GET_KEY_VALUE string_expression
'''
t[0] = t[2] + "," + t[1]
def p_expression_key_int_value(self,t):
''' expression : GET_KEY_INT_VALUE string_expression
'''
t[0] = t[2] + ',' + t[1]
def p_clock_expressions(self,t ):
''' expression : SECONDS
| MINUTES
| HOURS
| DOW
| DAY
| MONTH
| YEAR
'''
if t[1] == 'seconds':
item = 0
elif t[1] == 'minutes':
item = 1
elif t[1] == 'hours':
item = 2
elif t[1] == 'dow':
item = 3
elif t[1] == 'day':
item = 4
elif t[1] == 'month':
item = 5
elif t[1] == 'year':
item = 6
t[0] = 'NUM8, ' + str(item) + ', RTC_GET_ITEM'
def p_expression_one_parameter(self, t):
''' expression : LOWBYTE expression
| HIGHBYTE expression
| OP_NOT expression
| RANDOM expression
| READ_SENSOR expression
| READ_SWITCH expression
'''
t[0] = t[2] + ", " + t[1]
def create_talkto_bytecode(self, inString):
' converts abcd, -> NUM8 0b1111 TALKTO'
inString = inString[:-1] # remove the trailing comma
talk_to_value = 0
for m in inString:
if m == 'a':
talk_to_value |= 1
elif m == 'b':
talk_to_value |= 2
elif m == 'c':
talk_to_value |= 4
elif m == 'd':
talk_to_value |= 8
return "NUM8, " + str(talk_to_value) + ", TALKTO"
def create_motor_state_reporter_bytecode(self, inString):
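' converts e.g. abon? -> NUM8, 3, ISON ; athatway? -> NUM8, 1, ISTHATWAY ; apower -> NUM8, 1, GETPOWER '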
if inString[-5:] != "power":
inString = inString[:-1].lower() # remove the trailing question mark
talk_to_value = 0
if inString[-2:] == "on":
portString = inString[:-2]
cmd = "ison"
elif inString[-3:] == "off":
portString = inString[:-3]
cmd = "isoff"
elif (inString[-7:] == "thisway"):
portString = inString[:-7]
cmd = "isthisway"
elif (inString[-7:] == "thatway"):
portString = inString[:-7]
cmd = "isthatway"
elif (inString[-5:] == "power"):
portString = inString[:-5]
cmd = "getpower"
if len(portString) > 1: # can't request power level from more than one port
return "ERROR"
for m in portString:
if m == 'a':
talk_to_value |= 1
elif m == 'b':
talk_to_value |= 2
elif m == 'c':
talk_to_value |= 4
elif m == 'd':
talk_to_value |= 8
return "NUM8, " + str(talk_to_value) + ", " + cmd.upper()
def p_expression_i2cread(self,t):
''' expression : I2C_READ expression expression
'''
t[0] = t[3] + ', ' + t[2] + ', I2C_READ_REGISTER'
def showLog(self, text):
pass
#print text
def p_error(self, t):
if t is None: # PLY passes None on unexpected end of input
print("Syntax error: unexpected end of input")
return
print("Line %s: Syntax error at '%s'" % (t.lineno, t.value))
self.stopParser()
# tkMessageBox.showerror("Compile Error", "At line %s, I don't understand '%s' or what came before it." % (t.lineno, t.value))  # GUI popup disabled: the tkMessageBox import is commented out above
def stopParser(self):
try:
# Stop parsing by eating up all the remaining tokens.
while yacc.token() != None:
pass
except:
pass
# =================================================================================
# The following example is a modification of the official example that
# demonstrates how to use the Logo Compiler.
# - entry: The logo code is received by argument 1 from the command line and
# is no longer contained in the "logo.txt" text file as in the original example.
# - Output: Print the binary code compiled on many levels.
# symbolic and current binaries.
#
# Requires the PLY package (http://www.dabeaz.com/ply)
# =================================================================================
if __name__ == '__main__':
# ======================================
# Read the input program
# ======================================
logoString = sys.argv[1]
compiler = tinkerLogo()
compiler.compile(logoString)
# retrieve the byte code for use elsewhere in your code
outputByteCode = compiler.byteCode()
| gpl-3.0 | -4,086,673,645,929,676,000 | 30.628308 | 133 | 0.446269 | false |
MSeal/py_cache_manager | cacheman/registers.py | 2 | 8195 | import pickle
from six.moves import cPickle
from six import iteritems
import shutil
import os
import sys
import psutil
import csv
import traceback
from .utils import random_name
if sys.version_info[0] == 2:
text_read_mode = 'rU'
text_write_mode = 'wb'
else:
text_read_mode = 'r'
text_write_mode = 'w'
def dict_loader(*arg, **kwargs):
return {}
def disabled_saver(*arg, **kwargs):
pass
disabled_deleter = disabled_saver
def generate_path(cache_dir, cache_name, extension):
return os.path.join(cache_dir, '.'.join([cache_name, extension]))
def generate_pickle_path(cache_dir, cache_name):
return generate_path(cache_dir, cache_name, 'pkl')
def generate_csv_path(cache_dir, cache_name):
return generate_path(cache_dir, cache_name, 'csv')
def ensure_directory(dirname):
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError:
if not os.path.isdir(dirname):
raise IOError('Unable to build cache directory: {}'.format(dirname))
def _exclude_zombie_procs(procs):
alive_procs = []
for p in procs:
try:
if p.status() != psutil.STATUS_ZOMBIE:
alive_procs.append(p)
except:
pass
return alive_procs
def _tmp_pid_extensions(pid=None):
extensions = ['tmp', random_name()]
if pid:
extensions.append(str(pid))
return extensions
def fork_content_save(cache_name, contents, presaver, saver, cleaner, timeout, seen_pids):
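'''Asynchronously save `contents` for `cache_name` by forking a child process.
The parent first reaps finished children from earlier saves (running `cleaner`
on their temp extensions), records the new child's pid in `seen_pids`, and
returns immediately. The child runs `presaver`, waits up to `timeout` seconds
for older sibling saves (killing any stragglers), runs `saver`, and exits via
os._exit(0). Falls back to a synchronous save when fork fails (out of memory)
or is unavailable (e.g. Windows).'''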
children = _exclude_zombie_procs([proc for proc in psutil.Process().children(recursive=False)
if proc.pid in seen_pids[cache_name]])
cache_pids = set(child.pid for child in children)
terminated_pids = seen_pids[cache_name] - cache_pids
for pid in terminated_pids:
# Slay the undead... they mingle with the living...
try: os.waitpid(pid, 0)
except OSError: pass
if cleaner:
cleaner(cache_name, _tmp_pid_extensions(pid))
seen_pids[cache_name] = cache_pids
exts = _tmp_pid_extensions()
try:
fork_pid = os.fork()
except OSError as e:
print(("Warning, saving {} synchronously: {} ".format(cache_name, repr(e)) +
"-- you're out of memory or you might be out of shared memory (check kernel.shmmax)"))
if presaver:
presaver(cache_name, contents, exts)
saver(cache_name, contents, exts)
return
except AttributeError:
# Windows has no fork... TODO make windows async saver
if presaver:
presaver(cache_name, contents, exts)
saver(cache_name, contents, exts)
return
if fork_pid != 0:
cache_pids.add(fork_pid)
else:
try:
pid = os.getpid()
pid_exts = _tmp_pid_extensions(pid)
except Exception as e:
print("Warning: ignored error in '{}' cache saver - {}".format(cache_name, repr(e)))
try:
if presaver:
presaver(cache_name, contents, pid_exts)
# Refilter our zombies
children = _exclude_zombie_procs(children)
if children:
gone, alive_and_undead = psutil.wait_procs(children, timeout=timeout)
# Avoid killing processes that have since died
alive = _exclude_zombie_procs(alive_and_undead)
for p in alive:
print("Warning killing previous save for '{}' cache on pid {}".format(cache_name, p.pid))
p.kill()
saver(cache_name, contents, pid_exts)
except Exception as e:
if cleaner:
try: cleaner(cache_name, pid_exts) # cleaner takes (cache_name, extensions), as in the call above
except: pass
print("Warning: ignored error in '{}' cache saver - {}".format(cache_name, repr(e)))
finally:
# Exit aggressively -- we don't want cleanup to occur
os._exit(0)
def pickle_saver(cache_dir, cache_name, contents):
tmp_exts = ['tmp', random_name()]
try:
try:
pickle_pre_saver(cache_dir, cache_name, contents, tmp_exts)
pickle_mover(cache_dir, cache_name, contents, tmp_exts)
except (IOError, EOFError):
traceback.print_exc()
raise IOError('Unable to save {} cache'.format(cache_name))
except:
try: pickle_cleaner(cache_dir, cache_name, tmp_exts)
except: pass
raise
def pickle_pre_saver(cache_dir, cache_name, contents, extensions):
ensure_directory(cache_dir)
cache_path = generate_pickle_path(cache_dir, cache_name)
with open('.'.join([cache_path] + extensions), 'wb') as pkl_file:
try:
cPickle.dump(contents, pkl_file)
except:
# We do this because older cPickle was incorrectly raising exceptions
pickle.dump(contents, pkl_file)
def pickle_mover(cache_dir, cache_name, contents, extensions):
cache_path = generate_pickle_path(cache_dir, cache_name)
shutil.move('.'.join([cache_path] + extensions), cache_path)
def pickle_cleaner(cache_dir, cache_name, extensions):
cache_path = generate_pickle_path(cache_dir, cache_name)
try: os.remove('.'.join([cache_path] + extensions))
except OSError: pass
def pickle_deleter(cache_dir, cache_name):
try:
os.remove(generate_pickle_path(cache_dir, cache_name))
except OSError:
pass
def pickle_loader(cache_dir, cache_name):
'''
Default loader for any cache, this function loads from a pickle file based on cache name.
'''
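# Illustrative call (hypothetical paths): pickle_loader('/tmp/caches', 'users')
# reads /tmp/caches/users.pkl and returns the unpickled object, or None if the
# file is missing or unreadable.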
contents = None
try:
with open(generate_pickle_path(cache_dir, cache_name), 'rb') as pkl_file:
try:
contents = cPickle.load(pkl_file)
except:
exc_info = sys.exc_info()
try: contents = pickle.load(pkl_file)
except (IndexError, AttributeError): pass
if contents is None:
raise exc_info[1].with_traceback(exc_info[2])
except (IOError, EOFError):
return None
return contents
def csv_saver(cache_dir, cache_name, contents, row_builder=None):
tmp_exts = ['tmp', random_name()]
try:
try:
csv_pre_saver(cache_dir, cache_name, contents, tmp_exts, row_builder)
csv_mover(cache_dir, cache_name, contents, tmp_exts)
except (IOError, EOFError):
traceback.print_exc()
raise IOError('Unable to save {} cache'.format(cache_name))
except:
try: csv_cleaner(cache_dir, cache_name, tmp_exts)
except: pass
raise
def csv_pre_saver(cache_dir, cache_name, contents, extensions, row_builder=None):
ensure_directory(cache_dir)
cache_path = generate_csv_path(cache_dir, cache_name)
with open('.'.join([cache_path] + extensions), text_write_mode) as csv_file:
writer = csv.writer(csv_file, dialect='excel', quoting=csv.QUOTE_MINIMAL)
for key, value in iteritems(contents):
writer.writerow(row_builder(key, value) if row_builder else [key, value])
def csv_mover(cache_dir, cache_name, contents, extensions):
cache_path = generate_csv_path(cache_dir, cache_name)
shutil.move('.'.join([cache_path] + extensions), cache_path)
def csv_cleaner(cache_dir, cache_name, extensions):
cache_path = generate_csv_path(cache_dir, cache_name)
try: os.remove('.'.join([cache_path] + extensions))
except OSError: pass
def csv_loader(cache_dir, cache_name, row_reader=None):
contents = {}
try:
with open(generate_csv_path(cache_dir, cache_name), text_read_mode) as csv_file:
reader = csv.reader(csv_file, dialect='excel', quoting=csv.QUOTE_MINIMAL)
for row in reader:
if row:
key, val = row_reader(row) if row_reader else (row[0], row[1])
contents[key] = val
except (IOError, EOFError):
return None
return contents
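# Illustrative round-trip (hypothetical data; row shapes are the caller's choice):
#   csv_saver('/tmp/caches', 'scores', {'alice': 3}, row_builder=lambda k, v: [k, v])
#   csv_loader('/tmp/caches', 'scores', row_reader=lambda row: (row[0], int(row[1])))
# returns {'alice': 3}; values come back as strings unless row_reader converts them.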
| bsd-2-clause | -3,871,558,480,038,379,500 | 35.081448 | 109 | 0.594997 | false |
VojtechBartos/smsgw | smsgw/resources/contacts/schemas/put.py | 1 | 1911 | # -*- coding: utf-8 -*-
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
from smsgw.resources import patterns
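# Example of a request body the schema below accepts (illustrative only;
# the email value just needs to satisfy patterns.EMAIL):
# {
#     "firstName": "Jane",
#     "lastName": "Doe",
#     "email": "[email protected]",
#     "phoneNumber": "+420736202512",
#     "tags": ["friends"]
# }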
schema = {
"description": "Schema for the contacts PUT endpoint",
"type": "object",
"method": "PUT",
"required": [ "firstName", "lastName", "phoneNumber" ],
"additionalProperties": False,
"properties": {
"firstName": {
"type": "string",
"minLength": 2,
"maxLength": 16,
"messages": {
"minLength": "Max length of first name is 2 characters.",
"maxLength": "Max length of first name is 16 characters."
}
},
"lastName": {
"type": "string",
"minLength": 2,
"maxLength": 16,
"messages": {
"minLength": "Max length of last name is 2 characters.",
"maxLength": "Max length of last name is 16 characters."
}
},
"email": {
"type": ["string", "null"],
"pattern": "^(%s)?$" % patterns.EMAIL,
"messages": {
"pattern": "E-mail is in wrong format."
}
},
"phoneNumber": {
"type": "string",
"pattern": patterns.PHONE_NUMBER,
"messages": {
"type": "Phone number needs to be string.",
"pattern": "Phone number has invalid format. (+420736202512 as an example.)"
}
},
"note": {
"type": ["string", "null"],
"maxLength": 255,
"messages": {
"maxLength": "Max length of note is 255 characters."
}
},
"tags": {
"type": ["array", "null"],
"items": {
"type": "string",
"minLength": 1,
"maxLength": 23
}
}
}
}
| mit | 2,341,249,699,059,499,000 | 29.822581 | 92 | 0.440607 | false |
berkmancenter/mediacloud | apps/common/src/python/mediawords/util/process.py | 1 | 2628 | import os
import psutil
from mediawords.util.log import create_logger
from mediawords.util.perl import decode_object_from_bytes_if_needed
log = create_logger(__name__)
def _kill_children(pid: int) -> None:
"""Kill all children of PID except for ourselves."""
proc = psutil.Process(pid)
command = proc.cmdline()
# Kill the children first (except for ourselves) and then go for the parent
# Go straight for SIGKILL as "acks_late" is set so unfinished jobs should get restarted properly
# Get the list first because it might change while we're killing processes
children = list(proc.children(recursive=True))
log.debug(f"Process children: {children}")
for child in children:
# Don't kill ourselves just yet
if child.pid != os.getpid():
try:
log.warning(f"Killing child with PID {child.pid} ({str(child.cmdline())})")
child.kill()
except psutil.NoSuchProcess:
log.warning(f"Child {child.pid} is gone already")
if pid != os.getpid():
log.warning(f"Killing parent with PID {proc.pid} ({str(command)})")
proc.kill()
def fatal_error(message: str) -> None:
"""Print error message, exit(1) the process.
Sometimes when an error happens, we can't use die() because it would get caught in try-except.
We don't always want that: for example, if crawler dies because of misconfiguration in mediawords.yml, crawler's
errors would get logged into "downloads" table as if the error happened because of a valid reason.
In those cases, we go straight to exit(1) using this helper subroutine."""
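# Illustrative call site (hypothetical): fatal_error("invalid mediawords.yml: no database configured")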
message = decode_object_from_bytes_if_needed(message)
log.error(message)
# If a Celery worker calls fatal_error(), it doesn't manage to kill the parent process because Celery forks new
# processes to run the actual job. So, find the parent process and send it a signal too for it to shut down.
parent_proc = psutil.Process(os.getppid())
parent_command = parent_proc.cmdline()
log.debug(f"Parent command: {parent_command}")
if 'python3' in parent_command[0].lower() or 'perl' in parent_command[0].lower():
_kill_children(parent_proc.pid)
current_proc = psutil.Process(os.getpid())
current_command = current_proc.cmdline()
log.debug(f"Current command: {current_command}")
if 'python3' in current_command[0].lower() or 'perl' in current_command[0].lower():
_kill_children(current_proc.pid)
log.warning(f"Killing ourselves with PID {os.getpid()}")
# noinspection PyProtectedMember
os._exit(1)
| agpl-3.0 | -7,614,883,986,507,458,000 | 35.5 | 116 | 0.679224 | false |
redouane/Dzik | dzik.py | 1 | 2117 | # -*- coding: latin-1 -*-
### App By : Redouane ###
### E-MAIL : [email protected] ###
### BLOG : redouanezait.com ###
### LICENSE : GNU GPL v3 ###
from main import gui
from PyQt4.QtGui import QApplication, QMessageBox
from PyQt4.QtCore import QThread, SIGNAL
import urllib
import re
import sys
if hasattr(sys,"setdefaultencoding"):
sys.setdefaultencoding("latin-1")
currentversion = '1.3'
def main():
class App(QApplication):
def __init__(self, argv):
super(App, self).__init__(argv)
css = open('style.qss', 'r').read()
self.main = gui.Mainwindow()
self.urlbar = gui.urlgroup(self.main)
self.setStyleSheet(css)
self.thread = checkforUpdate()
self.connect(self.thread, SIGNAL('updateinfos(PyQt_PyObject)'), self.update)
self.thread.start()
def update(self, updateinfos):
self.updatebox = QMessageBox()
self.updatebox.about(self.main, 'Mise à jour', 'Une nouvelle Mise à jour est disponible : <br /> <b> Version : </b>' + updateinfos['version'] + '<br /> <b> Lien : </b>' + updateinfos['link'] + '<br /> <b>Nouveautés : </b>' + updateinfos['whatsnew'])
class checkforUpdate(QThread):
def run(self):
xml = 'http://dl.dropbox.com/u/6858914/DZik/DZik.xml'
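# The remote XML is expected to contain tags like the following
# (illustrative sample, inferred from the regexes below):
# <version>1.4</version>
# <link>http://example.com/DZik-1.4.zip</link>
# <whatsnew>Bug fixes</whatsnew>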
try:
xml = urllib.urlopen(xml).read()
latestversion = re.findall('<version>(.+)</version>', xml)[0]
if float(latestversion) <= float(currentversion):
return
else:
link = re.findall('<link>(.+)</link>', xml)[0]
whatsnew = re.findall('<whatsnew>(.+)</whatsnew>', xml)[0]
updateinfos = {'version':latestversion, 'link':link , 'whatsnew':whatsnew}
self.emit(SIGNAL('updateinfos(PyQt_PyObject)'), updateinfos)
except IOError:
return
dzik = App(sys.argv)
dzik.exec_()
if __name__ == '__main__':
main() | gpl-3.0 | -750,749,802,087,596,700 | 35.344828 | 264 | 0.538206 | false |
pypa/virtualenv | docs/conf.py | 2 | 2481 | import subprocess
import sys
from datetime import date, datetime
from pathlib import Path
import sphinx_rtd_theme
from virtualenv.version import __version__
company = "PyPA"
name = "virtualenv"
version = ".".join(__version__.split(".")[:2])
release = __version__
copyright = f"2007-{date.today().year}, {company}, PyPA"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.extlinks",
]
templates_path = []
unused_docs = []
source_suffix = ".rst"
exclude_patterns = ["_build", "changelog/*", "_draft.rst"]
main_doc = "index"
pygments_style = "default"
always_document_param_types = True
project = name
today_fmt = "%B %d, %Y"
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
"canonical_url": "https://virtualenv.pypa.io/",
"logo_only": False,
"display_version": True,
"prev_next_buttons_location": "bottom",
"collapse_navigation": False,
"sticky_navigation": True,
"navigation_depth": 6,
"includehidden": True,
}
html_static_path = ["_static"]
html_last_updated_fmt = datetime.now().isoformat()
htmlhelp_basename = "Pastedoc"
autoclass_content = "both" # Include __init__ in class documentation
autodoc_member_order = "bysource"
autosectionlabel_prefix_document = True
extlinks = {
"issue": ("https://github.com/pypa/virtualenv/issues/%s", "#"),
"pull": ("https://github.com/pypa/virtualenv/pull/%s", "PR #"),
"user": ("https://github.com/%s", "@"),
"pypi": ("https://pypi.org/project/%s", ""),
}
def generate_draft_news():
root = Path(__file__).parents[1]
new = subprocess.check_output(
[sys.executable, "-m", "towncrier", "--draft", "--version", "NEXT"],
cwd=root,
universal_newlines=True,
)
(root / "docs" / "_draft.rst").write_text("" if "No significant changes" in new else new)
generate_draft_news()
def setup(app):
# the CLI arguments are dynamically generated
doc_tree = Path(app.doctreedir)
cli_interface_doctree = doc_tree / "cli_interface.doctree"
if cli_interface_doctree.exists():
cli_interface_doctree.unlink()
HERE = Path(__file__).parent
if str(HERE) not in sys.path:
sys.path.append(str(HERE))
# noinspection PyUnresolvedReferences
from render_cli import CliTable, literal_data
app.add_css_file("custom.css")
app.add_directive(CliTable.name, CliTable)
app.add_role("literal_data", literal_data)
| mit | 3,847,665,274,110,247,000 | 26.876404 | 93 | 0.655784 | false |
fgmacedo/django-awards | docs/conf.py | 1 | 8151 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import awards
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Awards'
copyright = u'2016, Fernando Macedo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = awards.__version__
# The full version, including alpha/beta/rc tags.
release = awards.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-awardsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-awards.tex', u'Django Awards Documentation',
u'Fernando Macedo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-awards', u'Django Awards Documentation',
[u'Fernando Macedo'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-awards', u'Django Awards Documentation',
u'Fernando Macedo', 'django-awards', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 2,122,734,400,599,213,000 | 31.090551 | 80 | 0.706784 | false |
yannrouillard/weboob | weboob/applications/radioob/radioob.py | 1 | 10151 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import sys
import os
import re
from weboob.capabilities.radio import ICapRadio, Radio
from weboob.capabilities.audio import ICapAudio, BaseAudio
from weboob.capabilities.base import empty
from weboob.tools.application.repl import ReplApplication, defaultcount
from weboob.tools.application.media_player import InvalidMediaPlayer, MediaPlayer, MediaPlayerNotFound
from weboob.tools.application.formatters.iformatter import PrettyFormatter
__all__ = ['Radioob']
class RadioListFormatter(PrettyFormatter):
MANDATORY_FIELDS = ('id', 'title', 'description')
def get_title(self, obj):
return obj.title
def get_description(self, obj):
result = '%-30s' % obj.description
if hasattr(obj, 'current') and not empty(obj.current):
if obj.current.who:
result += ' (Current: %s - %s)' % (obj.current.who, obj.current.what)
else:
result += ' (Current: %s)' % obj.current.what
return result
class Radioob(ReplApplication):
APPNAME = 'radioob'
VERSION = '0.i'
COPYRIGHT = 'Copyright(C) 2010-2013 Romain Bignon\nCopyright(C) 2013 Pierre Maziere'
DESCRIPTION = "Console application allowing to search for web radio stations, listen to them and get information " \
"like the current song."
SHORT_DESCRIPTION = "search, show or listen to radio stations"
CAPS = (ICapRadio, ICapAudio)
EXTRA_FORMATTERS = {'radio_list': RadioListFormatter}
COMMANDS_FORMATTERS = {'ls': 'radio_list',
'search': 'radio_list',
'playlist': 'radio_list',
}
COLLECTION_OBJECTS = (Radio, BaseAudio, )
PLAYLIST = []
def __init__(self, *args, **kwargs):
ReplApplication.__init__(self, *args, **kwargs)
self.player = MediaPlayer(self.logger)
def main(self, argv):
self.load_config()
return ReplApplication.main(self, argv)
def complete_download(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return self._complete_object()
elif len(args) >= 3:
return self.path_completer(args[2])
def do_download(self, line):
"""
download ID [FILENAME]
Download an audio file
"""
_id, dest = self.parse_command_args(line, 2, 1)
audio = self.get_object(_id, 'get_audio', ['url'])
if not audio:
print >>sys.stderr, 'Audio file not found: %s' % _id
return 3
if not audio.url:
print >>sys.stderr, 'Error: the direct URL is not available.'
return 4
def check_exec(executable):
with open('/dev/null', 'w') as devnull:
process = subprocess.Popen(['which', executable], stdout=devnull)
if process.wait() != 0:
print >>sys.stderr, 'Please install "%s"' % executable
return False
return True
def audio_to_file(_audio):
ext = _audio.ext
if not ext:
ext = 'audiofile'
return '%s.%s' % (re.sub('[?:/]', '-', _audio.id), ext)
if dest is not None and os.path.isdir(dest):
dest += '/%s' % audio_to_file(audio)
if dest is None:
dest = audio_to_file(audio)
if audio.url.startswith('rtmp'):
if not check_exec('rtmpdump'):
return 1
args = ('rtmpdump', '-e', '-r', audio.url, '-o', dest)
elif audio.url.startswith('mms'):
if not check_exec('mimms'):
return 1
args = ('mimms', '-r', audio.url, dest)
else:
if check_exec('wget'):
args = ('wget', '-c', audio.url, '-O', dest)
elif check_exec('curl'):
args = ('curl', '-C', '-', audio.url, '-o', dest)
else:
return 1
os.spawnlp(os.P_WAIT, args[0], *args)
def complete_play(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return self._complete_object()
def do_play(self, line):
"""
play ID [stream_id]
        Play a radio or an audio file with a found player (optionally specify the wanted stream).
"""
_id, stream_id = self.parse_command_args(line, 2, 1)
if not _id:
print >>sys.stderr, 'This command takes an argument: %s' % self.get_command_help('play', short=True)
return 2
try:
stream_id = int(stream_id)
except (ValueError,TypeError):
stream_id = 0
radio = self.get_object(_id, 'get_radio')
audio = self.get_object(_id, 'get_audio')
if radio is None and audio is None:
print >>sys.stderr, 'Radio or Audio file not found:', _id
return 3
if audio is None:
try:
stream = radio.streams[stream_id]
except IndexError:
print >>sys.stderr, 'Stream #%d not found' % stream_id
return 1
else:
stream = audio
try:
player_name = self.config.get('media_player')
media_player_args = self.config.get('media_player_args')
if not player_name:
self.logger.debug(u'You can set the media_player key to the player you prefer in the radioob '
'configuration file.')
self.player.play(stream, player_name=player_name, player_args=media_player_args)
except (InvalidMediaPlayer, MediaPlayerNotFound) as e:
print '%s\nRadio URL: %s' % (e, stream.url)
def do_playlist(self, line):
"""
playlist cmd [args]
playlist add ID [ID2 ID3 ...]
playlist remove ID [ID2 ID3 ...]
playlist export [FILENAME]
playlist display
"""
if not line:
print >>sys.stderr, 'This command takes an argument: %s' % self.get_command_help('playlist')
return 2
cmd, args = self.parse_command_args(line, 2, req_n=1)
if cmd == "add":
_ids = args.strip().split(' ')
for _id in _ids:
audio = self.get_object(_id, 'get_audio')
if not audio:
print >>sys.stderr, 'Audio file not found: %s' % _id
return 3
if not audio.url:
print >>sys.stderr, 'Error: the direct URL is not available.'
return 4
self.PLAYLIST.append(audio)
elif cmd == "remove":
_ids = args.strip().split(' ')
for _id in _ids:
audio_to_remove = self.get_object(_id, 'get_audio')
if not audio_to_remove:
print >>sys.stderr, 'Audio file not found: %s' % _id
return 3
if not audio_to_remove.url:
print >>sys.stderr, 'Error: the direct URL is not available.'
return 4
for audio in self.PLAYLIST:
if audio.id == audio_to_remove.id:
self.PLAYLIST.remove(audio)
break
elif cmd == "export":
filename = "playlist.m3u"
if args:
filename = args
file = open(filename, 'w')
for audio in self.PLAYLIST:
file.write('%s\r\n' % audio.url)
file.close()
elif cmd == "display":
for audio in self.PLAYLIST:
self.cached_format(audio)
else:
print >>sys.stderr, 'Playlist command only support "add", "remove", "display" and "export" arguments.'
return 2
def complete_info(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return self._complete_object()
def do_info(self, _id):
"""
info ID
Get information about a radio or an audio file.
"""
if not _id:
print >>sys.stderr, 'This command takes an argument: %s' % self.get_command_help('info', short=True)
return 2
radio = self.get_object(_id, 'get_radio')
audio = self.get_object(_id, 'get_audio')
if radio is None and audio is None:
print >>sys.stderr, 'Radio or Audio file not found:', _id
return 3
if audio is None:
self.format(radio)
else:
self.format(audio)
@defaultcount(10)
def do_search(self, pattern=None):
"""
search PATTERN
List radios matching a PATTERN.
If PATTERN is not given, this command will list all the radios.
"""
self.set_formatter_header(u'Search pattern: %s' % pattern if pattern else u'All radios')
self.change_path([u'search'])
for backend, radio in self.do('iter_radios_search', pattern=pattern):
self.add_object(radio)
self.format(radio)
for backend, audio in self.do('search_audio', pattern=pattern):
self.add_object(audio)
self.format(audio)
def do_ls(self, line):
"""
ls
List radios
"""
ret = super(Radioob, self).do_ls(line)
return ret
| agpl-3.0 | -5,309,808,482,953,308,000 | 32.50165 | 120 | 0.547237 | false |
papaeye/pep8 | docs/conf.py | 9 | 7991 | # -*- coding: utf-8 -*-
#
# pep8 documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 21 09:47:49 2012.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.coverage', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pep8'
copyright = u'2012-2013, Florent Xicluna'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
pep8_version = __import__('pep8').__version__.split('.')
# The short X.Y version.
version = '.'.join(pep8_version[:2])
# The full version, including alpha/beta/rc tags.
release = '.'.join(pep8_version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for
# all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pep8doc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto/manual]).
latex_documents = [
('index', 'pep8.tex', u'pep8 documentation',
u'Florent Xicluna', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pep8', u'pep8 documentation',
[u'Florent Xicluna'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pep8', u'pep8 documentation', u'Florent Xicluna',
'pep8', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | 1,003,528,522,612,535,800 | 30.964 | 79 | 0.698035 | false |
nicecapj/crossplatfromMmorpgServer | ThirdParty/protobuf/python/setup.py | 2 | 10221 | #! /usr/bin/env python
#
# See README for usage instructions.
import glob
import os
import subprocess
import sys
# We must use setuptools, not distutils, because we need to use the
# namespace_packages option for the "google" package.
from setuptools import setup, Extension, find_packages
from distutils.command.clean import clean as _clean
if sys.version_info[0] == 3:
# Python 3
from distutils.command.build_py import build_py_2to3 as _build_py
else:
# Python 2
from distutils.command.build_py import build_py as _build_py
from distutils.spawn import find_executable
# Find the Protocol Compiler.
if 'PROTOC' in os.environ and os.path.exists(os.environ['PROTOC']):
protoc = os.environ['PROTOC']
elif os.path.exists("../src/protoc"):
protoc = "../src/protoc"
elif os.path.exists("../src/protoc.exe"):
protoc = "../src/protoc.exe"
elif os.path.exists("../vsprojects/Debug/protoc.exe"):
protoc = "../vsprojects/Debug/protoc.exe"
elif os.path.exists("../vsprojects/Release/protoc.exe"):
protoc = "../vsprojects/Release/protoc.exe"
else:
protoc = find_executable("protoc")
def GetVersion():
"""Gets the version from google/protobuf/__init__.py
Do not import google.protobuf.__init__ directly, because an installed
protobuf library may be loaded instead."""
with open(os.path.join('google', 'protobuf', '__init__.py')) as version_file:
exec(version_file.read(), globals())
return __version__
def generate_proto(source, require = True):
"""Invokes the Protocol Compiler to generate a _pb2.py from the given
.proto file. Does nothing if the output already exists and is newer than
the input."""
if not require and not os.path.exists(source):
return
output = source.replace(".proto", "_pb2.py").replace("../src/", "")
if (not os.path.exists(output) or
(os.path.exists(source) and
os.path.getmtime(source) > os.path.getmtime(output))):
print("Generating %s..." % output)
if not os.path.exists(source):
sys.stderr.write("Can't find required file: %s\n" % source)
sys.exit(-1)
if protoc is None:
sys.stderr.write(
"protoc is not installed nor found in ../src. Please compile it "
"or install the binary package.\n")
sys.exit(-1)
protoc_command = [ protoc, "-I../src", "-I.", "--python_out=.", source ]
if subprocess.call(protoc_command) != 0:
sys.exit(-1)
def GenerateUnittestProtos():
generate_proto("../src/google/protobuf/any_test.proto", False)
generate_proto("../src/google/protobuf/map_unittest.proto", False)
generate_proto("../src/google/protobuf/unittest_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena.proto", False)
generate_proto("../src/google/protobuf/unittest_no_arena_import.proto", False)
generate_proto("../src/google/protobuf/unittest.proto", False)
generate_proto("../src/google/protobuf/unittest_custom_options.proto", False)
generate_proto("../src/google/protobuf/unittest_import.proto", False)
generate_proto("../src/google/protobuf/unittest_import_public.proto", False)
generate_proto("../src/google/protobuf/unittest_mset.proto", False)
generate_proto("../src/google/protobuf/unittest_mset_wire_format.proto", False)
generate_proto("../src/google/protobuf/unittest_no_generic_services.proto", False)
generate_proto("../src/google/protobuf/unittest_proto3_arena.proto", False)
generate_proto("../src/google/protobuf/util/json_format_proto3.proto", False)
generate_proto("google/protobuf/internal/any_test.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test1.proto", False)
generate_proto("google/protobuf/internal/descriptor_pool_test2.proto", False)
generate_proto("google/protobuf/internal/factory_test1.proto", False)
generate_proto("google/protobuf/internal/factory_test2.proto", False)
generate_proto("google/protobuf/internal/file_options_test.proto", False)
generate_proto("google/protobuf/internal/import_test_package/inner.proto", False)
generate_proto("google/protobuf/internal/import_test_package/outer.proto", False)
generate_proto("google/protobuf/internal/missing_enum_values.proto", False)
generate_proto("google/protobuf/internal/message_set_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions.proto", False)
generate_proto("google/protobuf/internal/more_extensions_dynamic.proto", False)
generate_proto("google/protobuf/internal/more_messages.proto", False)
generate_proto("google/protobuf/internal/packed_field_test.proto", False)
generate_proto("google/protobuf/internal/test_bad_identifiers.proto", False)
generate_proto("google/protobuf/pyext/python.proto", False)
class clean(_clean):
def run(self):
# Delete generated files in the code tree.
for (dirpath, dirnames, filenames) in os.walk("."):
for filename in filenames:
filepath = os.path.join(dirpath, filename)
if filepath.endswith("_pb2.py") or filepath.endswith(".pyc") or \
filepath.endswith(".so") or filepath.endswith(".o") or \
filepath.endswith('google/protobuf/compiler/__init__.py') or \
filepath.endswith('google/protobuf/util/__init__.py'):
os.remove(filepath)
# _clean is an old-style class, so super() doesn't work.
_clean.run(self)
class build_py(_build_py):
def run(self):
# Generate necessary .proto file if it doesn't exist.
generate_proto("../src/google/protobuf/descriptor.proto")
generate_proto("../src/google/protobuf/compiler/plugin.proto")
generate_proto("../src/google/protobuf/any.proto")
generate_proto("../src/google/protobuf/api.proto")
generate_proto("../src/google/protobuf/duration.proto")
generate_proto("../src/google/protobuf/empty.proto")
generate_proto("../src/google/protobuf/field_mask.proto")
generate_proto("../src/google/protobuf/source_context.proto")
generate_proto("../src/google/protobuf/struct.proto")
generate_proto("../src/google/protobuf/timestamp.proto")
generate_proto("../src/google/protobuf/type.proto")
generate_proto("../src/google/protobuf/wrappers.proto")
GenerateUnittestProtos()
# Make sure google.protobuf/** are valid packages.
for path in ['', 'internal/', 'compiler/', 'pyext/', 'util/']:
try:
open('google/protobuf/%s__init__.py' % path, 'a').close()
except EnvironmentError:
pass
# _build_py is an old-style class, so super() doesn't work.
_build_py.run(self)
class test_conformance(_build_py):
target = 'test_python'
def run(self):
if sys.version_info >= (2, 7):
# Python 2.6 dodges these extra failures.
os.environ["CONFORMANCE_PYTHON_EXTRA_FAILURES"] = (
"--failure_list failure_list_python-post26.txt")
cmd = 'cd ../conformance && make %s' % (test_conformance.target)
status = subprocess.check_call(cmd, shell=True)
def get_option_from_sys_argv(option_str):
if option_str in sys.argv:
sys.argv.remove(option_str)
return True
return False
if __name__ == '__main__':
ext_module_list = []
warnings_as_errors = '--warnings_as_errors'
if get_option_from_sys_argv('--cpp_implementation'):
# Link libprotobuf.a and libprotobuf-lite.a statically with the
# extension. Note that those libraries have to be compiled with
# -fPIC for this to work.
compile_static_ext = get_option_from_sys_argv('--compile_static_extension')
extra_compile_args = ['-Wno-write-strings',
'-Wno-invalid-offsetof',
'-Wno-sign-compare']
libraries = ['protobuf']
extra_objects = None
if compile_static_ext:
libraries = None
extra_objects = ['../src/.libs/libprotobuf.a',
'../src/.libs/libprotobuf-lite.a']
test_conformance.target = 'test_python_cpp'
if "clang" in os.popen('$CC --version 2> /dev/null').read():
extra_compile_args.append('-Wno-shorten-64-to-32')
if warnings_as_errors in sys.argv:
extra_compile_args.append('-Werror')
sys.argv.remove(warnings_as_errors)
# C++ implementation extension
ext_module_list.extend([
Extension(
"google.protobuf.pyext._message",
glob.glob('google/protobuf/pyext/*.cc'),
include_dirs=[".", "../src"],
libraries=libraries,
extra_objects=extra_objects,
library_dirs=['../src/.libs'],
extra_compile_args=extra_compile_args,
),
Extension(
"google.protobuf.internal._api_implementation",
glob.glob('google/protobuf/internal/api_implementation.cc'),
extra_compile_args=['-DPYTHON_PROTO2_CPP_IMPL_V2'],
),
])
os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'
# Keep this list of dependencies in sync with tox.ini.
install_requires = ['six>=1.9', 'setuptools']
if sys.version_info <= (2,7):
install_requires.append('ordereddict')
install_requires.append('unittest2')
setup(
name='protobuf',
version=GetVersion(),
description='Protocol Buffers',
download_url='https://github.com/google/protobuf/releases',
long_description="Protocol Buffers are Google's data interchange format",
url='https://developers.google.com/protocol-buffers/',
maintainer='[email protected]',
maintainer_email='[email protected]',
license='New BSD License',
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
namespace_packages=['google'],
packages=find_packages(
exclude=[
'import_test_package',
],
),
test_suite='google.protobuf.internal',
cmdclass={
'clean': clean,
'build_py': build_py,
'test_conformance': test_conformance,
},
install_requires=install_requires,
ext_modules=ext_module_list,
)
| mit | 2,703,297,802,181,223,000 | 39.399209 | 84 | 0.669993 | false |
rhelmer/socorro-collector | tests/test_throttler.py | 12 | 6818 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import mock
from socorro.lib.util import DotDict
from socorro.collector.throttler import (
LegacyThrottler,
ACCEPT,
DEFER,
DISCARD,
IGNORE
)
def testLegacyThrottler():
# phase 1 tests
config = DotDict()
    config.throttle_conditions = [ ('alpha', re.compile('ALPHA'), 100),     # regex match
                                   ('beta', 'BETA', 100),                   # string equality
                                   ('gamma', lambda x: x == 'GAMMA', 100),  # arbitrary predicate
                                   ('delta', True, 100),                    # always matches
                                   (None, True, 0)                          # catch-all: defer
                                 ]
config.minimal_version_for_understanding_refusal = {
'product1': '3.5',
'product2': '4.0'
}
config.never_discard = False
config.logger = mock.Mock()
thr = LegacyThrottler(config)
expected = 5
actual = len(thr.processed_throttle_conditions)
assert expected == actual, \
"expected thr.preprocessThrottleConditions to have length %d, but got " \
"%d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.0',
'alpha':'ALPHA',
})
expected = False
actual = thr.understands_refusal(raw_crash)
assert expected == actual, \
"understand refusal expected %d, but got %d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'alpha':'ALPHA',
})
expected = True
actual = thr.understands_refusal(raw_crash)
assert expected == actual, \
"understand refusal expected %d, but got %d instead" % (expected, actual)
expected = (ACCEPT, 100)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"regexp throttle expected %d, but got %d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.4',
'alpha':'not correct',
})
expected = (DEFER, 0)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"regexp throttle expected %d, but got %d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'alpha':'not correct',
})
expected = (DISCARD, 0)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"regexp throttle expected %d, but got %d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta':'BETA',
})
expected = (ACCEPT, 100)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"string equality throttle expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta':'not BETA',
})
expected = (DISCARD, 0)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"string equality throttle expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'gamma':'GAMMA',
})
expected = (ACCEPT, 100)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"string equality throttle expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'gamma':'not GAMMA',
})
expected = (DISCARD, 0)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"string equality throttle expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'delta':"value doesn't matter",
})
expected = (ACCEPT, 100)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"string equality throttle expected %d, but got %d instead" % \
(expected, actual)
# phase 2 tests
config = DotDict()
config.throttle_conditions = [
('*', lambda x: 'alpha' in x, None),
('*', lambda x: x['beta'] == 'BETA', 100),
]
config.minimal_version_for_understanding_refusal = {
'product1': '3.5',
'product2': '4.0'
}
config.never_discard = True
config.logger = mock.Mock()
thr = LegacyThrottler(config)
expected = 2
actual = len(thr.processed_throttle_conditions)
assert expected == actual, \
"expected thr.preprocessThrottleConditions to have length %d, but got " \
"%d instead" % (expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta': 'ugh',
'alpha':"value doesn't matter",
})
expected = (IGNORE, None)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"IGNORE expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta': 'ugh',
'delta':"value doesn't matter",
})
expected = (DEFER, 0)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"DEFER expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta': 'BETA',
'alpha':"value doesn't matter",
})
expected = (IGNORE, None)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"IGNORE expected %d, but got %d instead" % \
(expected, actual)
raw_crash = DotDict({ 'ProductName':'product1',
'Version':'3.6',
'beta': 'BETA',
'delta':"value doesn't matter",
})
expected = (ACCEPT, 100)
actual = thr.throttle(raw_crash)
assert expected == actual, \
"ACCEPT expected %d, but got %d instead" % \
(expected, actual)
| mpl-2.0 | -5,109,374,843,130,263,000 | 34.14433 | 79 | 0.505133 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/exploits/ZIBE/pyreadline/lineeditor/wordmatcher.py | 1 | 3330 | # -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2006 Jorgen Stenarson. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
import re, operator
def str_find_all(str, ch):
result = []
index = 0
while index >= 0:
index = str.find(ch, index)
if index >= 0:
result.append(index)
index += 1
return result
word_pattern = re.compile(u"(x*)")
def markwords(str, iswordfun):
    # Encode every character as u"x" (word character) or u"o" (non-word character).
    markers = {True : u"x", False : u"o"}
    return "".join([markers[iswordfun(ch)] for ch in str])
def split_words(str, iswordfun):
    # Split the x/o mask into alternating runs, e.g. u"xxoox" -> [u"xx", u"oo", u"x"].
    return [x for x in word_pattern.split(markwords(str,iswordfun)) if x != u""]
def mark_start_segment(str, is_segment):
    # Replace the first character of every word run with u"s": u"xxoox" -> u"sxoos".
    def mark_start(s):
        if s[0:1] == u"x":
            return u"s" + s[1:]
        else:
            return s
    return u"".join(map(mark_start, split_words(str, is_segment)))
def mark_end_segment(str, is_segment):
    # Replace the last character of every word run with u"s": u"xxoox" -> u"xsoos".
    def mark_end(s):
        if s[0:1] == u"x":
            return s[:-1] + u"s"
        else:
            return s
    return u"".join(map(mark_end, split_words(str, is_segment)))
def mark_start_segment_index(str, is_segment):
    # Indices where a word run starts.
    return str_find_all(mark_start_segment(str, is_segment), u"s")
def mark_end_segment_index(str, is_segment):
    # Indices one past the last character of each word run (exclusive ends).
    return [x + 1 for x in str_find_all(mark_end_segment(str, is_segment), u"s")]
################ Following are used in lineobj ###########################
def is_word_token(str):
return not is_non_word_token(str)
def is_non_word_token(str):
if len(str) != 1 or str in u" \t\n":
return True
else:
return False
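# Illustrative walk-through (added for clarity; the values below are worked
# out by hand and are not part of the original module). With is_word_token
# from above, u"ab cd" is marked as u"xxoxx", split_words() yields
# [u"xx", u"o", u"xx"], and the index helpers report the word boundaries:
#   mark_start_segment_index(u"ab cd", is_word_token)  ->  [0, 3]
#   mark_end_segment_index(u"ab cd", is_word_token)    ->  [2, 5]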
def next_start_segment(str, is_segment):
    # For each cursor position, the index of the next word start (len(str) if none).
    str = u"".join(str)
result = []
for start in mark_start_segment_index(str, is_segment):
result[len(result):start] = [start for x in range(start - len(result))]
result[len(result):len(str)] = [len(str) for x in range(len(str) - len(result) + 1)]
return result
def next_end_segment(str, is_segment):
    # For each cursor position, the index just past the end of the next word.
    str = u"".join(str)
result = []
for start in mark_end_segment_index(str, is_segment):
result[len(result):start] = [start for x in range(start - len(result))]
result[len(result):len(str)] = [len(str) for x in range(len(str) - len(result) + 1)]
return result
def prev_start_segment(str, is_segment):
    # For each cursor position, the index of the previous word start (0 if none).
    str = u"".join(str)
result = []
prev = 0
for start in mark_start_segment_index(str, is_segment):
result[len(result):start+1] = [prev for x in range(start - len(result) + 1)]
prev=start
result[len(result):len(str)] = [prev for x in range(len(str) - len(result) + 1)]
return result
def prev_end_segment(str, is_segment):
    # For each cursor position, the index just past the previous word end.
    str = u"".join(str)
result = []
prev = 0
for start in mark_end_segment_index(str, is_segment):
result[len(result):start + 1] = [prev for x in range(start - len(result) + 1)]
prev=start
result[len(result):len(str)] = [len(str) for x in range(len(str) - len(result) + 1)]
return result
| unlicense | 3,479,548,934,251,788,000 | 31.647059 | 100 | 0.554955 | false |
yugang/crosswalk-test-suite | webapi/tct-messaging-email-tizen-tests/inst.wgt.py | 2 | 7218 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
        if len(pkg_infos) < 6:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
(return_code, output) = doRemoteCMD("rm -rf %s/Images" % SRC_DIR)
if return_code != 0:
action_status = False
(return_code, output) = doRemoteCMD("rm -rf %s/Sounds" % SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD("mkdir -p %s/Images" % SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("%s/webapi-tizen-messaging-test_image.jpg" % SCRIPT_DIR, "%s/Images" % SRC_DIR):
action_status = False
(return_code, output) = doRemoteCMD("mkdir -p %s/Sounds" % SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("%s/webapi-tizen-messaging-test_noise.mp3" % SCRIPT_DIR, "%s/Sounds" % SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause | -3,413,121,798,800,158,700 | 29.455696 | 106 | 0.548074 | false |
grlee77/numpy | numpy/core/tests/test_indexerrors.py | 17 | 5130 | import numpy as np
from numpy.testing import (
assert_raises, assert_raises_regex,
)
class TestIndexErrors:
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
'take from a 0-length dimension'
x = np.empty((2, 3, 0, 4))
assert_raises(IndexError, x.take, [0], axis=2)
assert_raises(IndexError, x.take, [1], axis=2)
assert_raises(IndexError, x.take, [0], axis=2, mode='wrap')
assert_raises(IndexError, x.take, [0], axis=2, mode='clip')
def test_take_from_object(self):
# Check exception taking from object array
d = np.zeros(5, dtype=object)
assert_raises(IndexError, d.take, [6])
# Check exception taking from 0-d array
d = np.zeros((5, 0), dtype=object)
assert_raises(IndexError, d.take, [1], axis=1)
assert_raises(IndexError, d.take, [0], axis=1)
assert_raises(IndexError, d.take, [0])
assert_raises(IndexError, d.take, [0], mode='wrap')
assert_raises(IndexError, d.take, [0], mode='clip')
def test_multiindex_exceptions(self):
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.item, 20)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.item, (0, 0))
a = np.empty(5, dtype=object)
assert_raises(IndexError, a.itemset, 20, 0)
a = np.empty((5, 0), dtype=object)
assert_raises(IndexError, a.itemset, (0, 0), 0)
def test_put_exceptions(self):
a = np.zeros((5, 5))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0))
assert_raises(IndexError, a.put, 100, 0)
a = np.zeros((5, 5, 0), dtype=object)
assert_raises(IndexError, a.put, 100, 0)
def test_iterators_exceptions(self):
"cases in iterators.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a[0, 5, None, 2])
assert_raises(IndexError, lambda: a[0, 5, 0, 2])
assert_raises(IndexError, lambda: assign(a, (0, 5, None, 2), 1))
assert_raises(IndexError, lambda: assign(a, (0, 5, 0, 2), 1))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a[0, 0, None, 2])
assert_raises(IndexError, lambda: assign(a, (0, 0, None, 2), 1))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[10])
assert_raises(IndexError, lambda: assign(a.flat, 10, 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array(10)])
assert_raises(IndexError, lambda: assign(a.flat, np.array(10), 5))
a = np.zeros([1, 2, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
a = np.zeros([1, 0, 3])
assert_raises(IndexError, lambda: a.flat[np.array([10])])
assert_raises(IndexError, lambda: assign(a.flat, np.array([10]), 5))
def test_mapping(self):
"cases from mapping.c"
def assign(obj, ind, val):
obj[ind] = val
a = np.zeros((0, 10))
assert_raises(IndexError, lambda: a[12])
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(10, 20)])
assert_raises(IndexError, lambda: assign(a, (10, 20), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, 0)])
assert_raises(IndexError, lambda: assign(a, (1, 0), 1))
a = np.zeros((10,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((0,))
assert_raises(IndexError, lambda: assign(a, 10, 1))
a = np.zeros((3, 5))
assert_raises(IndexError, lambda: a[(1, [1, 20])])
assert_raises(IndexError, lambda: assign(a, (1, [1, 20]), 1))
a = np.zeros((3, 0))
assert_raises(IndexError, lambda: a[(1, [0, 1])])
assert_raises(IndexError, lambda: assign(a, (1, [0, 1]), 1))
def test_mapping_error_message(self):
a = np.zeros((3, 5))
index = (1, 2, 3, 4, 5)
assert_raises_regex(
IndexError,
"too many indices for array: "
"array is 2-dimensional, but 5 were indexed",
lambda: a[index])
def test_methods(self):
"cases from methods.c"
a = np.zeros((3, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
a = np.zeros((0, 3))
assert_raises(IndexError, lambda: a.item(100))
assert_raises(IndexError, lambda: a.itemset(100, 1))
| bsd-3-clause | 5,362,057,874,939,715,000 | 37.571429 | 76 | 0.566667 | false |
gismo141/pyew | plugins/pdf.py | 16 | 13682 | #!/usr/bin/env python
"""
This file is part of Pyew
Copyright (C) 2009, 2010 Joxean Koret
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import sys
import zlib
import urllib
import binascii
import tempfile
try:
from easygui import textbox, codebox, ccbox
hasEasyGui = True
except ImportError:
hasEasyGui = False
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
    from pdfid_PL import PDFiD2String, PDFiD
except ImportError:
    # pdfInfo() below depends on these; without pdfid_PL it will fail at call time.
    pass
FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' for x in range(256)])
# Shamelessly ripped from pyPDF
def ASCII85Decode(data):
retval = ""
group = []
x = 0
hitEod = False
# remove all whitespace from data
data = [y for y in data if not (y in ' \n\r\t')]
while not hitEod:
c = data[x]
if len(retval) == 0 and c == "<" and data[x+1] == "~":
x += 2
continue
#elif c.isspace():
# x += 1
# continue
        elif c == 'z':
            assert len(group) == 0
            retval += '\x00\x00\x00\x00'
            x += 1  # advance past 'z'; otherwise the loop never terminates
            continue
elif c == "~" and data[x+1] == ">":
if len(group) != 0:
# cannot have a final group of just 1 char
assert len(group) > 1
cnt = len(group) - 1
group += [ 85, 85, 85 ]
hitEod = cnt
else:
break
else:
c = ord(c) - 33
assert c >= 0 and c < 85
group += [ c ]
if len(group) >= 5:
b = group[0] * (85**4) + \
group[1] * (85**3) + \
group[2] * (85**2) + \
group[3] * 85 + \
group[4]
assert b < (2**32 - 1)
c4 = chr((b >> 0) % 256)
c3 = chr((b >> 8) % 256)
c2 = chr((b >> 16) % 256)
c1 = chr(b >> 24)
retval += (c1 + c2 + c3 + c4)
if hitEod:
retval = retval[:-4+hitEod]
group = []
x += 1
return retval
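# Illustrative usage (added for clarity, not part of the original plugin): the
# classic Ascii85 sample "9jqo^" decodes to the four bytes "Man ", and the
# optional <~ ~> delimiters are consumed by the loop above:
#   ASCII85Decode("<~9jqo^~>")  ->  "Man "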
# Shamelessly ripped from pdfminerr http://code.google.com/p/pdfminerr
def RunLengthDecode(data):
"""
RunLength decoder (Adobe version) implementation based on PDF Reference
version 1.4 section 3.3.4:
The RunLengthDecode filter decodes data that has been encoded in a
simple byte-oriented format based on run length. The encoded data
is a sequence of runs, where each run consists of a length byte
followed by 1 to 128 bytes of data. If the length byte is in the
range 0 to 127, the following length + 1 (1 to 128) bytes are
copied literally during decompression. If length is in the range
129 to 255, the following single byte is to be copied 257 - length
(2 to 128) times during decompression. A length value of 128
denotes EOD.
>>> s = "\x05123456\xfa7\x04abcde\x80junk"
>>> rldecode(s)
'1234567777777abcde'
"""
decoded = []
i=0
while i < len(data):
#print "data[%d]=:%d:" % (i,ord(data[i]))
length = ord(data[i])
if length == 128:
break
if length >= 0 and length < 128:
run = data[i+1:(i+1)+(length+1)]
#print "length=%d, run=%s" % (length+1,run)
decoded.append(run)
i = (i+1) + (length+1)
if length > 128:
run = data[i+1]*(257-length)
#print "length=%d, run=%s" % (257-length,run)
decoded.append(run)
i = (i+1) + 1
return ''.join(decoded)
def unescape(buf):
buf = buf.replace("#", "%")
buf = urllib.unquote(buf)
return buf
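# Illustrative example (input assumed, not from the original source):
#   unescape("/Open#41ction") -> "/OpenAction"
# PDF name escapes like "#41" become "%41" and are then decoded by
# urllib.unquote.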
# Shamelessly ripped from pdfminerr http://code.google.com/p/pdfminerr
class LZWDecoder(object):
debug = 0
def __init__(self, fp):
self.fp = fp
self.buff = 0
self.bpos = 8
self.nbits = 9
self.table = None
self.prevbuf = None
return
def readbits(self, bits):
v = 0
while 1:
# the number of remaining bits we can get from the current buffer.
r = 8-self.bpos
if bits <= r:
# |-----8-bits-----|
# |-bpos-|-bits-| |
# | |----r----|
v = (v<<bits) | ((self.buff>>(r-bits)) & ((1<<bits)-1))
self.bpos += bits
break
else:
# |-----8-bits-----|
# |-bpos-|---bits----...
# | |----r----|
v = (v<<r) | (self.buff & ((1<<r)-1))
bits -= r
x = self.fp.read(1)
if not x: raise EOFError
self.buff = ord(x)
self.bpos = 0
return v
def feed(self, code):
x = ''
if code == 256:
self.table = [ chr(c) for c in xrange(256) ] # 0-255
self.table.append(None) # 256
self.table.append(None) # 257
self.prevbuf = ''
self.nbits = 9
elif code == 257:
pass
elif not self.prevbuf:
x = self.prevbuf = self.table[code]
else:
if code < len(self.table):
x = self.table[code]
self.table.append(self.prevbuf+x[0])
else:
self.table.append(self.prevbuf+self.prevbuf[0])
x = self.table[code]
l = len(self.table)
if l == 511:
self.nbits = 10
elif l == 1023:
self.nbits = 11
elif l == 2047:
self.nbits = 12
self.prevbuf = x
return x
def run(self):
while 1:
try:
code = self.readbits(self.nbits)
except EOFError:
break
x = self.feed(code)
yield x
if self.debug:
                print >>sys.stderr, ('nbits=%d, code=%d, output=%r, table=%r' %
                                     (self.nbits, code, x, self.table[258:]))
return
# lzwdecode
def LZWDecode(data):
"""
>>> lzwdecode('\x80\x0b\x60\x50\x22\x0c\x0c\x85\x01')
'\x2d\x2d\x2d\x2d\x2d\x41\x2d\x2d\x2d\x42'
"""
fp = StringIO(data)
return ''.join(LZWDecoder(fp).run())
def pdfInfo(pyew, doprint=True):
""" Get the information about the PDF """
if not pyew.physical:
filename = tempfile.mkstemp("pyew")[1]
f = file(filename, "wb")
f.write(pyew.getBuffer())
f.close()
else:
filename = pyew.filename
print PDFiD2String(PDFiD(filename, False, True, False, False), False)
def pdfStreams(pyew, doprint=True, get_buf=False):
""" Get information about the streams """
buf = pyew.getBuffer()
tokens = re.split("[,<>;\[\](:)'\r\n\t/ ]", buf)
bfilters = False
filters = []
stream_filters = {}
streams = 0
for token in tokens:
if token == '':
continue
token = unescape(token)
if token == "Filter":
bfilters = True
elif token == "stream":
streams += 1
elif token == "endstream":
bfilters = False
if filters != []:
stream_filters[streams] = filters
filters = []
elif bfilters and token.lower().find("decode") > -1:
filters.append(token)
if doprint:
for stream in stream_filters:
for filter in stream_filters[stream]:
print "Stream %d uses %s" % (stream, filter.replace("[", "").replace("]", ""))
if not get_buf:
return stream_filters
else:
return stream_filters, buf
def pdfViewStreams(pyew, doprint=True, stream_id=-1, gui=False):
""" Show decoded streams """
streams_filters, buf = pdfStreams(pyew, doprint=False, get_buf=True)
streams = 0
while 1:
pos = buf.find("stream")
if pos == -1:
break
streams += 1
pos2 = buf.find("endstream")
# -8 means -len("stream")
#tmp = buf[pos+8:pos2-1]
tmp = buf[pos+6:pos2]
tmp = tmp.lstrip(" ")
failed = False
dones = []
if stream_id == -1 or streams == stream_id:
if streams_filters.has_key(streams):
for filter in streams_filters[streams]:
try:
print "Applying Filter %s ..." % filter
if filter in dones:
print pyew.hexdump(tmp, pyew.hexcolumns)
msg = "The filter %s is already applied, it seems to be a PDF Bomb."
msg += os.linesep + "Do you want to apply it? "
ret = raw_input(msg % filter)
if ret != "y":
continue
else:
dones.append(filter)
if filter == "FlateDecode":
tmp = zlib.decompress(tmp.strip("\r").strip("\n"))
elif filter == "ASCIIHexDecode":
tmp = binascii.unhexlify(tmp.replace("\r", "").replace("\n", "").replace(" ", "").strip("<").strip(">"))
elif filter == "ASCII85Decode":
tmp = ASCII85Decode(tmp.strip("\r").strip("\n"))
elif filter == "RunLengthDecode":
tmp = RunLengthDecode(tmp)
elif filter == "LZWDecode":
tmp = LZWDecode(tmp)
except:
failed = True
print "Error applying filter %s" % filter, sys.exc_info()[1]
print "Encoded Stream %d" % streams
else:
print "Stream %d" % streams
if not gui:
print "-"*80
if tmp.find("\x00") == -1:
print tmp
else:
print pyew.hexdump(tmp, pyew.hexcolumns)
print "-"*80
else:
if tmp.find("\x00") == -1:
textbox("Stream %d" % streams, "Stream", tmp)
else:
codebox("Stream %d" % streams, "Stream", pyew.hexdump(tmp, pyew.hexcolumns))
if tmp.find("\x00") > -1 and not failed and not gui:
res = raw_input("Show disassembly (y/n)? [n]: ")
if res == "y":
print pyew.disassemble(tmp)
buf = buf[pos2+11:]
if buf.find("stream") == -1:
break
if stream_id == -1:
try:
if not gui:
res = raw_input("Continue? ")
if res in ["q", "n"]:
break
else:
if not ccbox("Do you want to continue?", "Streams Viewer"):
break
except:
break
elif stream_id == streams:
break
def pdfViewGui(pyew, doprint=True, stream_id=-1, args=None):
""" Show decoded streams (in a GUI) """
return pdfViewStreams(pyew, doprint=doprint, stream_id=stream_id, gui=True)
def pdfObj(pyew, doprint=True, args=None):
""" Show object's list """
pyew.dosearch(pyew.f, "r", "\d+ \d+ obj.*", cols=60, doprint=True, offset=0)
def pdfStream(pyew, doprint=True, args=None):
""" Show streams list """
l = []
hits = pyew.dosearch(pyew.f, "s", "stream", cols=60, doprint=False, offset=0)
buf = pyew.getBuffer()
for hit in hits:
key, value = hit.keys()[0], hit.values()[0]
if buf[key-1:key] != "d":
l.append(key)
if doprint:
print "HINT[0x%08x]: %s" % (key, value.translate(FILTER))
return l
def pdfSeekObj(pyew, args=None):
""" Seek to one object """
if args == None:
print "An argument is required"
return False
num = args[0].strip(" ")
d = pyew.dosearch(pyew.f, "r", "\d+ \d+ obj.*", cols=60, doprint=False, offset=0)
for element in d:
pos = element.keys()[0]
if element.values()[0].split(" ")[0] == num:
pyew.seek(pos)
return True
print "Object not found"
return False
def pdfSeekStream(pyew, args=None):
""" Seek to one stream """
if not args:
print "An argument is required"
return False
l = pdfStream(pyew, doprint=False)
num = int(args[0])-1
if num > len(l):
print "Last stream is %d" % len(l)
else:
pyew.seek(l[num])
functions = {"pdf":pdfInfo,
"pdfilter":pdfStreams,
"pdfobj":pdfObj,
"pdfstream":pdfStream,
"pdfso":pdfSeekObj,
"pdfss":pdfSeekStream}
if hasEasyGui:
functions["pdfvi"] = pdfViewStreams
functions["pdfview"] = pdfViewGui
| gpl-2.0 | 2,374,352,709,856,762,400 | 30.671296 | 132 | 0.484944 | false |
ric2b/Vivaldi-browser | chromium/third_party/blink/renderer/build/scripts/name_utilities.py | 2 | 2769 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from blinkbuild.name_style_converter import NameStyleConverter
def cpp_bool(value):
if value is True:
return 'true'
if value is False:
return 'false'
# Return value as is, which for example may be a platform-dependent constant
# such as "defaultSelectTrailingWhitespaceEnabled".
return value
def cpp_name(entry):
return entry['ImplementedAs'] or entry['name'].original
def enum_key_for_css_keyword(keyword):
return 'k' + _upper_camel_case(keyword)
def enum_key_for_css_property(property_name):
return 'k' + _upper_camel_case(property_name)
def enum_key_for_css_property_alias(property_name):
return 'kAlias' + property_name.to_upper_camel_case()
# This id is used to build function names returning CSS properties (e.g.
# GetCSSPropertyX(), GetCSSPropertyXInternal(), etc.)
def id_for_css_property(property_name):
return 'CSSProperty' + _upper_camel_case(property_name)
def id_for_css_property_alias(property_name):
return 'CSSPropertyAlias' + property_name.to_upper_camel_case()
def _upper_camel_case(property_name):
converter = NameStyleConverter(property_name) if isinstance(property_name, str) else property_name
return converter.to_upper_camel_case()
| bsd-3-clause | 1,987,616,045,949,472,300 | 38.557143 | 102 | 0.757313 | false |
chitianhao/trafficserver | tests/gold_tests/runroot/runroot_use.test.py | 3 | 2925 | '''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
Test.Summary = '''
Test for using runroot from traffic_layout.
'''
Test.ContinueOnFail = True
Test.SkipUnless(Test.Variables.BINDIR.startswith(Test.Variables.PREFIX),
"need to guarantee bin path starts with prefix for runroot")
# create two runroot for testing
path = os.path.join(Test.RunDirectory, "runroot")
tr = Test.AddTestRun()
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout init --path " + path
f = tr.Disk.File(os.path.join(path, "runroot.yaml"))
f.Exists = True
path2 = os.path.join(Test.RunDirectory, "runroot2")
tr = Test.AddTestRun()
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout init --path " + path2
f = tr.Disk.File(os.path.join(path2, "runroot.yaml"))
f.Exists = True
# 1. --run-root use path cmd
tr = Test.AddTestRun("use runroot via commandline")
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout info --run-root=" + path
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "commandline runroot path")
# 2. use cwd as runroot
tr = Test.AddTestRun("use runroot via cwd")
tr.Processes.Default.Command = "cd " + path + ";" + "$ATS_BIN/traffic_layout info"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "cwd runroot path")
# 3. use the runroot bin executable directly
bin_path = Test.Variables.BINDIR[Test.Variables.BINDIR.find(Test.Variables.PREFIX) + len(Test.Variables.PREFIX) + 1:]
tr = Test.AddTestRun("use runroot via bin executable")
tr.Processes.Default.Command = os.path.join(path, os.path.join(bin_path, "traffic_layout") + " info")
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path, "bin path")
# 4. use runroot via the TS_RUNROOT env variable
tr = Test.AddTestRun("use runroot via TS_RUNROOT")
tr.Processes.Default.Env["TS_RUNROOT"] = path2
tr.Processes.Default.Command = "$ATS_BIN/traffic_layout info"
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("PREFIX: " + path2, "$TS_RUNROOT Env path")
| apache-2.0 | -5,781,999,822,426,159,000 | 42.656716 | 117 | 0.742222 | false |
NoBodyCam/TftpPxeBootBareMetal | tools/hacking.py | 1 | 16186 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import fnmatch
import inspect
import logging
import os
import re
import subprocess
import sys
import tokenize
import warnings
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx calling methods
#N7xx localization
#N8xx git commit messages
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = False
# Monkey patch broken excluded filter in pep8
def filename_match(filename, patterns, default=True):
"""
Check if patterns contains a pattern that matches filename.
If patterns is unspecified, this always returns True.
"""
if not patterns:
return default
return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)
def excluded(filename):
"""
Check if options.exclude contains a pattern that matches filename.
"""
basename = os.path.basename(filename)
return any((filename_match(filename, pep8.options.exclude,
default=False),
filename_match(basename, pep8.options.exclude,
default=False)))
def input_dir(dirname, runner=None):
"""
Check all Python source files in this directory and all subdirectories.
"""
dirname = dirname.rstrip('/')
if excluded(dirname):
return
if runner is None:
runner = pep8.input_file
for root, dirs, files in os.walk(dirname):
if pep8.options.verbose:
print('directory ' + root)
pep8.options.counters['directories'] += 1
dirs.sort()
for subdir in dirs[:]:
if excluded(os.path.join(root, subdir)):
dirs.remove(subdir)
files.sort()
for filename in files:
if pep8.filename_match(filename) and not excluded(filename):
pep8.options.counters['files'] += 1
runner(os.path.join(root, filename))
def is_import_exception(mod):
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
def import_normalize(line):
# convert "from x import y" to "import x.y"
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if (line.startswith("from ") and "," not in line and
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
(len(split_line) == 6 and split_line[4] == "as"))):
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
def nova_todo_format(physical_line):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
Include your name with TODOs as in "#TODO(termie)"
N101
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure it's a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
"""Check for 'except:'.
nova HACKING guide recommends not using except:
Do not write "except:", use "except Exception:" at the very least
N201
"""
if logical_line.startswith("except:"):
return 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
"""Check for 'assertRaises(Exception'.
nova HACKING guide recommends not using assertRaises(Exception...):
Do not use overly broad Exception type
N202
"""
if logical_line.startswith("self.assertRaises(Exception"):
return 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
"""Check for import format.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
BAD: from nova.rpc.common import RemoteError, LOG
N301
"""
pos = logical_line.find(',')
parts = logical_line.split()
if (pos > -1 and (parts[0] == "import" or
parts[0] == "from" and parts[2] == "import") and
not is_import_exception(parts[1])):
return pos, "NOVA N301: one import per line"
_missingImport = set([])
def nova_import_module_only(logical_line):
"""Check for import module only.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
N302 import only modules
N303 Invalid Import
N304 Relative Import
"""
def importModuleCheck(mod, parent=None, added=False):
"""
If can't find module on first try, recursively check for relative
imports
"""
current_path = os.path.dirname(pep8.current_file)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
if is_import_exception(parent):
return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
valid = inspect.ismodule(sys.modules[mod])
if not valid:
if added:
sys.path.pop()
added = False
return logical_line.find(mod), ("NOVA N304: No "
"relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("NOVA N302: import only "
"modules. '%s' does not import a module"
% logical_line)
except (ImportError, NameError) as exc:
if not added:
added = True
sys.path.append(current_path)
return importModuleCheck(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT:
print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
(name, exc))
_missingImport.add(name)
added = False
sys.path.pop()
return
except AttributeError:
# Invalid import
return logical_line.find(mod), ("NOVA N303: Invalid import, "
"AttributeError raised")
# convert "from x import y" to " import x.y"
# convert "from x import y as z" to " import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
if (logical_line.startswith("import ") and "," not in logical_line and
(len(split_line) == 2 or
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
return importModuleCheck(mod)
# TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
def nova_import_alphabetical(physical_line, line_number, lines):
"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
N306
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(physical_line.strip()).lower().split()
split_previous = import_normalize(lines[line_number - 2]
).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0, "NOVA N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
def nova_docstring_start_space(physical_line):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) > pos + 1):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N401: one line docstring should not start with"
" a space")
def nova_docstring_one_line(physical_line):
"""Check one line docstring end.
nova HACKING guide recommendation for one line docstring:
A one line docstring looks like this and ends in a period.
N402
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "NOVA N402: one line docstring needs a period"
def nova_docstring_multiline_end(physical_line):
"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
N403
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N403: multi line docstring end on new line")
FORMAT_RE = re.compile("%(?:"
"%|" # Ignore plain percents
"(\(\w+\))?" # mapping key
"([#0 +-]?" # flag
"(?:\d+|\*)?" # width
"(?:\.\d+)?" # precision
"[hlL]?" # length mod
"\w))") # type
class LocalizationError(Exception):
pass
def check_i18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, line = yield
except GeneratorExit:
return
if (token_type == tokenize.NAME and text == "_" and
not line.startswith('def _(msg):')):
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(start,
"NOVA N701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(start,
"NOVA N701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(start,
"NOVA N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(start,
"NOVA N702: Use bare string concatenation instead"
" of +")
else:
raise LocalizationError(start,
"NOVA N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start,
"NOVA N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
"""Check localization in line.
N701: bad localization call
N702: complex expression instead of string as argument to _()
N703: multiple positional placeholders
"""
gen = check_i18n()
next(gen)
try:
map(gen.send, tokens)
gen.close()
except LocalizationError as e:
return e.args
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
"""Monkey patch in nova guidelines.
Look for functions that start with nova_ and have arguments
and add them to pep8 module
Assumes you know how to write pep8.py checks
"""
for name, function in globals().items():
if not inspect.isfunction(function):
continue
args = inspect.getargspec(function)[0]
if args and name.startswith("nova"):
exec("pep8.%s = %s" % (name, name))
def once_git_check_commit_title():
"""Check git commit messages.
nova HACKING recommends not referencing a bug or blueprint in first line,
it should provide an accurate description of the change
N801
    N802 Title limited to 72 chars
"""
#Get title of most recent commit
subp = subprocess.Popen(['git', 'log', '--pretty=%s', '-1'],
stdout=subprocess.PIPE)
title = subp.communicate()[0]
if subp.returncode:
raise Exception("git log failed with code %s" % subp.returncode)
#From https://github.com/openstack/openstack-ci-puppet
# /blob/master/modules/gerrit/manifests/init.pp#L74
#Changeid|bug|blueprint
git_keywords = (r'(I[0-9a-f]{8,40})|'
'([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|'
'([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)')
GIT_REGEX = re.compile(git_keywords)
#NOTE(jogo) if match regex but over 3 words, acceptable title
if GIT_REGEX.search(title) is not None and len(title.split()) <= 3:
print ("N801: git commit title ('%s') should provide an accurate "
"description of the change, not just a reference to a bug "
"or blueprint" % title.strip())
if len(title.decode('utf-8')) > 72:
print ("N802: git commit title ('%s') should be under 50 chars"
% title.strip())
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
#Run once tests (not per line)
once_git_check_commit_title()
#NOVA error codes start with an N
pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
pep8.excluded = excluded
pep8.input_dir = input_dir
try:
pep8._main()
finally:
if len(_missingImport) > 0:
print >> sys.stderr, ("%i imports missing in this test environment"
% len(_missingImport))
| apache-2.0 | 5,683,344,426,554,000,000 | 32.861925 | 79 | 0.575188 | false |
yongshengwang/hue | desktop/core/ext-py/MarkupSafe-0.9.3/setup.py | 38 | 3105 | import os
import sys
from setuptools import setup, Extension, Feature
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, \
DistutilsPlatformError
# fail safe compilation shamelessly stolen from the simplejson
# setup.py file. Original author: Bob Ippolito
speedups = Feature(
'optional C speed-enhancement module',
standard=True,
ext_modules = [
Extension('markupsafe._speedups', ['markupsafe/_speedups.c']),
],
)
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
extra = {}
if sys.version_info >= (3, 0):
extra['use_2to3'] = True
class BuildFailed(Exception):
pass
class ve_build_ext(build_ext):
"""This class allows C extension building to fail."""
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
def echo(msg=''):
sys.stdout.write(msg + '\n')
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
def run_setup(with_binary):
features = {}
if with_binary:
features['speedups'] = speedups
setup(
name='MarkupSafe',
version='0.9.3',
url='http://dev.pocoo.org/',
license='BSD',
author='Armin Ronacher',
author_email='[email protected]',
description='Implements a XML/HTML/XHTML Markup safe string for Python',
long_description=readme,
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
],
packages=['markupsafe'],
test_suite='markupsafe.tests.suite',
include_package_data=True,
cmdclass={'build_ext': ve_build_ext},
features=features,
**extra
)
try:
run_setup(True)
except BuildFailed:
LINE = '=' * 74
BUILD_EXT_WARNING = 'WARNING: The C extension could not be compiled, speedups are not enabled.'
echo(LINE)
echo(BUILD_EXT_WARNING)
echo('Failure information, if any, is above.')
echo('Retrying the build without the C extension now.')
echo()
run_setup(False)
echo(LINE)
echo(BUILD_EXT_WARNING)
echo('Plain-Python installation succeeded.')
echo(LINE)
| apache-2.0 | -3,455,539,869,542,415,000 | 26.723214 | 99 | 0.622866 | false |
hesam-setareh/nest-simulator | topology/pynest/tests/test_dumping.py | 9 | 3718 | # -*- coding: utf-8 -*-
#
# test_dumping.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for topology hl_api dumping functions.
NOTE: These tests only check whether the code runs; they do not check
whether the results produced are correct.
"""
import unittest
import nest
import nest.topology as topo
import sys
import os
import os.path
class PlottingTestCase(unittest.TestCase):
def nest_tmpdir(self):
"""Returns temp dir path from environment, current dir otherwise."""
if 'NEST_DATA_PATH' in os.environ:
return os.environ['NEST_DATA_PATH']
else:
return '.'
def test_DumpNodes(self):
"""Test dumping nodes."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l, os.path.join(self.nest_tmpdir(),
'test_DumpNodes.out.lyr'))
self.assertTrue(True)
def test_DumpNodes2(self):
"""Test dumping nodes, two layers."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.DumpLayerNodes(l * 2, os.path.join(self.nest_tmpdir(),
'test_DumpNodes2.out.lyr'))
self.assertTrue(True)
def test_DumpConns(self):
"""Test dumping connections."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns.out.cnn'))
self.assertTrue(True)
def test_DumpConns2(self):
"""Test dumping connections, 2 layers."""
ldict = {'elements': 'iaf_psc_alpha', 'rows': 3, 'columns': 3,
'extent': [2., 2.], 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.}}}
nest.ResetKernel()
l = topo.CreateLayer(ldict)
topo.ConnectLayers(l, l, cdict)
topo.DumpLayerConnections(l * 2, 'static_synapse',
os.path.join(self.nest_tmpdir(),
'test_DumpConns2.out.cnn'))
self.assertTrue(True)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
try:
import matplotlib.pyplot as plt
plt.show()
except ImportError:
pass
| gpl-2.0 | 2,658,483,919,810,561,000 | 31.614035 | 76 | 0.575847 | false |
sethkontny/blaze | blaze/io/sql/ops.py | 4 | 3542 | """SQL implementations of element-wise ufuncs."""
from __future__ import absolute_import, division, print_function
from ...compute.function import BlazeFunc
from ...compute.ops import ufuncs
from .kernel import SQL
from .syntax import Call, Expr, QOrderBy, QWhere, And, Or, Not
def sqlfunction(signature):
def decorator(f):
bf = BlazeFunc('blaze', f.__name__)
# FIXME: Adding a dummy CKERNEL overload to make things work for now
bf.add_overload(signature, None)
bf.add_plugin_overload(signature, f, SQL)
return bf
return decorator
def overload_unop_ufunc(signature, name, op):
"""Add a unary sql overload to a blaze ufunc"""
def unop(x):
return Expr([op, x])
unop.__name__ = name
bf = getattr(ufuncs, name)
bf.add_plugin_overload(signature, unop, SQL)
def overload_binop_ufunc(signature, name, op):
"""Add a binary sql overload to a blaze ufunc"""
def binop(a, b):
return Expr([a, op, b])
binop.__name__ = name
bf = getattr(ufuncs, name)
bf.add_plugin_overload(signature, binop, SQL)
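# Illustrative effect (assuming Expr simply records its token list): the
# "add" overload registered below maps add(a, b) to the SQL fragment
# "a + b", which is evaluated by the SQL backend instead of in Python.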
# Arithmetic
overload_binop_ufunc("(T, T) -> T", "add", "+")
overload_binop_ufunc("(T, T) -> T", "multiply", "*")
overload_binop_ufunc("(T, T) -> T", "subtract", "-")
overload_binop_ufunc("(T, T) -> T", "floor_divide", "/")
overload_binop_ufunc("(T, T) -> T", "divide", "/")
overload_binop_ufunc("(T, T) -> T", "true_divide", "/")
overload_binop_ufunc("(T, T) -> T", "mod", "%")
overload_unop_ufunc("(T) -> T", "negative", "-")
# Compare
overload_binop_ufunc("(T, T) -> bool", "equal", "==")
overload_binop_ufunc("(T, T) -> bool", "not_equal", "!=")
overload_binop_ufunc("(T, T) -> bool", "less", "<")
overload_binop_ufunc("(T, T) -> bool", "less_equal", "<=")
overload_binop_ufunc("(T, T) -> bool", "greater", ">")
overload_binop_ufunc("(T, T) -> bool", "greater_equal", ">=")
# Logical
overload_binop_ufunc("(bool, bool) -> bool",
"logical_and", "AND")
overload_binop_ufunc("(bool, bool) -> bool",
"logical_or", "OR")
overload_unop_ufunc("(bool) -> bool", "logical_not", "NOT")
def logical_xor(a, b):
# Potential exponential code generation...
return And(Or(a, b), Not(And(a, b)))
ufuncs.logical_xor.add_plugin_overload("(bool, bool) -> bool",
logical_xor, SQL)
# SQL Functions
@sqlfunction('(A * DType) -> DType')
def sum(col):
return Call('SUM', [col])
@sqlfunction('(A * DType) -> DType')
def avg(col):
return Call('AVG', [col])
@sqlfunction('(A * DType) -> DType')
def min(col):
return Call('MIN', [col])
@sqlfunction('(A * DType) -> DType')
def max(col):
return Call('MAX', [col])
# SQL Join, Where, Group by, Order by
def merge(left, right, how='left', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=True):
"""
Join two tables.
"""
raise NotImplementedError
def index(col, index, order=None):
"""
Index a table or column with a predicate.
view = merge(table1, table2)
result = view[table1.id == table2.id]
or
avg(table1.age[table1.state == 'TX'])
"""
result = sqlindex(col, index)
if order:
result = sqlorder(result, order)
return result
@sqlfunction('(A * S, A * B) -> var * S')
def sqlindex(col, where):
return QWhere(col, where)
@sqlfunction('(A * S, A * B) -> A * S')
def sqlorder(col, by):
if not isinstance(by, (tuple, list)):
by = [by]
return QOrderBy(col, by)
| bsd-3-clause | -5,534,572,950,848,651,000 | 26.457364 | 76 | 0.592038 | false |
trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-core/src/python/gnuradio/blksimpl/wfm_tx.py | 1 | 3136 | #
# Copyright 2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math
from gnuradio import gr, optfir
from gnuradio.blksimpl.fm_emph import fm_preemph
class wfm_tx(gr.hier_block):
def __init__(self, fg, audio_rate, quad_rate, tau=75e-6, max_dev=75e3):
"""
Wide Band FM Transmitter.
Takes a single float input stream of audio samples in the range [-1,+1]
and produces a single FM modulated complex baseband output.
@param fg: flow graph
@param audio_rate: sample rate of audio stream, >= 16k
@type audio_rate: integer
@param quad_rate: sample rate of output stream
@type quad_rate: integer
@param tau: preemphasis time constant (default 75e-6)
@type tau: float
@param max_dev: maximum deviation in Hz (default 75e3)
@type max_dev: float
quad_rate must be an integer multiple of audio_rate.
"""
# FIXME audio_rate and quad_rate ought to be exact rationals
audio_rate = int(audio_rate)
quad_rate = int(quad_rate)
if quad_rate % audio_rate != 0:
raise ValueError, "quad_rate is not an integer multiple of audio_rate"
do_interp = audio_rate != quad_rate
if do_interp:
interp_factor = quad_rate / audio_rate
interp_taps = optfir.low_pass (interp_factor, # gain
quad_rate, # Fs
16000, # passband cutoff
18000, # stopband cutoff
0.1, # passband ripple dB
40) # stopband atten dB
print "len(interp_taps) =", len(interp_taps)
self.interpolator = gr.interp_fir_filter_fff (interp_factor, interp_taps)
self.preemph = fm_preemph (fg, quad_rate, tau=tau)
k = 2 * math.pi * max_dev / quad_rate
self.modulator = gr.frequency_modulator_fc (k)
if do_interp:
fg.connect (self.interpolator, self.preemph, self.modulator)
gr.hier_block.__init__(self, fg, self.interpolator, self.modulator)
else:
fg.connect(self.preemph, self.modulator)
gr.hier_block.__init__(self, fg, self.preemph, self.modulator)
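# Minimal usage sketch (rates are illustrative; the legacy flow-graph API
# is assumed, matching the gr.hier_block style used above):
#   fg = gr.flow_graph()
#   tx = wfm_tx(fg, audio_rate=32000, quad_rate=320000)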
| gpl-3.0 | 8,647,302,850,588,590,000 | 38.696203 | 85 | 0.596301 | false |
akshar-raaj/importd | importd/__init__.py | 1 | 24831 | # -*- coding: utf-8 -*-
"""ImportD django mini framework."""
__version__ = "0.3.3"
__license__ = "BSD"
__author__ = "Amit Upadhyay"
__email__ = "[email protected]"
__url__ = "http://amitu.com/importd"
__source__ = "https://github.com/amitu/importd"
__docformat__ = "html"
# stdlib imports
import copy
import inspect
import os
import sys
import traceback
from datetime import datetime
from getpass import getuser
from platform import python_version
# 3rd party imports
import dj_database_url
import django.core.urlresolvers
import django
from importd import urlconf # lint:ok
from django.conf import settings
from collections import Callable
# custom imports
try:
import importlib
except ImportError:
from django.utils import importlib # lint:ok
try:
import debug_toolbar # lint:ok
except ImportError:
debug_toolbar = None
try:
import werkzeug # lint:ok
import django_extensions # lint:ok
except ImportError:
django_extensions = werkzeug = None
try:
import django_jinja # lint:ok
DJANGO_JINJA = True
except ImportError:
DJANGO_JINJA = False
try:
import coffin # lint:ok
COFFIN = True
except ImportError:
COFFIN = False
try:
import resource
except ImportError:
resource = None
start_time = datetime.now()
if python_version().startswith('3'):
basestring = unicode = str # lint:ok
# coffin is not python 3 compatible library
COFFIN = False
# cannot use django-jinja, coffin both. primary library is coffin.
if COFFIN and DJANGO_JINJA:
DJANGO_JINJA = False
##############################################################################
class SmartReturnMiddleware(object):
"""
Smart response middleware for views.
Converts view return to the following:
HttpResponse - stays the same
string - renders the template named in the string
    (string, dict) - renders the named template with the dict as context.
object - renders JSONResponse of the object
"""
def process_view(self, request, view_func, view_args, view_kwargs):
"""Take request and view function and process with arguments."""
from django.shortcuts import render_to_response
from django.template import RequestContext
try:
from django.http.response import HttpResponseBase as RBase
except ImportError:
from django.http import HttpResponse as RBase # lint:ok
from fhurl import JSONResponse
res = view_func(request, *view_args, **view_kwargs)
if isinstance(res, basestring):
res = res, {}
if isinstance(res, RBase):
return res
if isinstance(res, tuple):
template_name, context = res
res = render_to_response(template_name, context,
RequestContext(request))
else:
res = JSONResponse(res)
return res
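# Illustrative views under this middleware (names are assumptions):
#   def page(request):
#       return "page.html", {"user": request.user}  # template + context
#   def api(request):
#       return {"ok": True}                          # becomes JSONResponse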
class Blueprint(object):
"""
    Blueprint is a way to group urls.
    This class stores blueprint info.
    Instances of the Blueprint class are used during D object initialization.
"""
def __init__(self):
"""Init class."""
self.url_prefix, self.namespace, self.app_name = None, None, None
from django.conf.urls import patterns
self.patterns = patterns('')
from smarturls import surl
self.surl = surl
from fhurl import fhurl
self.fhurl = fhurl
def add_view(self, regex, view, app=None, *args, **kw):
"""Take a regular expression and add a view to the app patterns."""
url = self.surl(regex, view, *args, **kw)
self.patterns.append(url)
def add_form(self, regex, form_cls, app=None, *args, **kw):
"""Take a regular expression and add a form to the app patterns."""
url = self.fhurl(regex, form_cls, *args, **kw)
self.patterns.append(url)
def __call__(self, *args, **kw):
"""Call the class instance."""
if isinstance(args[0], Callable):
self.add_view("/{}/".format(args[0].__name__), args[0])
return args[0]
def ddecorator(candidate):
from django.forms import forms # the following is unsafe
if isinstance(candidate, forms.DeclarativeFieldsMetaclass):
self.add_form(args[0], candidate, *args[1:], **kw)
return candidate
self.add_view(args[0], candidate, *args[1:], **kw)
return candidate
return ddecorator
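# Minimal usage sketch (names assumed, not from the original source):
#   bp = Blueprint()
#   @bp("/hello/")
#   def hello(request):
#       return "hello.html", {}  # handled by SmartReturnMiddleware
#   # later: d.register_blueprint(bp, url_prefix="app/", namespace="app")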
##############################################################################
class D(object):
"""D Main Class."""
def __init__(self):
"""Init class."""
self.blueprint_list = []
@property
def urlpatterns(self):
"""Return the regex patterns."""
return self.get_urlpatterns()
def _is_management_command(self, cmd):
"""Take a string argument and return boolean of its a command."""
return cmd in ("runserver", "shell")
def _handle_management_command(self, cmd, *args, **kw):
"""Take command and arguments and call them using Django."""
if not hasattr(self, "_configured"):
self._configure_django(DEBUG=True)
from django.core import management
management.call_command(cmd, *args, **kw)
def update_regexers(self, regexers):
"""Update regular expressions."""
self.regexers.update(regexers)
def update_urls(self, urls):
"""Update regular urls."""
urlpatterns = self.get_urlpatterns()
urlpatterns += urls
def get_urlpatterns(self):
"""Return url patterns."""
urlconf_module = importlib.import_module(settings.ROOT_URLCONF)
return urlconf_module.urlpatterns
def _import_django(self):
"""Do the Django imports."""
from smarturls import surl # issue #19. manual imports
self.surl = surl
from django.http import HttpResponse, Http404, HttpResponseRedirect
self.HttpResponse = HttpResponse
self.Http404, self.HttpResponseRedirect = Http404, HttpResponseRedirect
from django.shortcuts import (get_object_or_404, get_list_or_404,
render_to_response, render, redirect)
self.get_object_or_404 = get_object_or_404
self.get_list_or_404 = get_list_or_404
self.render_to_response = render_to_response
self.render, self.redirect = render, redirect
from django.template import RequestContext
self.RequestContext = RequestContext
from django import forms
self.forms = forms
from fhurl import RequestForm, fhurl, JSONResponse
self.RequestForm = RequestForm
self.fhurl = fhurl
self.JSONResponse = JSONResponse
try:
from django.core.wsgi import get_wsgi_application
self.wsgi_application = get_wsgi_application()
except ImportError:
import django.core.handlers.wsgi
self.wsgi_application = django.core.handlers.wsgi.WSGIHandler()
try:
# https://devcenter.heroku.com/articles/django-assets
from whitenoise.django import DjangoWhiteNoise
self.wsgi_application = DjangoWhiteNoise(self.wsgi_application)
except ImportError:
pass
try:
from django.conf.urls.defaults import patterns, url
except ImportError:
from django.conf.urls import patterns, url # lint:ok
self.patterns, self.url = patterns, url
def _get_app_dir(self, pth):
"""Return the path of the app."""
return os.path.join(self.APP_DIR, pth)
def dotslash(self, pth):
"""Mimic the unix './' behaviour."""
if hasattr(self, "APP_DIR"):
return self._get_app_dir(pth=pth)
else:
try:
import speaklater
except ImportError:
raise RuntimeError("Configure django, or install speaklater.")
else:
return speaklater.make_lazy_string(self._get_app_dir, pth)
def generate_mount_url(self, regex, v_or_f, mod):
"""The self.mounts can be None, which means no url generation.
url is being managed by urlpatterns.
else self.mounts is a dict, containing app name and where to mount
if where it mount is None then again don't mount this fellow.
"""
if getattr(self, "mounts", None) is None:
return # we don't want to mount anything
if not regex.startswith("/"):
return regex
if not mod:
if isinstance(v_or_f, basestring):
mod = v_or_f
else: # if hasattr(v_or_f, "__module__")?
mod = v_or_f.__module__
best_k, best_v = "", None
for k, v in tuple(self.mounts.items()):
if mod.startswith(k) and len(k) > len(best_k):
best_k, best_v = k, v
if best_k:
if not best_v:
return
if not best_v.endswith("/"):
best_k += "/"
if best_v != "/":
regex = best_v[:-1] + regex
return regex
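    # Illustrative example (values assumed, not from the original source):
    # with self.mounts == {"blog": "/weblog/"}, a view from module
    # "blog.views" registered at "/post/" is mounted at "/weblog/post/".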
def add_view(self, regex, view, app=None, *args, **kw):
"""Take a view and add it to the app using regex arguments."""
regex = self.generate_mount_url(regex, view, app)
if regex:
patterns = self.patterns("", self.surl(regex, view, *args, **kw))
urlpatterns = self.get_urlpatterns()
urlpatterns += patterns
django.core.urlresolvers.clear_url_caches()
def add_form(self, regex, form_cls, app=None, *args, **kw):
"""Take a form and add it to the app using regex arguments."""
regex = self.generate_mount_url(regex, form_cls, app)
if regex:
urlpatterns = self.get_urlpatterns()
urlpatterns.append(self.fhurl(regex, form_cls, *args, **kw))
django.core.urlresolvers.clear_url_caches()
def get_secret_key(self):
"""Get a django secret key,try to read provided one,or generate it."""
try:
with open(self.dotslash("secret.txt"), "r") as f:
secret = f.readlines()[0].strip()
except (IOError, IndexError):
with open(self.dotslash("secret.txt"), "w") as f:
from string import ascii_letters, digits
from random import sample
secret = "".join(sample(ascii_letters + digits, 50))
f.write(secret)
finally:
return secret
def _fix_coffin_pre(self):
try:
from django.template import add_to_builtins
except ImportError:
from django.template.base import (
add_to_builtins, import_library, Origin,
InvalidTemplateLibrary, builtins, get_library)
import django.template
django.template.add_to_builtins = add_to_builtins
django.template.import_library = import_library
django.template.Origin = Origin
django.template.InvalidTemplateLibrary = InvalidTemplateLibrary
django.template.builtins = builtins
django.template.get_library = get_library
def _fix_coffin_post(self):
try:
from django.template.loaders.app_directories import (
app_template_dirs)
except ImportError:
from django.template.utils import get_app_template_dirs
import django.template.loaders.app_directories
django.template.loaders.app_directories.app_template_dirs = (
get_app_template_dirs('templates')
)
else:
app_template_dirs = app_template_dirs
def _configure_django(self, **kw):
"""Auto-Configure Django using arguments."""
from django.conf import settings, global_settings
self.settings = settings
if settings.configured:
return
self.APP_DIR, app_filename = os.path.split(
os.path.realpath(inspect.stack()[2][1])
)
if "regexers" in kw:
self.update_regexers(kw.pop("regexers"))
self.mounts = kw.pop("mounts", {})
if not kw.get("dont_configure", False):
kw["ROOT_URLCONF"] = "importd.urlconf"
if "TEMPLATE_DIRS" not in kw:
kw["TEMPLATE_DIRS"] = (self.dotslash("templates"), )
if "STATIC_URL" not in kw:
kw["STATIC_URL"] = "/static/"
if "STATIC_ROOT" not in kw:
kw["STATIC_ROOT"] = self.dotslash("staticfiles")
if "STATICFILES_DIRS" not in kw:
kw["STATICFILES_DIRS"] = [self.dotslash("static")]
if "MEDIA_URL" not in kw:
kw["MEDIA_URL"] = "/static/media/"
if "lr" in kw:
self.lr = kw.pop("lr")
if "db" in kw:
if isinstance(kw["db"], basestring):
kw["DATABASES"] = {
"default": dj_database_url.parse(kw.pop("db"))
}
else:
db = kw.pop("db")
default = dj_database_url.parse(db[0])
default.update(db[1])
kw["DATABASES"] = dict(default=default)
if "DATABASES" not in kw:
kw["DATABASES"] = {
"default": {
'ENGINE': "django.db.backends.sqlite3",
'NAME': self.dotslash("db.sqlite")
}
}
self.smart_return = False
if kw.pop("SMART_RETURN", True):
self.smart_return = True
if "MIDDLEWARE_CLASSES" not in kw:
kw["MIDDLEWARE_CLASSES"] = (
global_settings.MIDDLEWARE_CLASSES
)
kw["MIDDLEWARE_CLASSES"] = list(kw["MIDDLEWARE_CLASSES"])
kw["MIDDLEWARE_CLASSES"].insert(
0, "importd.SmartReturnMiddleware"
)
installed = list(kw.setdefault("INSTALLED_APPS", []))
admin_url = kw.pop("admin", "^admin/")
if admin_url:
if "django.contrib.auth" not in installed:
installed.append("django.contrib.auth")
if "django.contrib.contenttypes" not in installed:
installed.append("django.contrib.contenttypes")
if "django.contrib.auth" not in installed:
installed.append("django.contrib.auth")
if "django.contrib.messages" not in installed:
installed.append("django.contrib.messages")
if "django.contrib.sessions" not in installed:
installed.append("django.contrib.sessions")
# check session middleware installed
# https://docs.djangoproject.com/en/1.7/topics/http/sessions/#enabling-sessions
last_position = len(kw["MIDDLEWARE_CLASSES"])
kw["MIDDLEWARE_CLASSES"] = list(kw["MIDDLEWARE_CLASSES"])
kw["MIDDLEWARE_CLASSES"].insert(
last_position,
"django.contrib.sessions.middleware.SessionMiddleware"
)
if "django.contrib.admin" not in installed:
installed.append("django.contrib.admin")
if "django.contrib.humanize" not in installed:
installed.append("django.contrib.humanize")
if "django.contrib.staticfiles" not in installed:
installed.append("django.contrib.staticfiles")
if "debug_toolbar" not in installed and debug_toolbar:
installed.append("debug_toolbar")
if 'INTERNAL_IPS' not in kw:
kw['INTERNAL_IPS'] = ('127.0.0.1', '0.0.0.0')
kw['MIDDLEWARE_CLASSES'].insert(
1,
'debug_toolbar.middleware.DebugToolbarMiddleware')
kw['DEBUG_TOOLBAR_PANELS'] = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
)
# This one gives 500 if its Enabled without previous syncdb
# 'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
if django_extensions and werkzeug:
installed.append('django_extensions')
# django-jinja 1.0.4 support
if DJANGO_JINJA:
installed.append("django_jinja")
kw['TEMPLATE_LOADERS'] = list(kw.get('TEMPLATE_LOADERS', []))
kw['TEMPLATE_LOADERS'] += (
'django_jinja.loaders.AppLoader',
'django_jinja.loaders.FileSystemLoader',
)
# coffin 0.3.8 support
if COFFIN:
installed.append('coffin')
kw['TEMPLATE_LOADERS'] = list(kw.get('TEMPLATE_LOADERS', []))
kw['TEMPLATE_LOADERS'] += (
'coffin.contrib.loader.AppLoader',
'coffin.contrib.loader.FileSystemLoader',
)
kw['INSTALLED_APPS'] = installed
if "DEBUG" not in kw:
kw["DEBUG"] = kw["TEMPLATE_DEBUG"] = True
if "APP_DIR" not in kw:
kw["APP_DIR"] = self.APP_DIR
if "SECRET_KEY" not in kw:
kw["SECRET_KEY"] = self.get_secret_key()
# admins and managers
if "ADMINS" not in kw:
kw["ADMINS"] = kw["MANAGERS"] = ((getuser(), ""), )
autoimport = kw.pop("autoimport", True)
kw["SETTINGS_MODULE"] = kw.get("SETTINGS_MODULE", "importd")
# self._fix_coffin_pre()
settings.configure(**kw)
if hasattr(django, "setup"):
django.setup()
self._import_django()
# self._fix_coffin_post()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = self.get_urlpatterns()
urlpatterns += staticfiles_urlpatterns()
if autoimport:
# django depends on INSTALLED_APPS's model
for app in settings.INSTALLED_APPS:
self._import_app_module("{}.admin", app)
self._import_app_module("{}.models", app)
if admin_url:
from django.contrib import admin
try:
from django.conf.urls import include
except ImportError:
from django.conf.urls.defaults import include # lint:ok
admin.autodiscover()
self.add_view(admin_url, include(admin.site.urls))
if autoimport:
# import .views and .forms for each installed app
for app in settings.INSTALLED_APPS:
self._import_app_module("{}.forms", app)
self._import_app_module("{}.views", app)
self._import_app_module("{}.signals", app)
# import blueprints from config
self.blueprints = kw.pop("blueprints", {})
for namespace, meta in self.blueprints.items():
if isinstance(meta, basestring):
meta = {"blueprint": meta}
mod_path, bp_name = meta["blueprint"].rsplit(".", 1)
mod = importlib.import_module(mod_path)
bp = getattr(mod, bp_name)
self.register_blueprint(
bp, url_prefix=meta.get("url_prefix", namespace + "/"),
namespace=namespace, app_name=meta.get("app_name", ""))
self._configured = True
def _import_app_module(self, fmt, app):
"""Try to import an app module."""
try:
__import__(fmt.format(app)) # lint:ok
except ImportError:
pass
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
raise SystemExit(-1)
def __call__(self, *args, **kw):
"""Call instance class."""
if args:
if not hasattr(self, "_configured"):
self._configure_django(DEBUG=True)
if isinstance(args[0], dict) and len(args) == 2:
for bp in self.blueprint_list:
self.apply_blueprint(bp)
return self.wsgi_application(*args)
if self._is_management_command(args[0]):
self._handle_management_command(*args, **kw)
return self
if isinstance(args[0], list):
self.update_urls(args[0])
return self
if isinstance(args[0], Callable):
self.add_view("/{}/".format(args[0].__name__), args[0])
return args[0]
def ddecorator(candidate):
from django.forms import forms
# the following is unsafe
if isinstance(candidate, forms.DeclarativeFieldsMetaclass):
self.add_form(args[0], candidate, *args[1:], **kw)
return candidate
self.add_view(args[0], candidate, *args[1:], **kw)
return candidate
return ddecorator
else:
self._configure_django(**kw)
return self
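    # Illustrative calls (assumed, matching the dispatch branches above):
    #   d(DEBUG=True)            # configure django via keyword settings
    #   d("runserver", "8000")   # run a management command
    #   @d("/hello/")            # register a view for this url
    #   def hello(request):
    #       return "hello.html", {}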
def _act_as_manage(self, *args):
"""Mimic Djangos manage.py."""
if not hasattr(self, "_configured"):
self._configure_django(DEBUG=True)
from django.core import management
management.execute_from_command_line([sys.argv[0]] + list(args))
def register_blueprint(self, bp, url_prefix, namespace, app_name=''):
"""
        Interface to register a blueprint.
        See django url namespaces:
https://docs.djangoproject.com/en/1.7/topics/http/urls/#url-namespaces
"""
clone_bp = copy.deepcopy(bp)
clone_bp.url_prefix = url_prefix
clone_bp.namespace = namespace
clone_bp.app_name = app_name
self.blueprint_list.append(clone_bp)
def _apply_blueprint(self, bp):
"""Apply a Blueprint."""
try:
from django.conf.urls import include
except ImportError:
from django.conf.urls.defaults import include # lint:ok
url = self.surl(bp.url_prefix, include(bp.patterns,
namespace=bp.namespace,
app_name=bp.app_name))
urlpatterns = self.get_urlpatterns()
urlpatterns.append(url)
django.core.urlresolvers.clear_url_caches()
def main(self):
"""Wrapper for calling do."""
if len(sys.argv) == 1:
self.do(self._get_runserver_cmd())
else:
self.do()
def do(self, *args):
"""Run Django with ImportD."""
for bp in self.blueprint_list:
self._apply_blueprint(bp)
if not args:
args = sys.argv[1:]
if len(args) == 0:
return self._handle_management_command(
self._get_runserver_cmd(), "8000")
if 'livereload' in sys.argv:
if not hasattr(self, "lr"):
print("Livereload setting, lr not configured.")
return
from livereload import Server
server = Server(self)
for pat, cmd in self.lr.items():
parts = pat.split(",")
for part in parts:
server.watch(part, cmd)
server.serve(port=8000)
return
return self._act_as_manage(*args)
def _get_runserver_cmd(self):
"""Return a proper runserver command."""
return 'runserver_plus' if django_extensions else 'runserver'
application = d = D()
| bsd-3-clause | -6,445,214,912,893,763,000 | 35.786667 | 99 | 0.551488 | false |
scottwernervt/cloudstorage | tests/test_drivers_amazon.py | 1 | 8614 | from http import HTTPStatus
import pytest
import requests
from pathlib import Path
from cloudstorage.drivers.amazon import S3Driver
from cloudstorage.exceptions import (
CloudStorageError,
CredentialsError,
IsNotEmptyError,
NotFoundError,
)
from cloudstorage.helpers import file_checksum
from tests import settings
from tests.helpers import random_container_name, uri_validator
pytestmark = pytest.mark.skipif(
not bool(settings.AMAZON_KEY), reason="settings missing key and secret"
)
@pytest.fixture(scope="module")
def storage():
driver = S3Driver(
settings.AMAZON_KEY, settings.AMAZON_SECRET, settings.AMAZON_REGION
)
yield driver
for container in driver: # cleanup
if container.name.startswith(settings.CONTAINER_PREFIX):
for blob in container:
blob.delete()
container.delete()
def test_driver_validate_credentials():
driver = S3Driver(
settings.AMAZON_KEY, settings.AMAZON_SECRET, settings.AMAZON_REGION
)
assert driver.validate_credentials() is None
driver = S3Driver(settings.AMAZON_KEY, "invalid-secret", settings.AMAZON_REGION)
with pytest.raises(CredentialsError) as excinfo:
driver.validate_credentials()
assert excinfo.value
assert excinfo.value.message
# noinspection PyShadowingNames
def test_driver_create_container(storage):
container_name = random_container_name()
container = storage.create_container(container_name)
assert container_name in storage
assert container.name == container_name
# noinspection PyShadowingNames
def test_driver_create_container_invalid_name(storage):
# noinspection PyTypeChecker
with pytest.raises(CloudStorageError):
storage.create_container("?!<>container-name<>!?") # noqa: W605
# noinspection PyShadowingNames
def test_driver_get_container(storage, container):
container_existing = storage.get_container(container.name)
assert container_existing.name in storage
assert container_existing == container
# noinspection PyShadowingNames
def test_driver_get_container_invalid(storage):
container_name = random_container_name()
# noinspection PyTypeChecker
with pytest.raises(NotFoundError):
storage.get_container(container_name)
# noinspection PyShadowingNames
def test_container_delete(storage):
container_name = random_container_name()
container = storage.create_container(container_name)
container.delete()
assert container.name not in storage
def test_container_delete_not_empty(container, text_blob):
assert text_blob in container
# noinspection PyTypeChecker
with pytest.raises(IsNotEmptyError):
container.delete()
def test_container_enable_cdn(container):
assert not container.enable_cdn(), "S3 does not support enabling CDN."
def test_container_disable_cdn(container):
assert not container.disable_cdn(), "S3 does not support disabling CDN."
def test_container_cdn_url(container):
container.enable_cdn()
cdn_url = container.cdn_url
assert uri_validator(cdn_url)
assert container.name in cdn_url
def test_container_generate_upload_url(container, binary_stream):
form_post = container.generate_upload_url(
settings.BINARY_FORM_FILENAME, **settings.BINARY_OPTIONS
)
assert "url" in form_post and "fields" in form_post
assert uri_validator(form_post["url"])
url = form_post["url"]
fields = form_post["fields"]
multipart_form_data = {
"file": (settings.BINARY_FORM_FILENAME, binary_stream, "image/png"),
}
response = requests.post(url, data=fields, files=multipart_form_data)
assert response.status_code == HTTPStatus.NO_CONTENT, response.text
blob = container.get_blob(settings.BINARY_FORM_FILENAME)
assert blob.meta_data == settings.BINARY_OPTIONS["meta_data"]
assert blob.content_type == settings.BINARY_OPTIONS["content_type"]
assert blob.content_disposition == settings.BINARY_OPTIONS["content_disposition"]
assert blob.cache_control == settings.BINARY_OPTIONS["cache_control"]
def test_container_generate_upload_url_expiration(container, text_stream):
form_post = container.generate_upload_url(settings.TEXT_FORM_FILENAME, expires=-10)
assert "url" in form_post and "fields" in form_post
assert uri_validator(form_post["url"])
url = form_post["url"]
fields = form_post["fields"]
multipart_form_data = {"file": text_stream}
response = requests.post(url, data=fields, files=multipart_form_data)
assert response.status_code == HTTPStatus.FORBIDDEN, response.text
def test_container_get_blob(container, text_blob):
blob = container.get_blob(text_blob.name)
assert blob == text_blob
def test_container_get_blob_invalid(container):
blob_name = random_container_name()
# noinspection PyTypeChecker
with pytest.raises(NotFoundError):
container.get_blob(blob_name)
def test_blob_upload_path(container, text_filename):
blob = container.upload_blob(text_filename)
assert blob.name == settings.TEXT_FILENAME
assert blob.checksum == settings.TEXT_MD5_CHECKSUM
def test_blob_upload_pathlib_path(container, text_filename):
blob = container.upload_blob(Path(text_filename))
assert blob.name == settings.TEXT_FILENAME
assert blob.checksum == settings.TEXT_MD5_CHECKSUM
def test_blob_upload_stream(container, binary_stream):
blob = container.upload_blob(
filename=binary_stream,
blob_name=settings.BINARY_STREAM_FILENAME,
**settings.BINARY_OPTIONS,
)
assert blob.name == settings.BINARY_STREAM_FILENAME
assert blob.checksum == settings.BINARY_MD5_CHECKSUM
def test_blob_upload_options(container, binary_stream):
blob = container.upload_blob(
filename=binary_stream,
blob_name=settings.BINARY_STREAM_FILENAME,
**settings.BINARY_OPTIONS,
)
assert blob.name == settings.BINARY_STREAM_FILENAME
assert blob.checksum == settings.BINARY_MD5_CHECKSUM
assert blob.meta_data == settings.BINARY_OPTIONS["meta_data"]
assert blob.content_type == settings.BINARY_OPTIONS["content_type"]
assert blob.content_disposition == settings.BINARY_OPTIONS["content_disposition"]
assert blob.cache_control == settings.BINARY_OPTIONS["cache_control"]
def test_blob_delete(container, text_blob):
text_blob.delete()
assert text_blob not in container
def test_blob_download_path(binary_blob, temp_file):
binary_blob.download(temp_file)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(temp_file, hash_type=hash_type)
assert download_hash.hexdigest() == settings.BINARY_MD5_CHECKSUM
def test_blob_download_pathlib_path(binary_blob, temp_file):
binary_blob.download(Path(temp_file))
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(temp_file, hash_type=hash_type)
assert download_hash.hexdigest() == settings.BINARY_MD5_CHECKSUM
def test_blob_download_stream(binary_blob, temp_file):
with open(temp_file, "wb") as download_file:
binary_blob.download(download_file)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(temp_file, hash_type=hash_type)
assert download_hash.hexdigest() == settings.BINARY_MD5_CHECKSUM
def test_blob_cdn_url(container, binary_blob):
container.enable_cdn()
cdn_url = binary_blob.cdn_url
assert uri_validator(cdn_url)
assert binary_blob.container.name in cdn_url
assert binary_blob.name in cdn_url
def test_blob_generate_download_url(binary_blob, temp_file):
content_disposition = settings.BINARY_OPTIONS.get("content_disposition")
download_url = binary_blob.generate_download_url(
content_disposition=content_disposition
)
assert uri_validator(download_url)
response = requests.get(download_url)
assert response.status_code == HTTPStatus.OK, response.text
assert response.headers["content-disposition"] == content_disposition
with open(temp_file, "wb") as f:
for chunk in response.iter_content(chunk_size=128):
f.write(chunk)
hash_type = binary_blob.driver.hash_type
download_hash = file_checksum(temp_file, hash_type=hash_type)
assert download_hash.hexdigest() == settings.BINARY_MD5_CHECKSUM
def test_blob_generate_download_url_expiration(binary_blob):
download_url = binary_blob.generate_download_url(expires=-10)
assert uri_validator(download_url)
response = requests.get(download_url)
assert response.status_code == HTTPStatus.FORBIDDEN, response.text
| mit | 7,527,081,261,133,218,000 | 32.130769 | 87 | 0.72626 | false |
yteraoka/googleapps-directory-tools | calendar-acl.py | 1 | 8051 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import pprint
from apiclient.discovery import build
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client import tools
import argparse
import simplejson as json
from const import *
from utils import *
def show_resource(resource):
print "ruleId: %s" % resource['id']
print "role: %s" % resource['role']
print "scope: %s %s" % (resource['scope']['type'], resource['scope']['value'])
def show_resource_list(resources, verbose):
for resource in resources:
if verbose:
show_resource(resource)
print ""
else:
print "%s,%s,%s,%s" % (resource['id'],
resource['role'],
resource['scope']['type'],
resource['scope']['value'])
def main(argv):
parser = argparse.ArgumentParser(parents=[tools.argparser])
subparsers = parser.add_subparsers(help='sub command')
#-------------------------------------------------------------------------
# LIST / SEARCH
#-------------------------------------------------------------------------
parser_list = subparsers.add_parser('list', help='Returns the rules in the access control list for the calendar')
parser_list.add_argument('calendarId', help='calendear id')
parser_list.add_argument('--maxResults', type=int, help='Acceptable values are 1 to 250')
parser_list.add_argument('-v', '--verbose', action='store_true', help='show updated user data')
parser_list.add_argument('--json', action='store_true', help='output in JSON')
parser_list.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
#-------------------------------------------------------------------------
# GET
#-------------------------------------------------------------------------
parser_get = subparsers.add_parser('get', help='Returns an access control rule')
parser_get.add_argument('calendarId', help='calendar id')
parser_get.add_argument('ruleId', help='rule id')
parser_get.add_argument('--json', action='store_true', help='output in JSON')
parser_get.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
#-------------------------------------------------------------------------
# INSERT
#-------------------------------------------------------------------------
parser_insert = subparsers.add_parser('insert', help='Creates an access control rule')
parser_insert.add_argument('calendarId', help='calendar id')
parser_insert.add_argument('role', choices=['none', 'freeBusyReader', 'reader', 'writer', 'owner'])
parser_insert.add_argument('type', choices=['default', 'user', 'group', 'domain'])
parser_insert.add_argument('--value', help='email address or domain name')
parser_insert.add_argument('-v', '--verbose', action='store_true', help='show created user data')
parser_insert.add_argument('--json', action='store_true', help='output in JSON')
parser_insert.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
#-------------------------------------------------------------------------
# PATCH
#-------------------------------------------------------------------------
parser_patch = subparsers.add_parser('patch', help='Updates an access control rule')
parser_patch.add_argument('calendarId', help='calendar id')
parser_patch.add_argument('ruleId', help='rule id')
parser_patch.add_argument('role', choices=['none', 'freeBusyReader', 'reader', 'writer', 'owner'])
parser_patch.add_argument('type', choices=['default', 'user', 'group', 'domain'])
parser_patch.add_argument('--value', help='email address or domain name')
parser_patch.add_argument('-v', '--verbose', action='store_true', help='show updated user data')
parser_patch.add_argument('--json', action='store_true', help='output in JSON')
parser_patch.add_argument('--jsonPretty', action='store_true', help='output in pretty JSON')
#-------------------------------------------------------------------------
# DELETE
#-------------------------------------------------------------------------
parser_delete = subparsers.add_parser('delete', help='Deletes an access control rule')
parser_delete.add_argument('calendarId', help='calendar id')
parser_delete.add_argument('ruleId', help='rule id')
args = parser.parse_args(argv[1:])
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope=SCOPES,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage(CREDENTIALS_PATH)
credentials = storage.get()
if credentials is None or credentials.invalid:
print 'invalid credentials'
# Save the credentials in storage to be used in subsequent runs.
credentials = tools.run_flow(FLOW, storage, args)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('calendar', 'v3', http=http)
sv = service.acl()
command = argv[1]
if command == 'list':
acls = []
pageToken = None
params = {}
params['calendarId'] = args.calendarId
while True:
if args.maxResults:
params['maxResults'] = args.maxResults
if pageToken:
params['pageToken'] = pageToken
r = sv.list(**params).execute()
if r.has_key('items'):
if args.jsonPretty or args.json:
for acl in r['items']:
acls.append(acl)
else:
show_resource_list(r['items'], args.verbose)
if r.has_key('nextPageToken'):
pageToken = r['nextPageToken']
else:
break
if args.jsonPretty:
if len(acls) == 1:
print to_pretty_json(acls[0])
else:
print to_pretty_json(acls)
elif args.json:
if len(acls) == 1:
print to_json(acls[0])
else:
print to_json(acls)
elif command == 'get':
r = sv.get(calendarId=args.calendarId, ruleId=args.ruleId).execute()
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
elif command == 'insert':
body = { 'role': args.role, 'scope': { 'type': args.type } }
if args.type != 'default':
if args.value is None:
print '--value is required'
sys.exit(1)
body['scope']['value'] = args.value
r = sv.insert(calendarId=args.calendarId, body=body).execute()
if args.verbose:
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
elif command == 'patch':
body = { 'role': args.role, 'scope': { 'type': args.type } }
if args.type != 'default':
if args.value is None:
print '--value is required'
sys.exit(1)
body['scope']['value'] = args.value
r = sv.patch(calendarId=args.calendarId, ruleId=args.ruleId, body=body).execute()
if args.verbose:
if args.jsonPretty:
print to_pretty_json(r)
elif args.json:
print to_json(r)
else:
show_resource(r)
elif command == 'delete':
r = sv.delete(calendarId=args.calendarId, ruleId=args.ruleId).execute()
else:
print "unknown command '%s'" % command
return
if __name__ == '__main__':
main(sys.argv)
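# Example invocations (calendar id, addresses and the rule id format are
# placeholders/assumptions):
#
#   python calendar-acl.py list [email protected] -v
#   python calendar-acl.py insert [email protected] reader user --value [email protected]
#   python calendar-acl.py delete [email protected] "user:[email protected]"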
| apache-2.0 | -3,583,292,375,005,266,000 | 39.457286 | 117 | 0.532729 | false |
qitta/libhugin | hugin/analyze/stopwords/__init__.py | 2 | 1717 | #!/usr/bin/env python
# encoding: utf-8
"""
Overview
~~~~~~~~
Interface for loading stopwords from a set of 12 languages that
are packaged along with libmunin.
The stopwords can be used to split text into important and unimportant words.
Additionally text language can be guessed through the ``guess_language`` module.
Reference
~~~~~~~~~
"""
import os
import pkgutil
__path__ = os.path.dirname(pkgutil.extend_path(__file__, __name__))
# Cache stopword sets that have already been loaded, since reading them from disk takes time.
STOPWORD_CACHE = {}
def parse_stopwords(handle):
"""Parse a file with stopwords in it into a list of stopwords.
    :param handle: a readable file handle.
:returns: An iterator that will yield stopwords.
"""
for line in handle:
yield line.strip().lower()
def load_stopwords(language_code):
"""Load a stopwordlist from the data directory.
Returns a frozenset with all stopwords or an empty set if
the language_code was not recognized.
    :param language_code: An ISO-639 Alpha-2 language code
:returns: A frozenset of words.
"""
global STOPWORD_CACHE
if language_code in STOPWORD_CACHE:
return STOPWORD_CACHE[language_code]
relative_path = os.path.join(__path__, 'data', language_code)
try:
with open(relative_path, 'r') as handle:
stopwords = frozenset(parse_stopwords(handle))
STOPWORD_CACHE[language_code] = stopwords
return stopwords
except OSError:
return frozenset([])
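# Minimal usage sketch (assumes a 'de' wordlist ships in the data directory):
#
#     stop = load_stopwords('de')
#     important = [w for w in text.lower().split() if w not in stop]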
if __name__ == '__main__':
import sys
import guess_language
if '--cli' in sys.argv:
code = guess_language.guess_language(sys.argv[2])
print(load_stopwords(code))
| gpl-3.0 | -7,410,330,641,471,257,000 | 23.884058 | 80 | 0.669773 | false |
cedricbonhomme/Stegano | bin/lsb.py | 1 | 4377 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Stegano - Stegano is a pure Python steganography module.
# Copyright (C) 2010-2021 Cédric Bonhomme - https://www.cedricbonhomme.org
#
# For more information : https://github.com/cedricbonhomme/Stegano
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.8 $"
__date__ = "$Date: 2016/08/04 $"
__revision__ = "$Date: 2019/06/01 $"
__license__ = "GPLv3"
import argparse
try:
    from stegano import lsb
except ImportError:
    print("Install Stegano: pipx install Stegano")
    raise SystemExit(1)
from stegano import tools
def main():
parser = argparse.ArgumentParser(prog='stegano-lsb')
subparsers = parser.add_subparsers(help='sub-command help', dest='command')
# Subparser: Hide
parser_hide = subparsers.add_parser('hide', help='hide help')
# Original image
parser_hide.add_argument("-i", "--input", dest="input_image_file",
required=True, help="Input image file.")
parser_hide.add_argument("-e", "--encoding", dest="encoding",
choices=tools.ENCODINGS.keys(), default='UTF-8',
help="Specify the encoding of the message to hide." +
" UTF-8 (default) or UTF-32LE.")
group_secret = parser_hide.add_mutually_exclusive_group(required=True)
# Non binary secret message to hide
group_secret.add_argument("-m", dest="secret_message",
help="Your secret message to hide (non binary).")
# Binary secret message to hide
group_secret.add_argument("-f", dest="secret_file",
help="Your secret to hide (Text or any binary file).")
# Image containing the secret
parser_hide.add_argument("-o", "--output", dest="output_image_file",
required=True, help="Output image containing the secret.")
# Shift the message to hide
parser_hide.add_argument("-s", "--shift", dest="shift", default=0,
help="Shift for the message to hide")
# Subparser: Reveal
parser_reveal = subparsers.add_parser('reveal', help='reveal help')
parser_reveal.add_argument("-i", "--input", dest="input_image_file",
required=True, help="Input image file.")
parser_reveal.add_argument("-e", "--encoding", dest="encoding",
choices=tools.ENCODINGS.keys(), default='UTF-8',
help="Specify the encoding of the message to reveal." +
" UTF-8 (default) or UTF-32LE.")
parser_reveal.add_argument("-o", dest="secret_binary",
help="Output for the binary secret (Text or any binary file).")
# Shift the message to reveal
parser_reveal.add_argument("-s", "--shift", dest="shift", default=0,
help="Shift for the reveal")
arguments = parser.parse_args()
if arguments.command == 'hide':
        if arguments.secret_message is not None:
            secret = arguments.secret_message
        elif arguments.secret_file is not None:
            secret = tools.binary2base64(arguments.secret_file)
img_encoded = lsb.hide(arguments.input_image_file, secret,
arguments.encoding, int(arguments.shift))
try:
img_encoded.save(arguments.output_image_file)
except Exception as e:
# If hide() returns an error (Too long message).
print(e)
elif arguments.command == 'reveal':
secret = lsb.reveal(arguments.input_image_file, arguments.encoding,
int(arguments.shift))
        if arguments.secret_binary is not None:
data = tools.base642binary(secret)
with open(arguments.secret_binary, "wb") as f:
f.write(data)
else:
print(secret)
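# Example command lines (file names are placeholders):
#
#   stegano-lsb hide -i cover.png -m "secret message" -o stego.png
#   stegano-lsb reveal -i stego.png
#
# The console script name 'stegano-lsb' matches the prog= set in main() above;
# how the entry point is installed may differ.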
| gpl-3.0 | 5,437,824,163,409,713,000 | 40.283019 | 83 | 0.626828 | false |
ClaudioGranatiero/ClyphX_CG | ClyphX/ClyphXControlSurfaceActions.py | 1 | 16560 | """
# Copyright (C) 2013-2015 Stray <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Stray <[email protected]>
"""
# emacs-mode: -*- python-*-
# -*- coding: utf-8 -*-
import Live
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.ControlSurface import ControlSurface
from _Framework.SessionComponent import SessionComponent
from _Framework.MixerComponent import MixerComponent
from _Framework.DeviceComponent import DeviceComponent
from consts import *
if IS_LIVE_9:
from ClyphXPushActions import ClyphXPushActions
from ClyphXPXTActions import ClyphXPXTActions
from ClyphXMXTActions import ClyphXMXTActions
class ClyphXControlSurfaceActions(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Actions related to control surfaces '
def __init__(self, parent):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._push_actions = None
if IS_LIVE_9:
self._push_actions = ClyphXPushActions(parent)
self._pxt_actions = ClyphXPXTActions(parent)
self._mxt_actions = ClyphXMXTActions(parent)
self._scripts = {}
def disconnect(self):
self._scripts = {}
self._parent = None
self._push_actions = None
self._pxt_actions = None
self._mxt_actions = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def connect_script_instances(self, instanciated_scripts):
""" Build dict of connected scripts and their components, doesn't work with non-Framework scripts, but does work with User Remote Scripts """
self._scripts = {}
for index in range (len(instanciated_scripts)):
script = instanciated_scripts[index]
self._scripts[index] = {'script' : script, 'name' : None, 'repeat' : False, 'mixer' : None, 'device' : None, 'last_ring_pos' : None,
'session' : None, 'track_link' : False, 'scene_link' : False, 'centered_link' : False, 'color' : False}
if isinstance (script, ControlSurface):
script_name = script.__class__.__name__
if script_name == 'GenericScript':
script_name = script._suggested_input_port
if script_name == 'Push' and IS_LIVE_9:
self._push_actions.set_script(script)
if script_name.startswith('PXT_Live') and IS_LIVE_9:
self._pxt_actions.set_script(script)
if script_name == 'MXT_Live' and IS_LIVE_9:
self._mxt_actions.set_script(script)
if not script_name.startswith('ClyphX'):
if (IS_LIVE_9 and script._components == None) or script.components == None:
return
else:
self._scripts[index]['name'] = script_name.upper()
for c in script.components:
if isinstance (c, SessionComponent):
self._scripts[index]['session'] = c
if script_name.startswith('APC'):
self._scripts[index]['color'] = {'GREEN' : (1, 2), 'RED' : (3, 4), 'AMBER' : (5, 6)}
self._scripts[index]['metro'] = {'controls' : c._stop_track_clip_buttons, 'component' : None, 'override' : None}
if script_name == 'Launchpad':
self._scripts[index]['color'] = {'GREEN' : (52, 56), 'RED' : (7, 11), 'AMBER' : (55, 59)}
self._scripts[index]['metro'] = {'controls' : script._selector._side_buttons, 'component' : None, 'override' : script._selector}
if isinstance (c, MixerComponent):
self._scripts[index]['mixer'] = c
if isinstance (c, DeviceComponent):
self._scripts[index]['device'] = c
def dispatch_push_action(self, track, xclip, ident, action, args):
""" Dispatch Push-related actions to PushActions. """
if self._push_actions:
self._push_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_pxt_action(self, track, xclip, ident, action, args):
""" Dispatch PXT-related actions to PXTActions. """
if self._pxt_actions:
self._pxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_mxt_action(self, track, xclip, ident, action, args):
""" Dispatch MXT-related actions to MXTActions. """
if self._mxt_actions:
self._mxt_actions.dispatch_action(track, xclip, ident, action, args)
def dispatch_cs_action(self, track, xclip, ident, action, args):
""" Dispatch appropriate control surface actions """
script = self._get_script_to_operate_on(action)
if script != None:
if 'METRO ' in args and self._scripts[script].has_key('metro'):
self.handle_visual_metro(self._scripts[script], args)
elif 'RINGLINK ' in args and self._scripts[script]['session']:
self.handle_ring_link(self._scripts[script]['session'], script, args[9:])
elif 'RING ' in args and self._scripts[script]['session']:
self.handle_session_offset(script, self._scripts[script]['session'], args[5:])
elif 'COLORS ' in args and self._scripts[script]['session'] and self._scripts[script]['color']:
self.handle_session_colors(self._scripts[script]['session'], self._scripts[script]['color'], args[7:])
elif 'DEV LOCK' in args and self._scripts[script]['device']:
self._scripts[script]['device'].canonical_parent.toggle_lock()
elif 'BANK ' in args and self._scripts[script]['mixer']:
self.handle_track_bank(xclip, ident, self._scripts[script]['mixer'], self._scripts[script]['session'], args[5:])
elif 'RPT' in args and IS_LIVE_9:
self.handle_note_repeat(self._scripts[script]['script'], script, args)
else:
if self._scripts[script]['mixer'] and '/' in args[:4]:
self.handle_track_action(self._scripts[script]['mixer'], xclip, ident, args)
def _get_script_to_operate_on(self, script_info):
""" Returns the script index to operate on, which can be specified in terms of its index
        or its name. Either SURFACE (legacy) or CS (new) can be used to indicate a surface action. """
script = None
try:
script_spec = None
if 'SURFACE' in script_info:
script_spec = script_info.strip('SURFACE')
elif 'CS' in script_info:
script_spec = script_info.strip('CS')
if len(script_spec) == 1:
script = int(script_spec) - 1
if not self._scripts.has_key(script):
script = None
else:
script_spec = script_spec.strip('"').strip()
for k, v in self._scripts.items():
if v['name'] == script_spec:
script = k
except: script = None
return script
def handle_note_repeat(self, script, script_index, args):
""" Set note repeat for the given surface """
args = args.replace('RPT', '').strip()
if args in REPEAT_STATES:
if args == 'OFF':
script._c_instance.note_repeat.enabled = False
self._scripts[script_index]['repeat'] = False
else:
script._c_instance.note_repeat.repeat_rate = REPEAT_STATES[args]
script._c_instance.note_repeat.enabled = True
self._scripts[script_index]['repeat'] = True
else:
self._scripts[script_index]['repeat'] = not self._scripts[script_index]['repeat']
script._c_instance.note_repeat.enabled = self._scripts[script_index]['repeat']
def handle_track_action(self, mixer, xclip, ident, args):
""" Get control surface track(s) to operate on and call main action dispatch """
track_start = None
track_end = None
track_range = args.split('/')[0]
actions = str(args[args.index('/')+1:].strip()).split()
new_action = actions[0]
new_args = ''
if len(actions) > 1:
new_args = ' '.join(actions[1:])
if 'ALL' in track_range:
track_start = 0
track_end = len(mixer._channel_strips)
elif '-' in track_range:
track_range = track_range.split('-')
try:
track_start = int(track_range[0]) - 1
track_end = int(track_range[1])
except:
track_start = None
track_end = None
else:
try:
track_start = int(track_range) - 1
track_end = track_start + 1
except:
track_start = None
track_end = None
if track_start != None and track_end != None:
if track_start in range (len(mixer._channel_strips) + 1) and track_end in range (len(mixer._channel_strips) + 1) and track_start < track_end:
track_list = []
for index in range (track_start, track_end):
if index + mixer._track_offset in range (len(mixer.tracks_to_use())):
track_list.append(mixer.tracks_to_use()[index + mixer._track_offset])
if track_list:
self._parent.action_dispatch(track_list, xclip, new_action, new_args, ident)
def handle_track_bank(self, xclip, ident, mixer, session, args):
""" Move track bank (or session bank) and select first track in bank...this works even with controllers without banks like User Remote Scripts """
new_offset = None
if args == 'FIRST':
new_offset = 0
elif args == 'LAST':
new_offset = len(mixer.tracks_to_use()) - len(mixer._channel_strips)
else:
try:
offset = int(args)
if offset + mixer._track_offset in range (len(mixer.tracks_to_use())):
new_offset = offset + mixer._track_offset
except: new_offset = None
if new_offset >= 0:
if session:
session.set_offsets(new_offset, session._scene_offset)
else:
mixer.set_track_offset(new_offset)
self.handle_track_action(mixer, xclip, ident, '1/SEL')
def handle_session_offset(self, script_key, session, args):
""" Handle moving session offset absolutely or relatively as well as storing/recalling its last position. """
try:
new_track = session._track_offset
new_scene = session._scene_offset
if args.strip() == 'LAST':
last_pos = self._scripts[script_key]['last_ring_pos']
if last_pos:
session.set_offsets(last_pos[0], last_pos[1])
return
else:
self._scripts[script_key]['last_ring_pos'] = (new_track, new_scene)
new_track, args = self._parse_ring_spec('T', args, new_track, self.song().tracks)
new_scene, args = self._parse_ring_spec('S', args, new_scene, self.song().scenes)
if new_track == -1 or new_scene == -1:
return
session.set_offsets(new_track, new_scene)
except: pass
def _parse_ring_spec(self, spec_id, arg_string, default_index, list_to_search):
""" Parses a ring action specification and returns the specified track/scene index
as well as the arg_string without the specification that was parsed. """
index = default_index
arg_array = arg_string.split()
for a in arg_array:
if a.startswith(spec_id):
if a[1].isdigit():
index = int(a.strip(spec_id)) - 1
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] in ('<', '>'):
index += self._parent.get_adjustment_factor(a.strip(spec_id))
arg_string = arg_string.replace(a, '', 1).strip()
break
elif a[1] == '"':
name_start_pos = arg_string.index(spec_id + '"')
name = arg_string[name_start_pos + 2:]
name_end_pos = name.index('"')
name = name[:name_end_pos]
for i, item in enumerate(list_to_search):
if name == item.name.upper():
index = i
break
arg_string = arg_string.replace(spec_id + '"' + name + '"', '', 1).strip()
break
return (index, arg_string)
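    # Hypothetical ring specs this parser accepts (the identifiers are examples):
    #
    #   'T4 S2'        absolute: track 4, scene 2 (1-based in the spec)
    #   'T> S<'        relative: move the track offset right, the scene offset left
    #   'T"Drums"'     by name: jump the track offset to the track named Drums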
def handle_ring_link(self, session, script_index, args):
""" Handles linking/unliking session offsets to the selected track or scene with centering if specified. """
self._scripts[script_index]['track_link'] = args == 'T' or 'T ' in args or ' T' in args
self._scripts[script_index]['scene_link'] = 'S' in args
self._scripts[script_index]['centered_link'] = 'CENTER' in args
def handle_session_colors(self, session, colors, args):
""" Handle changing clip launch LED colors """
args = args.split()
if len(args) == 3:
for a in args:
if not a in colors:
return
for scene_index in range(session.height()):
scene = session.scene(scene_index)
for track_index in range(session.width()):
clip_slot = scene.clip_slot(track_index)
clip_slot.set_started_value(colors[args[0]][0])
clip_slot.set_triggered_to_play_value(colors[args[0]][1])
clip_slot.set_recording_value(colors[args[1]][0])
clip_slot.set_triggered_to_record_value(colors[args[1]][1])
clip_slot.set_stopped_value(colors[args[2]][0])
clip_slot.update()
def handle_visual_metro(self, script, args):
""" Handle visual metro for APCs and Launchpad. """
if 'ON' in args and not script['metro']['component']:
m = VisualMetro(self._parent, script['metro']['controls'], script['metro']['override'])
script['metro']['component'] = m
elif 'OFF' in args and script['metro']['component']:
script['metro']['component'].disconnect()
script['metro']['component'] = None
def on_selected_track_changed(self):
""" Moves the track offset of all track linked surfaces to the selected track with centering if specified. """
trk = self.song().view.selected_track
if trk in self.song().tracks:
trk_id = list(self.song().visible_tracks).index(trk)
for k, v in self._scripts.items():
if v['track_link']:
new_trk_id = trk_id
try:
session = self._scripts[k]['session']
if self._scripts[k]['centered_link']:
mid_point = (session.width() / 2)
if new_trk_id < mid_point:
if session._track_offset <= new_trk_id:
return
else:
new_trk_id = 0
else:
centered_id = new_trk_id - mid_point
if centered_id in range(len(self.song().visible_tracks)):
new_trk_id = centered_id
session.set_offsets(new_trk_id, session._scene_offset)
except: pass
def on_selected_scene_changed(self):
""" Moves the scene offset of all scene linked surfaces to the selected scene with centering if specified. """
scn_id = list(self.song().scenes).index(self.song().view.selected_scene)
for k, v in self._scripts.items():
if v['scene_link']:
new_scn_id = scn_id
try:
session = self._scripts[k]['session']
if self._scripts[k]['centered_link']:
mid_point = (session.height() / 2)
if new_scn_id < mid_point:
if session._scene_offset <= new_scn_id:
return
else:
new_scn_id = 0
else:
centered_id = new_scn_id - mid_point
if centered_id in range(len(self.song().scenes)):
new_scn_id = centered_id
session.set_offsets(session._track_offset, new_scn_id)
except: pass
class VisualMetro(ControlSurfaceComponent):
__module__ = __name__
__doc__ = ' Visual metro for APCs and Launchpad '
def __init__(self, parent, controls, override):
ControlSurfaceComponent.__init__(self)
self._parent = parent
self._controls = controls
self._override = override
self._last_beat = -1
self.song().add_current_song_time_listener(self.on_time_changed)
self.song().add_is_playing_listener(self.on_time_changed)
def disconnect(self):
if self._controls:
self.clear()
self._controls = None
self.song().remove_current_song_time_listener(self.on_time_changed)
self.song().remove_is_playing_listener(self.on_time_changed)
self._override = None
self._parent = None
if IS_LIVE_9:
ControlSurfaceComponent.disconnect(self)
def on_enabled_changed(self):
pass
def update(self):
pass
def on_time_changed(self):
""" Show visual metronome via control LEDs upon beat changes (will not be shown if in Launchpad User 1) """
if self.song().is_playing and (not self._override or (self._override and self._override._mode_index != 1)):
time = str(self.song().get_current_beats_song_time()).split('.')
if self._last_beat != int(time[1])-1:
self._last_beat = int(time[1])-1
self.clear()
if self._last_beat < len(self._controls):
self._controls[self._last_beat].turn_on()
else:
self._controls[len(self._controls)-1].turn_on()
else:
self.clear()
def clear(self):
""" Clear all control LEDs """
for c in self._controls:
c.turn_off()
# local variables:
# tab-width: 4 | gpl-2.0 | 5,056,214,246,186,629,000 | 37.071264 | 147 | 0.653502 | false |
nagaozen/my-os-customizations | home/nagaozen/gedit-2.30.4/plugins/externaltools/tools/filelookup.py | 2 | 4761 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 Per Arneng <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import gio
import gedit
class FileLookup:
"""
This class is responsible for looking up files given a part or the whole
    path of a real file. The lookup is delegated to providers which use different
methods of trying to find the real file.
"""
def __init__(self):
self.providers = []
self.providers.append(AbsoluteFileLookupProvider())
self.providers.append(CwdFileLookupProvider())
self.providers.append(OpenDocumentRelPathFileLookupProvider())
self.providers.append(OpenDocumentFileLookupProvider())
def lookup(self, path):
"""
Tries to find a file specified by the path parameter. It delegates to
different lookup providers and the first match is returned. If no file
was found then None is returned.
path -- the path to find
"""
found_file = None
for provider in self.providers:
found_file = provider.lookup(path)
if found_file is not None:
break
return found_file
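    # Illustrative usage (the path is an assumption):
    #
    #     lookup = FileLookup()
    #     gfile = lookup.lookup('src/main.c')   # -> gio.File or None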
class FileLookupProvider:
"""
The base class of all file lookup providers.
"""
def lookup(self, path):
"""
This method must be implemented by subclasses. Implementors will be
given a path and will try to find a matching file. If no file is found
then None is returned.
"""
raise NotImplementedError("need to implement a lookup method")
class AbsoluteFileLookupProvider(FileLookupProvider):
"""
This file tries to see if the path given is an absolute path and that the
path references a file.
"""
def lookup(self, path):
if os.path.isabs(path) and os.path.isfile(path):
return gio.File(path)
else:
return None
class CwdFileLookupProvider(FileLookupProvider):
"""
This lookup provider tries to find a file specified by the path relative to
the current working directory.
"""
def lookup(self, path):
try:
cwd = os.getcwd()
except OSError:
cwd = os.getenv('HOME')
real_path = os.path.join(cwd, path)
if os.path.isfile(real_path):
return gio.File(real_path)
else:
return None
class OpenDocumentRelPathFileLookupProvider(FileLookupProvider):
"""
Tries to see if the path is relative to any directories where the
currently open documents reside in. Example: If you have a document opened
'/tmp/Makefile' and a lookup is made for 'src/test2.c' then this class
will try to find '/tmp/src/test2.c'.
"""
def lookup(self, path):
if path.startswith('/'):
return None
for doc in gedit.app_get_default().get_documents():
if doc.is_local():
location = doc.get_location()
if location:
rel_path = location.get_parent().get_path()
joined_path = os.path.join(rel_path, path)
if os.path.isfile(joined_path):
return gio.File(joined_path)
return None
class OpenDocumentFileLookupProvider(FileLookupProvider):
"""
Makes a guess that the if the path that was looked for matches the end
of the path of a currently open document then that document is the one
that is looked for. Example: If a document is opened called '/tmp/t.c'
and a lookup is made for 't.c' or 'tmp/t.c' then both will match since
the open document ends with the path that is searched for.
"""
def lookup(self, path):
if path.startswith('/'):
return None
for doc in gedit.app_get_default().get_documents():
if doc.is_local():
location = doc.get_location()
if location and location.get_uri().endswith(path):
return location
return None
# ex:ts=4:et:
| gpl-3.0 | 157,991,573,610,847,140 | 31.834483 | 80 | 0.634741 | false |
GenericStudent/home-assistant | tests/util/test_yaml.py | 5 | 16219 | """Test Home Assistant yaml loader."""
import io
import logging
import os
import unittest
import pytest
from homeassistant.config import YAML_CONFIG_FILE, load_yaml_config_file
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.yaml as yaml
from homeassistant.util.yaml import loader as yaml_loader
from tests.async_mock import patch
from tests.common import get_test_config_dir, patch_yaml_files
@pytest.fixture(autouse=True)
def mock_credstash():
"""Mock credstash so it doesn't connect to the internet."""
with patch.object(yaml_loader, "credstash") as mock_credstash:
mock_credstash.getSecret.return_value = None
yield mock_credstash
def test_simple_list():
"""Test simple list."""
conf = "config:\n - simple\n - list"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["config"] == ["simple", "list"]
def test_simple_dict():
"""Test simple dict."""
conf = "key: value"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == "value"
def test_unhashable_key():
"""Test an unhashable key."""
files = {YAML_CONFIG_FILE: "message:\n {{ states.state }}"}
with pytest.raises(HomeAssistantError), patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
def test_no_key():
"""Test item without a key."""
files = {YAML_CONFIG_FILE: "a: a\nnokeyhere"}
with pytest.raises(HomeAssistantError), patch_yaml_files(files):
yaml.load_yaml(YAML_CONFIG_FILE)
def test_environment_variable():
"""Test config file with environment variable."""
os.environ["PASSWORD"] = "secret_password"
conf = "password: !env_var PASSWORD"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["password"] == "secret_password"
del os.environ["PASSWORD"]
def test_environment_variable_default():
"""Test config file with default value for environment variable."""
conf = "password: !env_var PASSWORD secret_password"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["password"] == "secret_password"
def test_invalid_environment_variable():
"""Test config file with no environment variable sat."""
conf = "password: !env_var PASSWORD"
with pytest.raises(HomeAssistantError):
with io.StringIO(conf) as file:
yaml_loader.yaml.safe_load(file)
def test_include_yaml():
"""Test include yaml."""
with patch_yaml_files({"test.yaml": "value"}):
conf = "key: !include test.yaml"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == "value"
with patch_yaml_files({"test.yaml": None}):
conf = "key: !include test.yaml"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == {}
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_list(mock_walk):
"""Test include dir list yaml."""
mock_walk.return_value = [["/test", [], ["two.yaml", "one.yaml"]]]
with patch_yaml_files({"/test/one.yaml": "one", "/test/two.yaml": "two"}):
conf = "key: !include_dir_list /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == sorted(["one", "two"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_list_recursive(mock_walk):
"""Test include dir recursive list yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["zero.yaml"]],
["/test/tmp2", [], ["one.yaml", "two.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/zero.yaml": "zero",
"/test/tmp2/one.yaml": "one",
"/test/tmp2/two.yaml": "two",
}
):
conf = "key: !include_dir_list /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["zero", "one", "two"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_named(mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
["/test", [], ["first.yaml", "second.yaml", "secrets.yaml"]]
]
with patch_yaml_files({"/test/first.yaml": "one", "/test/second.yaml": "two"}):
conf = "key: !include_dir_named /test"
correct = {"first": "one", "second": "two"}
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == correct
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_named_recursive(mock_walk):
"""Test include dir named yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "one",
"/test/tmp2/second.yaml": "two",
"/test/tmp2/third.yaml": "three",
}
):
conf = "key: !include_dir_named /test"
correct = {"first": "one", "second": "two", "third": "three"}
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert doc["key"] == correct
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_list(mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [["/test", [], ["first.yaml", "second.yaml"]]]
with patch_yaml_files(
{"/test/first.yaml": "- one", "/test/second.yaml": "- two\n- three"}
):
conf = "key: !include_dir_merge_list /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert sorted(doc["key"]) == sorted(["one", "two", "three"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_list_recursive(mock_walk):
"""Test include dir merge list yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "- one",
"/test/tmp2/second.yaml": "- two",
"/test/tmp2/third.yaml": "- three\n- four",
}
):
conf = "key: !include_dir_merge_list /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert sorted(doc["key"]) == sorted(["one", "two", "three", "four"])
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_named(mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [["/test", [], ["first.yaml", "second.yaml"]]]
files = {
"/test/first.yaml": "key1: one",
"/test/second.yaml": "key2: two\nkey3: three",
}
with patch_yaml_files(files):
conf = "key: !include_dir_merge_named /test"
with io.StringIO(conf) as file:
doc = yaml_loader.yaml.safe_load(file)
assert doc["key"] == {"key1": "one", "key2": "two", "key3": "three"}
@patch("homeassistant.util.yaml.loader.os.walk")
def test_include_dir_merge_named_recursive(mock_walk):
"""Test include dir merge named yaml."""
mock_walk.return_value = [
["/test", ["tmp2", ".ignore", "ignore"], ["first.yaml"]],
["/test/tmp2", [], ["second.yaml", "third.yaml"]],
["/test/ignore", [], [".ignore.yaml"]],
]
with patch_yaml_files(
{
"/test/first.yaml": "key1: one",
"/test/tmp2/second.yaml": "key2: two",
"/test/tmp2/third.yaml": "key3: three\nkey4: four",
}
):
conf = "key: !include_dir_merge_named /test"
with io.StringIO(conf) as file:
assert (
".ignore" in mock_walk.return_value[0][1]
), "Expecting .ignore in here"
doc = yaml_loader.yaml.safe_load(file)
assert "tmp2" in mock_walk.return_value[0][1]
assert ".ignore" not in mock_walk.return_value[0][1]
assert doc["key"] == {
"key1": "one",
"key2": "two",
"key3": "three",
"key4": "four",
}
@patch("homeassistant.util.yaml.loader.open", create=True)
def test_load_yaml_encoding_error(mock_open):
"""Test raising a UnicodeDecodeError."""
mock_open.side_effect = UnicodeDecodeError("", b"", 1, 0, "")
with pytest.raises(HomeAssistantError):
yaml_loader.load_yaml("test")
def test_dump():
"""The that the dump method returns empty None values."""
assert yaml.dump({"a": None, "b": "b"}) == "a:\nb: b\n"
def test_dump_unicode():
"""The that the dump method returns empty None values."""
assert yaml.dump({"a": None, "b": "привет"}) == "a:\nb: привет\n"
FILES = {}
def load_yaml(fname, string):
"""Write a string to file and return the parsed yaml."""
FILES[fname] = string
with patch_yaml_files(FILES):
return load_yaml_config_file(fname)
class FakeKeyring:
"""Fake a keyring class."""
def __init__(self, secrets_dict):
"""Store keyring dictionary."""
self._secrets = secrets_dict
# pylint: disable=protected-access
def get_password(self, domain, name):
"""Retrieve password."""
assert domain == yaml._SECRET_NAMESPACE
return self._secrets.get(name)
class TestSecrets(unittest.TestCase):
"""Test the secrets parameter in the yaml utility."""
# pylint: disable=protected-access,invalid-name
def setUp(self):
"""Create & load secrets file."""
config_dir = get_test_config_dir()
yaml.clear_secret_cache()
self._yaml_path = os.path.join(config_dir, YAML_CONFIG_FILE)
self._secret_path = os.path.join(config_dir, yaml.SECRET_YAML)
self._sub_folder_path = os.path.join(config_dir, "subFolder")
self._unrelated_path = os.path.join(config_dir, "unrelated")
load_yaml(
self._secret_path,
"http_pw: pwhttp\n"
"comp1_un: un1\n"
"comp1_pw: pw1\n"
"stale_pw: not_used\n"
"logger: debug\n",
)
self._yaml = load_yaml(
self._yaml_path,
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
def tearDown(self):
"""Clean up secrets."""
yaml.clear_secret_cache()
FILES.clear()
def test_secrets_from_yaml(self):
"""Did secrets load ok."""
expected = {"api_password": "pwhttp"}
assert expected == self._yaml["http"]
expected = {"username": "un1", "password": "pw1"}
assert expected == self._yaml["component"]
def test_secrets_from_parent_folder(self):
"""Test loading secrets from parent folder."""
expected = {"api_password": "pwhttp"}
self._yaml = load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
assert expected == self._yaml["http"]
def test_secret_overrides_parent(self):
"""Test loading current directory secret overrides the parent."""
expected = {"api_password": "override"}
load_yaml(
os.path.join(self._sub_folder_path, yaml.SECRET_YAML), "http_pw: override"
)
self._yaml = load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
assert expected == self._yaml["http"]
def test_secrets_from_unrelated_fails(self):
"""Test loading secrets from unrelated folder fails."""
load_yaml(os.path.join(self._unrelated_path, yaml.SECRET_YAML), "test: failure")
with pytest.raises(HomeAssistantError):
load_yaml(
os.path.join(self._sub_folder_path, "sub.yaml"),
"http:\n api_password: !secret test",
)
def test_secrets_keyring(self):
"""Test keyring fallback & get_password."""
        yaml_loader.keyring = None  # Ensure it's not there
yaml_str = "http:\n api_password: !secret http_pw_keyring"
with pytest.raises(HomeAssistantError):
load_yaml(self._yaml_path, yaml_str)
yaml_loader.keyring = FakeKeyring({"http_pw_keyring": "yeah"})
_yaml = load_yaml(self._yaml_path, yaml_str)
assert {"http": {"api_password": "yeah"}} == _yaml
@patch.object(yaml_loader, "credstash")
def test_secrets_credstash(self, mock_credstash):
"""Test credstash fallback & get_password."""
mock_credstash.getSecret.return_value = "yeah"
yaml_str = "http:\n api_password: !secret http_pw_credstash"
_yaml = load_yaml(self._yaml_path, yaml_str)
log = logging.getLogger()
log.error(_yaml["http"])
assert {"api_password": "yeah"} == _yaml["http"]
def test_secrets_logger_removed(self):
"""Ensure logger: debug was removed."""
with pytest.raises(HomeAssistantError):
load_yaml(self._yaml_path, "api_password: !secret logger")
@patch("homeassistant.util.yaml.loader._LOGGER.error")
def test_bad_logger_value(self, mock_error):
"""Ensure logger: debug was removed."""
yaml.clear_secret_cache()
load_yaml(self._secret_path, "logger: info\npw: abc")
load_yaml(self._yaml_path, "api_password: !secret pw")
assert mock_error.call_count == 1, "Expected an error about logger: value"
def test_secrets_are_not_dict(self):
"""Did secrets handle non-dict file."""
FILES[
self._secret_path
] = "- http_pw: pwhttp\n comp1_un: un1\n comp1_pw: pw1\n"
yaml.clear_secret_cache()
with pytest.raises(HomeAssistantError):
load_yaml(
self._yaml_path,
"http:\n"
" api_password: !secret http_pw\n"
"component:\n"
" username: !secret comp1_un\n"
" password: !secret comp1_pw\n"
"",
)
def test_representing_yaml_loaded_data():
"""Test we can represent YAML loaded data."""
files = {YAML_CONFIG_FILE: 'key: [1, "2", 3]'}
with patch_yaml_files(files):
data = load_yaml_config_file(YAML_CONFIG_FILE)
assert yaml.dump(data) == "key:\n- 1\n- '2'\n- 3\n"
def test_duplicate_key(caplog):
"""Test duplicate dict keys."""
files = {YAML_CONFIG_FILE: "key: thing1\nkey: thing2"}
with patch_yaml_files(files):
load_yaml_config_file(YAML_CONFIG_FILE)
assert "contains duplicate key" in caplog.text
| apache-2.0 | -6,587,619,202,486,819,000 | 34.00432 | 88 | 0.573333 | false |
thorgate/tg-utils | tg_utils/managers.py | 1 | 2021 | from django.db import models
from django.utils import timezone
from tg_utils.signals import post_modify
class ClosableObjectsQuerySet(models.QuerySet):
""" Provides easy way to mark ClosableModel objects as closed
"""
def close(self, user):
return self.update(closed_at=timezone.now(), closed_by=user)
class NotClosedObjectsManager(models.Manager.from_queryset(ClosableObjectsQuerySet)):
""" Utility manager that excludes items that have non-null closed_by value
"""
def get_queryset(self):
return super().get_queryset().filter(closed_by=None)
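# Hypothetical model wiring for the closable helpers above ('Ticket' and its
# fields are assumptions; ClosableModel is assumed defined elsewhere):
#
#     class Ticket(ClosableModel):
#         objects = ClosableObjectsQuerySet.as_manager()
#         open_objects = NotClosedObjectsManager()
#
#     Ticket.open_objects.filter(owner=user).close(user)   # bulk-close open rows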
class NotifyPostChangeQuerySet(models.QuerySet):
""" Sends out a signal (post_modify) whenever anything modifies the database [*]
Note that the signals are sent out immediately and won't be deferred until the current database transaction (if any)
is committed.
[*] except save() and delete() methods of the model. You should explicitly listen to those yourself.
"""
def delete(self):
return self.with_signal(super().delete())
def create(self, **kwargs):
return self.with_signal(super().create(**kwargs))
def update(self, **kwargs):
return self.with_signal(super().update(**kwargs))
def bulk_create(self, *args, **kwargs):
return self.with_signal(super().bulk_create(*args, **kwargs))
def update_or_create(self, *args, **kwargs):
""" Only sent when not created, since default implementation will
call `self.create` when creating which triggers our signal
already.
"""
obj, created = super().update_or_create(*args, **kwargs)
if not created:
return self.with_signal(result=(obj, created))
return obj, created
def with_signal(self, result):
# Trigger the post_change signal
post_modify.send(sender=self.model)
# Return the original result
return result
NotifyModificationsManager = models.Manager.from_queryset(NotifyPostChangeQuerySet)
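# Sketch of listening for the post_modify signal (the model and receiver below
# are assumptions):
#
#     class Article(models.Model):
#         objects = NotifyModificationsManager()
#
#     @receiver(post_modify, sender=Article)
#     def drop_cached_listing(sender, **kwargs):
#         cache.delete('article-list')  # queryset-level writes invalidate the cache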
| isc | -7,063,999,307,600,676,000 | 32.131148 | 120 | 0.680851 | false |
ThirstyGoat/mvvmFX | addServer.py | 3 | 1809 | #!/usr/bin/env python
# This script file is used for travis-ci snapshot deployment.
# This script creates a maven settings file 'mySettings.xml'.
# If there is an existing settings.xml, it is used as base for the new settings file.
# After that it adds the server configuration for the sonatype maven repository
# The username and password are taken from environment variables named SONATYPE_USERNAME and SONATYPE_PASSWORD
# This script is taken from: https://gist.github.com/neothemachine/4060735
import sys
import os
import os.path
import xml.dom.minidom
if os.environ["TRAVIS_SECURE_ENV_VARS"] == "false":
print "no secure env vars available, skipping deployment"
sys.exit()
homedir = os.path.expanduser("~")
m2 = xml.dom.minidom.parse(homedir + '/.m2/settings.xml')
settings = m2.getElementsByTagName("settings")[0]
serversNodes = settings.getElementsByTagName("servers")
if not serversNodes:
serversNode = m2.createElement("servers")
settings.appendChild(serversNode)
else:
serversNode = serversNodes[0]
sonatypeServerNode = m2.createElement("server")
sonatypeServerId = m2.createElement("id")
sonatypeServerUser = m2.createElement("username")
sonatypeServerPass = m2.createElement("password")
idNode = m2.createTextNode("sonatype-nexus-snapshots")
userNode = m2.createTextNode(os.environ["SONATYPE_USERNAME"])
passNode = m2.createTextNode(os.environ["SONATYPE_PASSWORD"])
sonatypeServerId.appendChild(idNode)
sonatypeServerUser.appendChild(userNode)
sonatypeServerPass.appendChild(passNode)
sonatypeServerNode.appendChild(sonatypeServerId)
sonatypeServerNode.appendChild(sonatypeServerUser)
sonatypeServerNode.appendChild(sonatypeServerPass)
serversNode.appendChild(sonatypeServerNode)
m2Str = m2.toxml()
f = open(homedir + '/.m2/mySettings.xml', 'w')
f.write(m2Str)
f.close()
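# The generated ~/.m2/mySettings.xml should contain a block shaped like this
# (the credential values come from the environment variables above):
#
#   <servers>
#     <server>
#       <id>sonatype-nexus-snapshots</id>
#       <username>...</username>
#       <password>...</password>
#     </server>
#   </servers>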
| apache-2.0 | -8,408,112,794,754,028,000 | 30.736842 | 110 | 0.789939 | false |
apple/swift-lldb | packages/Python/lldbsuite/test/arm/breakpoint-thumb-codesection/TestBreakpointThumbCodesection.py | 5 | 1098 | """
Test that breakpoints work correctly in a Thumb function placed in an
arbitrarily named code section.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBreakpointThumbCodesection(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(archs=no_match(["arm"]))
@skipIfDarwinEmbedded # codegen on darwin always defaults to thumb for armv7/armv7k targets
def test_breakpoint(self):
self.build()
exe = self.getBuildArtifact("a.out")
line = line_number('main.c', '// Set break point at this line.')
self.runCmd("target create %s" % exe)
bpid = lldbutil.run_break_set_by_file_and_line(self, "main.c", line)
self.runCmd("run")
self.assertIsNotNone(lldbutil.get_one_thread_stopped_at_breakpoint_id(
self.process(), bpid), "Process is not stopped at breakpoint")
self.process().Continue()
self.assertEqual(self.process().GetState(), lldb.eStateExited, PROCESS_EXITED)
| apache-2.0 | -5,055,532,931,843,639,000 | 31.294118 | 97 | 0.689435 | false |
Jumpers/MysoftAutoTest | Step1-PythonBasic/Practices/wangr/6-9/ex8.py | 1 | 1341 | # -*- coding: utf-8 -*-
#
# formatter = "%r %r %r %r"
#
# print formatter % (1,2,3,4)
# print formatter % ("one","two","three","four")
# print formatter % (True,False,False,True)
# print formatter % ("True",'False','''False''',True)
# # With or without quotes, True and False look the same in the output -- what's the difference?
# print formatter % (formatter,formatter,formatter,formatter)
# print formatter % (
# "I had this thing.",
# "That you could type up right.",
# "But it didn't sing.",
# "So I said goodnight."
# )
# print "I said:\"say hello to uncle\"to my son" # 前后的双引号都要转义
# print """I said:\"say hello to uncle"to my son""" #只用转义前面的双引号
# # 注意一个双引号与三个双引号中对双引号的转义区别:
#
#
# print "I said:'say hello to uncle'to my son"
#
# print 'I said:\'''say hello to uncle\'''to my son'
#
# print 'I said:\'say hello to uncle\'to my son' # both the opening and closing single quotes must be escaped
# print '''I said:\'say hello to uncle'to my son''' # only the opening single quote needs escaping
# # Note the difference in single-quote escaping between single- and triple-quoted strings:
#
# print 'I said:"say hello to uncle"to my son'
# print 'I said:\"say hello to uncle"to my son'
| apache-2.0 | 5,468,197,875,144,148,000 | 32.151515 | 64 | 0.562667 | false |
pudo/nomenklatura | nomenklatura/views/matching.py | 2 | 1119 | from random import randint
from flask import Blueprint, request
from apikit import jsonify, Pager, arg_int
from nomenklatura.model.matching import find_matches
from nomenklatura.model import Dataset, Entity
section = Blueprint('matching', __name__)
@section.route('/match', methods=['GET'])
def match():
dataset_arg = request.args.get('dataset')
dataset = Dataset.find(dataset_arg)
matches = find_matches(dataset,
request.args.get('name'),
filter=request.args.get('filter'),
exclude=arg_int('exclude'))
pager = Pager(matches)
return jsonify(pager.to_dict())
@section.route('/datasets/<dataset>/review', methods=['GET'])
def review(dataset):
entities = Entity.all()
dataset = Dataset.find(dataset)
entities = entities.filter_by(dataset=dataset)
entities = entities.filter(Entity.reviewed == False) # noqa
review_count = entities.count()
if review_count == 0:
return jsonify(None)
entities = entities.offset(randint(0, review_count - 1))
return jsonify(entities.first())
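# Example requests against this blueprint (dataset name is hypothetical):
#   GET /match?dataset=my-dataset&name=ACME%20Corp
#   GET /datasets/my-dataset/review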
| mit | 2,324,831,375,649,056,300 | 30.971429 | 64 | 0.655049 | false |
CIECODE-Madrid/tipi-engine | extractors/spain/deputy_extractors/deputy_extractor.py | 1 | 5354 | import re
from lxml.html import document_fromstring
from tipi_data.models.deputy import Deputy
from tipi_data.utils import generate_id
from urllib.parse import urlparse, parse_qs
class DeputyExtractor():
BASE_URL = 'https://www.congreso.es'
def __init__(self, response):
self.response = response
self.node_tree = document_fromstring(response.text)
self.deputy = Deputy()
def extract(self):
self.deputy['name'] = self.get_text_by_css('.nombre-dip')
self.deputy['parliamentarygroup'] = self.get_abbr_group()
self.deputy['image'] = self.BASE_URL + self.get_src_by_css('.img-dip img')
self.deputy['public_position'] = self.get_public_positions()
self.deputy['party_logo'] = self.get_src_by_css('.logo-partido img')
self.deputy['party_name'] = self.get_text_by_css('.siglas-partido')
self.deputy['url'] = self.response.url
self.deputy['gender'], self.deputy['constituency'] = self.get_gender_and_constituency_from(
self.get_text_by_css('.cargo-dip')
)
self.deputy['id'] = self.generate_id()
self.extract_social_media()
self.extract_extras()
self.extract_dates()
self.extract_from_text()
self.extract_mail()
self.deputy.save()
def get_src_by_css(self, selector):
item = self.get_by_css(selector)
if len(item) == 0:
return ''
return self.clean_str(item[0].get('src'))
def get_text_by_css(self, selector):
item = self.get_by_css(selector)
if len(item) == 0:
return ''
return self.clean_str(item[0].text)
def get_by_css(self, selector):
return self.node_tree.cssselect(selector)
def get_by_xpath(self, xpath):
return self.node_tree.xpath(xpath)
def get_abbr_group(self):
abbr_group_regex = r'\(([^)]+)'
group = self.get_text_by_css('.grupo-dip a')
if not group:
return ''
return re.search(abbr_group_regex, group).group(1).strip()
def extract_mail(self):
mail = self.get_text_by_css('.email-dip a')
if mail != '':
self.deputy['email'] = mail
def clean_str(self, string):
return re.sub('\s+', ' ', string).strip()
def get_public_positions(self):
positions = []
for position in self.get_by_css('.cargos:not(.ult-init) li'):
positions.append(self.clean_str(position.text_content()))
return positions
def extract_dates(self):
date_elements = self.get_by_css('.f-alta')
end_date = self.clean_str(date_elements[1].text_content()).replace("Causó baja el ", "")[:28]
self.deputy['start_date'] = self.clean_str(date_elements[0].text_content()).replace("Condición plena: ", "")[:28]
if end_date != '':
self.deputy['end_date'] = end_date
self.deputy['active'] = end_date == ''
def extract_social_media(self):
social_links = self.get_by_css('.rrss-dip a')
for link in social_links:
img_src = link.getchildren()[0].get('src')
if 'twitter' in img_src:
self.deputy['twitter'] = self.get_link_url(link)
if 'facebook' in img_src:
self.deputy['facebook'] = self.get_link_url(link)
if 'web' in img_src:
self.deputy['web'] = self.get_link_url(link)
def get_link_url(self, link):
url = link.get('href')
if url.find('http') != 0:
url = 'http://' + url
return url
def get_gender_and_constituency_from(self, string):
array_string = string.split()
gender = 'Mujer' if array_string[0] == 'Diputada' else 'Hombre'
for _ in range(2):
array_string.pop(0)
constituency = " ".join(array_string)
return gender, constituency
def extract_extras(self):
self.deputy['extra'] = {}
links = self.get_by_css('.declaraciones-dip a')
if links:
self.deputy['extra']['declarations'] = {}
for link in links:
self.deputy['extra']['declarations'][self.clean_str(link.text)] = self.BASE_URL + link.get('href')
def extract_from_text(self):
birthday_paragraph = self.clean_str(self.get_by_xpath("//h3[normalize-space(text()) = 'Ficha personal']/following-sibling::p[1]")[0].text)
birthday = birthday_paragraph.replace("Nacido el ", "").replace("Nacida el ", "")[:29]
if birthday != '':
self.deputy['birthdate'] = birthday
legislatures_paragraph = self.clean_str(self.get_by_xpath("//h3[normalize-space(text()) = 'Ficha personal']/following-sibling::p[2]")[0].text)
self.deputy['legislatures'] = legislatures_paragraph.replace("Diputada", "").replace("Diputado", "").replace(" de la ", "").replace(" Legislaturas", "").replace("y ", "").split(", ")
bio = self.clean_str(self.get_by_xpath("//h3[normalize-space(text()) = 'Ficha personal']/parent::div")[0].text_content())
bio = bio.replace("Ficha personal", "").replace(birthday_paragraph, "").replace(legislatures_paragraph, "")
pos = bio.find(' Condición plena')
self.deputy['bio'] = self.clean_str(bio[:pos]).split('. ')
def generate_id(self):
return generate_id(self.deputy['name'])
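# A minimal usage sketch (assumption: `response` is a requests.Response for a
# single deputy profile page on congreso.es):
#
#   import requests
#   response = requests.get(profile_url)  # profile_url is hypothetical
#   DeputyExtractor(response).extract()   # parses the page and saves a Deputy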
| gpl-3.0 | 374,042,878,691,597,630 | 39.233083 | 190 | 0.587367 | false |
rec/echomesh | code/python/external/platform/darwin/numpy/f2py/tests/test_kind.py | 34 | 1076 | import os
import math
from numpy.testing import *
from numpy import array
import util
def _path(*a):
return os.path.join(*((os.path.dirname(__file__),) + a))
from numpy.f2py.crackfortran import _selected_int_kind_func as selected_int_kind
from numpy.f2py.crackfortran import _selected_real_kind_func as selected_real_kind
class TestKind(util.F2PyTest):
sources = [_path('src', 'kind', 'foo.f90'),
]
@dec.slow
def test_all(self):
selectedrealkind = self.module.selectedrealkind
selectedintkind = self.module.selectedintkind
for i in range(40):
assert_(selectedintkind(i) in [selected_int_kind(i),-1],\
'selectedintkind(%s): expected %r but got %r' % (i, selected_int_kind(i), selectedintkind(i)))
for i in range(20):
assert_(selectedrealkind(i) in [selected_real_kind(i),-1],\
'selectedrealkind(%s): expected %r but got %r' % (i, selected_real_kind(i), selectedrealkind(i)))
if __name__ == "__main__":
import nose
nose.runmodule()
| mit | -3,999,098,604,160,743,400 | 30.647059 | 118 | 0.628253 | false |
autosportlabs/RaceCapture_App | autosportlabs/racecapture/views/setup/selectpresetview.py | 1 | 4451 | #
# Race Capture App
#
# Copyright (C) 2014-2017 Autosport Labs
#
# This file is part of the Race Capture App
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details. You should
# have received a copy of the GNU General Public License along with
# this code. If not, see <http://www.gnu.org/licenses/>.
import kivy
kivy.require('1.10.0')
from kivy.metrics import dp
from kivy.clock import Clock
from kivy.app import Builder
from kivy.uix.modalview import ModalView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from fieldlabel import FieldLabel
from kivy.uix.screenmanager import Screen
from autosportlabs.racecapture.views.setup.infoview import InfoView
from autosportlabs.uix.button.betterbutton import BetterToggleButton
from kivy.properties import ObjectProperty, StringProperty
from autosportlabs.racecapture.presets.presetview import PresetItemView
class SelectPresetView(InfoView):
"""
A setup screen that lets users select a preset for their device.
"""
Builder.load_string("""
<SelectPresetView>:
background_source: 'resource/setup/background_blank.png'
info_text: 'Select a preset'
BoxLayout:
orientation: 'vertical'
padding: (0, dp(20))
spacing: (0, dp(10))
BoxLayout:
size_hint_y: 0.12
ScrollContainer:
canvas.before:
Color:
rgba: ColorScheme.get_dark_background()
Rectangle:
pos: self.pos
size: self.size
do_scroll_x: False
do_scroll_y: True
size_hint_y: 1
size_hint_x: 1
GridLayout:
id: presets
spacing: [0, dp(10)]
row_default_height: dp(130)
size_hint_y: None
height: self.minimum_height
cols: 1
BoxLayout:
size_hint_y: None
height: dp(50)
""")
def __init__(self, **kwargs):
super(SelectPresetView, self).__init__(**kwargs)
self.ids.next.disabled = True
self.ids.next.pulsing = False
self.preset_selected = False
def on_setup_config(self, instance, value):
self._update_ui()
def _update_ui(self):
self.ids.presets.clear_widgets()
device_step = self.get_setup_step('device')
device = device_step.get('device')
self.ids.next.disabled = False
# get the device prefix
device = device.split('_')[0]
print('the device {}'.format(device))
presets = self.preset_manager.get_presets_by_type(device)
if presets is None:
self.select_next()
return
for k, v in presets:
name = v.name
notes = v.notes
self.add_preset(k, v)
def add_preset(self, key, preset):
view = PresetItemView(preset_id=preset.mapping_id,
name=preset.name,
notes=preset.notes,
image_path=preset.local_image_path)
view.bind(on_preset_selected=self._preset_selected)
self.ids.presets.add_widget(view)
def _preset_selected(self, instance, preset_id):
def preset_selected():
self.preset_selected = True
self.ids.next.pulsing = True
self.ids.next.disabled = False
self.ids.next.disabled = True
preset = self.preset_manager.get_preset_by_id(preset_id)
self.rc_config.fromJson(preset.mapping)
self.rc_config.stale = True
self.write_rcp_config('Applying preset {} ... '.format(preset.name), preset_selected)
def select_next(self):
def do_next():
super(SelectPresetView, self).select_next()
if not self.preset_selected:
self.info_popup('You can configure presets later under Setup', do_next)
return
do_next()
| gpl-3.0 | 2,016,710,277,483,842,300 | 32.977099 | 93 | 0.607279 | false |
pvpnvz/internshipsystem | app/auth/views.py | 1 | 1273 | from flask import render_template, redirect, url_for, request, flash
from . import auth
from .form import LoginForm
from ..models import Teacher, Student,Permission
from flask.ext.login import login_required, login_user, logout_user,session,current_user
from .LoginAction import LoginAction
import os
logout_url = 'https://cas.dgut.edu.cn/user/logout?service=http://shixi.dgut.edu.cn'
@auth.route('/login', methods=['GET', 'POST'])
def login():
loginAction = LoginAction()
params = request.args.items()
d = {}
for i,token in params:
d[i] = token
redirect_link = loginAction.service(d.get('token'))
return redirect(redirect_link)
# # DEBUG
# teacher = Teacher.query.filter_by(teaId='20149062').first()
# login_user(teacher)
# return redirect(url_for('main.index'))
@auth.route('lg',methods=['GET','POST'])
def lg():
if not current_user.is_authenticated:
flash('此用户信息暂未录入本系统!')  # "This user's information is not yet in the system"
return render_template('index.html',Permission=Permission)
else:
return redirect(url_for('main.index'))
@auth.route('/logout', methods=['GET', 'POST'])
@login_required
def logout():
logout_user()
flash('登出成功!')  # "Logged out successfully"
return redirect(url_for('main.index', isLogout=1))
| mit | 33,430,227,792,179,736 | 29.170732 | 88 | 0.678254 | false |
PatrickgHayes/nnClassify | src/Predictor_.py | 1 | 9708 | import cv2
import numpy as np
from src.MyUtils_ import MyUtils
from src.ImageCropper_ import ImageCropper
from src.Evaluator_ import Evaluator
from src.Pickler_ import Pickler
from src import Constants
from src.Transfer_Calculator_ import Transfer_Calculator as tc
import pickle
import os
# This class contains the logic displaying our predictions to the user
class Predictor:
### METHODS ################################################################
@staticmethod
def __test_setup(testset_path):
pickles_path = os.path.join(testset_path, Constants.PICKLES)
if not os.path.exists(pickles_path):
os.makedirs(pickles_path)
Pickler.pickle_wells(testset_path)
results_path = os.path.join(testset_path, Constants.RESULTS_DIR)
if not os.path.exists(results_path):
os.makedirs(results_path)
transfer_path = os.path.join(testset_path, Constants.TRANSFER)
if not os.path.exists(transfer_path):
os.makedirs(transfer_path)
tc.get_all_transfer_values(testset_path)
return
@staticmethod
def __pred_setup(predset_path):
pickles_path = os.path.join(predset_path, Constants.PICKLES)
if not os.path.exists(pickles_path):
os.makedirs(pickles_path)
Pickler.pickle_wells_no_category(predset_path)
results_path = os.path.join(predset_path, Constants.RESULTS_DIR)
if not os.path.exists(results_path):
os.makedirs(results_path)
transfer_path = os.path.join(predset_path, Constants.TRANSFER)
if not os.path.exists(transfer_path):
os.makedirs(transfer_path)
tc.get_well_transfer_values(predset_path)
return
# Purpose - Takes in folder with all the images for a well (uncropped)
# It first crops all the images, then it predicts the number
# of eyes for each image. It prints these results to file. At
# the top of this file is the prediction for the entire well
#
# Takes - folder_path: location of the well to predict
# encoding: used for converting from one-hot to text
# model: the neural network to use for prediction
# op_params: Contrast is an optional parameter.
# if they pass contrast as a parameter increase
# the contrast of the test images
#
# Returns - a string with the results for the well.
# Also creates a file and writes the results there
@staticmethod
def predict_well(imgs_path, results_path, encoding, model, well_name):
images = pickle.load(open(imgs_path,"rb"))
pred = model.predict(images)
# Write the results file
with open(results_path,'w') as results:
#Write the header
results.write("Name\tPredicted Category")
for cat in encoding:
results.write("\t" + cat)
results.write('\n')
# Uses the confidences of the predictions and the number of
# frames predicted normal to predict if a well is normal.
pred_cat = Evaluator.is_it_normal(pred)
well_results = well_name + "\t" + pred_cat
results.write(well_results)
#Write the results for each individual image
for i in range(0,len(pred)):
results.write('\n')
results.write(str(i+1)+"_img" + '\t'
+ Evaluator.max_pred(pred[i], encoding))
for confidence in pred[i]:
results.write('\t' + str(confidence))
return well_results, pred_cat
@staticmethod
def predict_wells(predset_path, encoding, model):
"""This method takes a prediction set and predicts
the label for each well"""
all_well_results = list()
predicted_labels = list()
Predictor.__pred_setup(predset_path)
wells = MyUtils.listdir_nohidden(os.path.join(
predset_path, "Transfer_Values"))
for well in wells:
print (" Predicting well " + well )
#Get the well results from each well
trans_value_path = (os.path.join(predset_path, "Transfer_Values"
, well))
results_path = (os.path.join(predset_path, "Results",
well + ".txt"))
well_results, pred_cat = Predictor.predict_well(
trans_value_path,
results_path,
encoding,
model,
well)
#Add the well results to a list with all the well results
#append at the end of the line what category the well actually
#is
all_well_results.append(well_results)
predicted_labels.append(pred_cat)
# Write the results for the test set in a new file
with open(os.path.join(
predset_path, "Results","Pred_Set.txt"),'w') as results:
# Individual Well Results Header
results.write("Well\tPredicted_Category\n")
for well_results in all_well_results:
split_results = well_results.split('\t')
well = split_results[0]
pred_cat = split_results[1]
results.write(well + '\t'
+ pred_cat + '\n')
return
# Purpose - Predict all the wells in test set
#
# Takes - testset_path: the absolute path of the test set
#
# Returns - nothing, creates a file and writes the results there
@staticmethod
def test_wells(testset_path, encoding, model):
all_well_results = list()
actual_labels = list()
predicted_labels = list()
Predictor.__test_setup(testset_path)
categories = MyUtils.listdir_nohidden(os.path.join(testset_path
,"Transfer_Values"))
for category in categories:
print ("Predicting " + category)
if not os.path.exists(
os.path.join(testset_path, "Results", category)):
os.makedirs(os.path.join(testset_path, "Results", category))
wells = MyUtils.listdir_nohidden(os.path.join(testset_path
,"Transfer_Values", category))
for well in wells:
print (" Predicting well " + well )
#Get the well results from each well
trans_value_path = (os.path.join(
testset_path, "Transfer_Values"
, category, well))
results_path = (os.path.join(testset_path, "Results"
,category, well + ".txt"))
well_results, pred_cat = Predictor.predict_well(
trans_value_path,
results_path,
encoding,
model,
well)
#Add the well results to a list with all the well results
#append at the end of the line what category the well actually
#is
all_well_results.append(well_results + '\t' + category)
actual_labels.append(category)
predicted_labels.append(pred_cat)
# Calculate the Balanced Error Rate
BER, TP, FP, TN, FN, NS = Evaluator.calc_balanced_accuracy(
actual_labels,
predicted_labels)
# Write the results for the test set in a new file
with open(os.path.join(
testset_path, "Results", "Test_Set.txt"),'w') as results:
results.write("All Well Results\n")
results.write("Balanced Error Rate: " + str(BER) + '\n')
results.write("True Positive Count "
+ "(Worm predicted abnormal and actually was): "
+ str(TP) + '\n')
results.write("True Negative Count "
+ "(Worm predicted normal and actually was): "
+ str(TN) + '\n')
results.write("False Positive Count "
+ "(Worm predicted abnormal but it was normal): "
+ str(FP) + '\n')
results.write("False Negative Count "
+ "(Worm predicted normal but it was abnormal): "
+ str(FN) + '\n')
results.write("Unsure Count "
+ "(Model lacked the confidence to make a prediction): "
+ str(NS) + '\n')
# Empty line between overall results and individual well results
results.write('\n')
# Individual Well Results Header
results.write("Well\tActual_Category\tPredicted_Category\n")
for well_results in all_well_results:
split_results = well_results.split('\t')
well = split_results[0]
pred_cat = split_results[1]
act_cat = split_results[len(split_results)-1]
results.write(well + '\t'
+ act_cat + '\t' + pred_cat + '\n')
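# A minimal usage sketch (assumptions: `model` exposes a Keras-style
# .predict() returning per-class confidences, and `encoding` lists the
# category names in the model's output order):
#
#   encoding = ['normal', 'abnormal']  # hypothetical categories
#   Predictor.test_wells('/data/test_set', encoding, model)
#   Predictor.predict_wells('/data/new_wells', encoding, model)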
| mit | 7,380,742,079,694,068,000 | 41.025974 | 78 | 0.522559 | false |
arrabito/DIRAC | Resources/Storage/GFAL2_StorageBase.py | 1 | 67689 | """ :mod: GFAL2_StorageBase
=================
.. module: python
:synopsis: GFAL2 class from StorageElement using gfal2. Other modules can inherit from this to use the gfal2 methods.
"""
# pylint: disable=arguments-differ
# # imports
import os
import datetime
import errno
from stat import S_ISREG, S_ISDIR, S_IXUSR, S_IRUSR, S_IWUSR, \
S_IRWXG, S_IRWXU, S_IRWXO
import gfal2  # pylint: disable=import-error
# # from DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
from DIRAC.Resources.Storage.StorageBase import StorageBase
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.File import getSize
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
# # RCSID
__RCSID__ = "$Id$"
class GFAL2_StorageBase( StorageBase ):
""" .. class:: GFAL2_StorageBase
SRM v2 interface to StorageElement using gfal2
"""
def __init__( self, storageName, parameters ):
""" c'tor
:param self: self reference
:param str storageName: SE name
:param dict parameters: storage parameters
"""
StorageBase.__init__( self, storageName, parameters )
self.log = gLogger.getSubLogger( "GFAL2_StorageBase", True )
# Some storages have problem to compute the checksum with xroot while getting the files
# This allows to disable the checksum calculation while transferring, and then we compare the
# file size
self.disableTransferChecksum = True if ( parameters.get( 'DisableChecksum' ) == 'True' ) else False
# Different levels or verbosity:
# gfal2.verbose_level.normal,
# gfal2.verbose_level.verbose,
# gfal2.verbose_level.debug,
# gfal2.verbose_level.trace
dlevel = self.log.getLevel()
if dlevel == 'DEBUG':
gLogger.enableLogsFromExternalLibs()
gfal2.set_verbose( gfal2.verbose_level.trace )
self.isok = True
# # gfal2 API
self.ctx = gfal2.creat_context()
# by default turn off BDII checks
self.ctx.set_opt_boolean( "BDII", "ENABLE", False )
# FIXME: Avoid caching because of a bug in globus (https://its.cern.ch/jira/browse/DMC-853)
self.ctx.set_opt_boolean( "GRIDFTP PLUGIN", "SESSION_REUSE", False )
# Enable IPV6 for gsiftp
self.ctx.set_opt_boolean( "GRIDFTP PLUGIN", "IPV6", True )
# spaceToken used for copying from and to the storage element
self.spaceToken = parameters.get( 'SpaceToken', '' )
# stageTimeout, default timeout to try and stage/pin a file
self.stageTimeout = gConfig.getValue( '/Resources/StorageElements/StageTimeout', 12 * 60 * 60 )
# gfal2Timeout, amount of time it takes until an operation times out
self.gfal2Timeout = gConfig.getValue( "/Resources/StorageElements/GFAL_Timeout", 100 )
# set the gfal2 default protocols, e.g. used when trying to retrieve transport url
self.defaultLocalProtocols = gConfig.getValue( '/Resources/StorageElements/DefaultProtocols', [] )
# # set checksum type, by default this is 0 (GFAL_CKSM_NONE)
self.checksumType = gConfig.getValue( "/Resources/StorageElements/ChecksumType", '0' )
if self.checksumType == '0':
self.checksumType = None
self.log.debug( 'GFAL2_StorageBase: using %s checksum' % self.checksumType )
self.voName = None
ret = getProxyInfo( disableVOMS = True )
if ret['OK'] and 'group' in ret['Value']:
self.voName = getVOForGroup( ret['Value']['group'] )
self.MAX_SINGLE_STREAM_SIZE = 1024 * 1024 * 10 # 10 MB ???
self.MIN_BANDWIDTH = 0.5 * ( 1024 * 1024 ) # 0.5 MB/s ???
# This is the list of extended metadata to query the server for.
# It is used by getSingleMetadata.
# If set to None, No extended metadata will be queried
# If the list is empty, all of them will be queried
self._defaultExtendedAttributes = []
def exists( self, path ):
""" Check if the path exists on the storage
:param self: self reference
:param str path: path or list of paths to be checked
:returns Failed dictionary: {pfn : error message}
Successful dictionary: {pfn : bool}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.exists: Checking the existence of %s path(s)" % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__singleExists( url )
if res['OK']:
successful[url] = res['Value']
else: # something went wrong with the query
failed[url] = res['Message']
resDict = { 'Failed':failed, 'Successful':successful }
return S_OK( resDict )
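# Illustrative return shape shared by the bulk methods of this class
# (paths and messages are made up):
#   { 'Successful' : { 'srm://host/base/file1' : True },
#     'Failed'     : { 'srm://host/base/file2' : 'error message' } }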
def _estimateTransferTimeout( self, fileSize ):
""" Dark magic to estimate the timeout for a transfer
The values are set empirically and seem to work fine.
They were evaluated with gfal1 and SRM.
:param fileSize: size of the file in bytes we want to transfer
:return: timeout in seconds
"""
return int( fileSize / self.MIN_BANDWIDTH * 4 + 310 )
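# Worked example of the formula above: a 100 MB file with the default
# MIN_BANDWIDTH of 0.5 MB/s gives
#   int( 104857600 / 524288.0 * 4 + 310 ) = int( 800 + 310 ) = 1110 seconds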
def __singleExists( self, path ):
""" Check if :path: exists on the storage
:param self: self reference
:param str: path to be checked (srm://...)
:returns: S_OK ( boolean exists ) a boolean whether it exists or not
S_ERROR( errStr ) there is a problem with getting the information
"""
log = self.log.getSubLogger( "GFAL2_StorageBase._singleExists" )
log.debug( "Determining whether %s exists or not" % path )
try:
self.ctx.stat( path ) # If path doesn't exist this will raise an error - otherwise path exists
log.debug( "path exists" )
return S_OK( True )
except gfal2.GError as e:
if e.code == errno.ENOENT:
log.debug( "Path does not exist" )
return S_OK( False )
else:
log.debug( "GFAL2_StorageBase.__singleExists: %s" % repr( e ) )
return S_ERROR( e.code, e.message )
### methods for manipulating files ###
def isFile( self, path ):
""" Check if the path provided is a file or not
:param self: self reference
:param str: path or list of paths to be checked ( 'srm://...')
:returns Failed dict: {path : error message}
Successful dict: {path : bool}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.isFile: checking whether %s path(s) are file(s)." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleFile( url )
if res['OK']:
successful[url] = res['Value']
else:
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __isSingleFile( self, path ):
""" Checking if :path: exists and is a file
:param self: self reference
:param str path: single path on the storage (srm://...)
:returns: S_ERROR if there is a fatal error
S_OK ( boolean) if it is a file or not
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__isSingleFile" )
log.debug( "Determining whether %s is a file or not." % path )
try:
statInfo = self.ctx.stat( path )
# instead of return S_OK( S_ISDIR( statInfo.st_mode ) ) we use if/else. So we can use the log.
if S_ISREG( statInfo.st_mode ):
log.debug( "Path is a file" )
return S_OK( True )
else:
log.debug( "Path is not a file" )
return S_OK( False )
except gfal2.GError as e:
log.debug( "GFAL2_StorageBase.__isSingleFile: %s" % repr( e ) )
return S_ERROR( e.code, e.message )
def putFile( self, path, sourceSize = 0 ):
""" Put a copy of a local file or a file on another srm storage to a directory on the
physical storage.
:param path: dictionary { lfn (srm://...) : localFile }
:param sourceSize: size of the file in byte. Mandatory for third party copy (WHY ???)
Also, this parameter makes it essentially a non bulk operation for
third party copy, unless all files have the same size...
:returns: Successful dict: { path : size }
Failed dict: { path : error message }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
failed = {}
successful = {}
for dest_url, src_file in urls.iteritems():
if not src_file:
errStr = "GFAL2_StorageBase.putFile: Source file not set. Argument must be a dictionary \
(or a list of a dictionary) {url : local path}"
self.log.debug( errStr )
return S_ERROR( errStr )
res = self.__putSingleFile( src_file, dest_url, sourceSize )
if res['OK']:
successful[dest_url] = res['Value']
else:
failed[dest_url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __putSingleFile( self, src_file, dest_url, sourceSize ):
"""Put a copy of the local file to the current directory on the
physical storage
For gfal2 version 2.7.8 and lower the environment variable GLOBUS_THREAD_MODEL has to be
set to 'pthread' otherwise dirac-dms-add-file will go in a deadlock. This is fixed with gfal2 2.8.
:param str src_file: local file to copy
:param str dest_file: pfn (srm://...)
:param int sourceSize: size of the source file
:returns: S_OK( fileSize ) if everything went fine, S_ERROR otherwise
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__putSingleFile" )
log.debug( "trying to upload %s to %s" % ( src_file, dest_url ) )
# in case src_file is not set (this is also done in putFile but some methods directly access this method,
# so that's why it's checked here one more time
if not src_file:
errStr = 'no source defined, please check argument format { destination : localfile }'
return S_ERROR( errStr )
# check whether the source is local or on another storage
if any( src_file.startswith( protocol + ':' ) for protocol in self.protocolParameters['InputProtocols'] ):
src_url = src_file
if not sourceSize:
errStr = "For file replication the source file size in bytes must be provided."
log.debug( errStr, src_file )
return S_ERROR( errno.EINVAL, errStr )
# file is local so we can set the protocol and determine source size accordingly
else:
if not os.path.isfile( src_file ):
errStr = "The local source file does not exist or is a directory"
log.debug( errStr, src_file )
return S_ERROR( errno.ENOENT, errStr )
if not src_file.startswith( 'file://' ):
src_url = 'file://%s' % os.path.abspath( src_file )
else:
src_url = src_file
sourceSize = getSize( src_file )
if sourceSize == -1:
errStr = "Failed to get file size"
log.debug( errStr, src_file )
return S_ERROR( DErrno.EFILESIZE, errStr )
if sourceSize == 0:
errStr = "Source file size is zero."
log.debug( errStr, src_file )
return S_ERROR( DErrno.EFILESIZE, errStr )
# source is OK, creating the destination folder
dest_path = os.path.dirname( dest_url )
res = self.__createSingleDirectory( dest_path )
if not res['OK']:
log.debug( "Failed to create destination folder %s" % dest_path, res['Message'] )
return res
# folder is created and file exists, setting known copy parameters
params = self.ctx.transfer_parameters()
params.timeout = self._estimateTransferTimeout( sourceSize )
if sourceSize > self.MAX_SINGLE_STREAM_SIZE:
params.nbstreams = 4
else:
params.nbstreams = 1
params.overwrite = True # old gfal removed old file first, gfal2 can just overwrite it with this flag set to True
if self.spaceToken:
params.dst_spacetoken = self.spaceToken
params.checksum_check = bool( self.checksumType )
if self.checksumType:
params.set_user_defined_checksum(self.checksumType, '')
# Params set, copying file now
try:
self.ctx.filecopy( params, src_url, dest_url )
if self.checksumType:
# checksum check is done by gfal2
return S_OK( sourceSize )
# no checksum check, compare file sizes for verfication
else:
res = self.__getSingleFileSize( dest_url )
# In case of failure, we set destSize to None
# so that the cleaning of the file happens
if not res['OK']:
destSize = None
else:
destSize = res['Value']
log.debug( 'destSize: %s, sourceSize: %s' % ( destSize, sourceSize ) )
if destSize == sourceSize:
return S_OK( destSize )
else:
log.debug( "Source and destination file size don't match. \
Trying to remove destination file" )
res = self.__removeSingleFile( dest_url )
if not res['OK']:
log.debug( "Failed to remove destination file", res['Message'] )
return res
errStr = "Source and destination file size don't match. Removed destination file"
log.debug( errStr, {sourceSize : destSize} )
return S_ERROR( "%s srcSize: %s destSize: %s" % ( errStr, sourceSize, destSize ) )
except gfal2.GError as e:
# ##
# extended error message because otherwise we could only guess what the error could be when we copy
# from another srm to our srm-SE
errStr = "Exception while copying"
detailMsg = "Failed to copy file %s to destination url %s: [%d] %s" % ( src_file, dest_url, e.code, e.message )
log.debug( errStr, detailMsg )
return S_ERROR( e.code, detailMsg )
def getFile( self, path, localPath = False ):
""" Make a local copy of storage :path:
:param self: self reference
:param str path: path (or list of paths) on storage (srm://...)
:param localPath: destination folder. Default is from current directory
:returns Successful dict: {path : size}
Failed dict: {path : errorMessage}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.getFile: Trying to download %s files." % len( urls ) )
failed = {}
successful = {}
for src_url in urls:
fileName = os.path.basename( src_url )
dest_file = os.path.join( localPath if localPath else os.getcwd(), fileName )
res = self._getSingleFile( src_url, dest_file, disableChecksum = self.disableTransferChecksum )
if not res['OK']:
failed[src_url] = res['Message']
else:
successful[src_url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful} )
def _getSingleFile( self, src_url, dest_file, disableChecksum = False ):
""" Copy a storage file :src_url: to a local fs under :dest_file:
:param self: self reference
:param str src_url: SE url that is to be copied (srm://...)
:param str dest_file: local fs path
:param bool disableChecksum: There are problems with xroot comparing checksums after
copying a file so with this parameter we can disable checksum
checks for xroot
:returns: S_ERROR( errStr ) in case of an error
S_OK( size of file ) if copying is successful
"""
log = self.log.getSubLogger( "GFAL2_StorageBase._getSingleFile" )
log.info( "Trying to download %s to %s" % ( src_url, dest_file ) )
if disableChecksum:
log.warn( "checksum calculation disabled for transfers!" )
destDir = os.path.dirname( dest_file )
if not os.path.exists( destDir ):
log.debug( "Local directory does not yet exist. Creating it", destDir )
try:
os.makedirs( destDir )
except OSError as error:
errStr = "Error while creating the destination folder"
log.exception( errStr, lException = error )
return S_ERROR( "%s: %s" % ( errStr, repr( error ) ) )
res = self.__getSingleFileSize( src_url )
if not res['OK']:
log.debug( "Error while determining file size", res['Message'] )
return res
remoteSize = res['Value']
# Set gfal2 copy parameters
# folder is created and file exists, setting known copy parameters
params = self.ctx.transfer_parameters()
params.timeout = self._estimateTransferTimeout( remoteSize )
if remoteSize > self.MAX_SINGLE_STREAM_SIZE:
params.nbstreams = 4
else:
params.nbstreams = 1
params.overwrite = True # old gfal removed old file first, gfal2 can just overwrite it with this flag set to True
if self.spaceToken:
params.src_spacetoken = self.spaceToken
params.checksum_check = bool( self.checksumType and not disableChecksum )
if self.checksumType:
params.set_user_defined_checksum(self.checksumType, '')
# Params set, copying file now
try:
# gfal2 needs a protocol to copy local which is 'file:'
if not dest_file.startswith( 'file://' ):
dest = 'file://%s' % os.path.abspath( dest_file )
else:
dest = dest_file
self.ctx.filecopy( params, src_url, dest )
if params.checksum_check:
# gfal2 did a checksum check, so we should be good
return S_OK( remoteSize )
else:
# No checksum check was done so we compare file sizes
destSize = getSize( dest_file )
if destSize == remoteSize:
return S_OK( destSize )
else:
errStr = "File sizes don't match. Something went wrong. Removing local file %s" % dest_file
log.debug( errStr, {remoteSize : destSize} )
if os.path.exists( dest_file ):
os.remove( dest_file )
return S_ERROR( errStr )
except gfal2.GError as e:
errStr = 'Could not copy %s to %s, [%d] %s' % ( src_url, dest, e.code, e.message )
log.debug( errStr )
return S_ERROR( e.code, errStr )
def removeFile( self, path ):
""" Physically remove the file specified by path
A non existing file will be considered as successfully removed
:param str path: path (or list of paths) on storage (srm://...)
:returns: Successful dict {path : True}
Failed dict {path : error message}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.removeFile: Attempting to remove %s files" % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__removeSingleFile( url )
if res['OK']:
successful[url] = res['Value']
else:
failed [url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __removeSingleFile( self, path ):
""" Physically remove the file specified by path
:param str path: path on storage (srm://...)
:returns: S_OK( True ) if the removal was successful (also if file didn't exist in the first place)
S_ERROR( errStr ) if there was a problem removing the file
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__removeSingleFile" )
log.debug( "Attempting to remove single file %s" % path )
try:
status = self.ctx.unlink( path )
if status == 0:
log.debug( "File successfully removed" )
return S_OK( True )
elif status < 0:
errStr = 'return status < 0. Error occurred.'
return S_ERROR( errStr )
except gfal2.GError as e:
# file doesn't exist so operation was successful
if e.code == errno.ENOENT:
log.debug( "File does not exist." )
return S_OK( True )
elif e.code == errno.EISDIR:
errStr = "Path is a directory."
log.debug( errStr )
return S_ERROR( errno.EISDIR, errStr )
else:
errStr = "Failed to remove file."
log.debug( "Failed to remove file: [%d] %s" % ( e.code, e.message ) )
return S_ERROR( e.code, repr( e ) )
def getFileSize( self, path ):
"""Get the physical size of the given file
:param self: self reference
:param path: path (or list of paths) on storage (pfn : srm://...)
:returns: Successful dict {path : size}
Failed dict {path : error message }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.getFileSize: Trying to determine file size of %s files" % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleFileSize( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __getSingleFileSize( self, path ):
""" Get the physical size of the given file
:param self: self reference
:param path: single path on the storage (srm://...)
:returns: S_OK( filesize ) when successfully determined filesize
S_ERROR( errStr ) filesize could not be determined
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__getSingleFileSize" )
log.debug( "Determining file size of %s" % path )
try:
statInfo = self.ctx.stat( path ) # keeps info like size, mode.
# If it is not a file
if not S_ISREG( statInfo.st_mode ):
errStr = "Path is not a file"
self.log.debug( errStr )
return S_ERROR( errno.EISDIR, errStr )
self.log.debug( "File size successfully determined %s" % statInfo.st_size )
return S_OK( long ( statInfo.st_size ) )
except gfal2.GError as e:
errStr = "Failed to determine file size."
self.log.debug( errStr, repr( e ) )
return S_ERROR( e.code, "%s: %s" % ( errStr, repr( e ) ) )
def getFileMetadata( self, path ):
""" Get metadata associated to the file(s)
:param self: self reference
:param str path: path (or list of paths) on the storage (srm://...)
:returns: successful dict { path : metadata }
failed dict { path : error message }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( 'GFAL2_StorageBase.getFileMetadata: trying to read metadata for %s paths' % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self._getSingleFileMetadata( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def _getSingleFileMetadata( self, path ):
""" Fetch the metadata associated to the file
:param self: self reference
:param path: path (only 1) on storage (srm://...)
:returns:
S_OK (MetadataDict) if we could get the metadata
S_ERROR (errorMsg) if there was a problem getting the metadata or if it is not a file
"""
self.log.debug( 'GFAL2_StorageBase._getSingleFileMetadata: trying to read metadata for %s' % path )
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
metaDict = res['Value']
if not metaDict['File']:
errStr = "GFAL2_StorageBase._getSingleFileMetadata: supplied path is not a file"
self.log.debug( errStr, path )
return S_ERROR( errno.EISDIR, errStr )
return S_OK( metaDict )
def _updateMetadataDict( self, _metadataDict, _attributeDict ):
""" Updating the metadata dictionary with protocol specific attributes
Dummy implementation
:param self: self reference
:param dict: metadataDict we want add the specific attributes to
:param dict: attributeDict contains the special attributes
"""
return
def __getSingleMetadata( self, path ):
""" Fetches the metadata of a single file or directory via gfal2.stat
and getExtendedAttributes
:param self: self reference
:param path: path (only 1) on storage (srm://...)
:returns:
S_OK ( MetadataDict ) if we could get the metadata
S_ERROR ( errorMsg ) if there was a problem getting the metadata
"""
log = self.log.getSubLogger( 'GFAL2_StorageBase.__getSingleMetadata' )
log.debug( 'Reading metadata for %s' % path )
try:
statInfo = self.ctx.stat( path )
except gfal2.GError as e:
errStr = "Failed to retrieve metadata"
self.log.debug( errStr, repr( e ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
metadataDict = self.__parseStatInfoFromApiOutput( statInfo )
if metadataDict['File'] and self.checksumType:
res = self.__getChecksum( path, self.checksumType )
if not res['OK']:
log.warn( "Could not get checksum:%s" % res['Message'] )
metadataDict['Checksum'] = res.get( 'Value', '' )
metadataDict = self._addCommonMetadata( metadataDict )
if self._defaultExtendedAttributes is not None:
res = self._getExtendedAttributes( path, attributes = self._defaultExtendedAttributes )
if not res['OK']:
log.warn( "Could not get extended attributes: %s" % res['Message'] )
else:
attributeDict = res['Value']
# add extended attributes to the dict if available
self._updateMetadataDict( metadataDict, attributeDict )
return S_OK ( metadataDict )
def prestageFile( self, path, lifetime = 86400 ):
""" Issue prestage request for file(s)
:param self: self reference
:param str path: path or list of paths to be prestaged
:param int lifetime: prestage lifetime in seconds (default 24h)
:return: succesful dict { url : token }
failed dict { url : message }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( 'GFAL2_StorageBase.prestageFile: Attempting to issue stage requests for %s file(s).' % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__prestageSingleFile( url, lifetime )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __prestageSingleFile( self, path, lifetime ):
""" Issue prestage for single file
:param self: self reference
:param str path: path to be prestaged
:param int lifetime: prestage lifetime in seconds (default 24h)
:return: S_ structure
S_OK( token ) if status >= 0 (0 - staging is pending, 1 - file is pinned)
S_ERROR( errMsg ) in case of an error: status -1
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__prestageSingleFile" )
log.debug( "Attempting to issue stage request for single file: %s" % path )
try:
( status, token ) = self.ctx.bring_online( path, lifetime, self.stageTimeout, True )
log.debug( "Staging issued - Status: %s" % status )
if status >= 0:
return S_OK( token )
else:
return S_ERROR( 'An error occurred while issuing prestaging.' )
except gfal2.GError as e:
errStr = "Error occured while prestaging file"
log.debug( errStr, "%s %s" % ( path, repr( e ) ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
def prestageFileStatus( self, path ):
""" Checking the staging status of file(s) on the storage
:param self: self reference
:param dict path: dict { url : token }
:return: succesful dict { url : bool }
failed dict { url : message }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( 'GFAL2_StorageBase.prestageFileStatus: Checking the staging status for %s file(s).' % len( urls ) )
failed = {}
successful = {}
for path, token in urls.iteritems():
res = self.__prestageSingleFileStatus( path, token )
if not res['OK']:
failed[path] = res['Message']
else:
successful[path] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __prestageSingleFileStatus( self, path, token ):
""" Check prestage status for single file
:param self: self reference
:param str path: path to be checked
:param str token: token of the file
:return: S_ structure
S_OK( True ) if file is staged
S_OK( False ) if file is not staged yet
S_ERROR( errMsg ) in case of an error: status -1
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__prestageSingleFileStatus" )
log.debug( "Checking prestage file status for %s" % path )
# also allow int as token - converting them to strings
if not isinstance( token, basestring ):
token = str( token )
try:
self.ctx.set_opt_boolean( "BDII", "ENABLE", True )
status = self.ctx.bring_online_poll( path, token )
if status == 0:
log.debug( "File not staged" )
return S_OK( False )
elif status == 1:
log.debug( "File is staged" )
return S_OK( True )
else:
return S_ERROR( 'An error occurred while checking prestage status.' )
except gfal2.GError as e:
if e.code == errno.EAGAIN:
log.debug( "File not staged" )
return S_OK( False )
elif e.code == errno.ETIMEDOUT:
errStr = 'Polling request timed out'
log.debug( errStr )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
else:
errStr = "Error occured while polling for prestaging file"
log.debug( errStr, "%s %s" % ( path, repr( e ) ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
finally:
self.ctx.set_opt_boolean( "BDII", "ENABLE", False )
def pinFile( self, path, lifetime = 86400 ):
""" Pin a staged file
:param self: self reference
:param str path: path of list of paths to be pinned
:param int lifetime: pinning time in seconds (default 24h)
:return successful dict {url : token},
failed dict {url : message}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( 'GFAL2_StorageBase.pinFile: Attempting to pin %s file(s).' % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__pinSingleFile( url, lifetime )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __pinSingleFile( self, path, lifetime ):
""" Pin a single staged file
:param self: self reference
:param str path: path to be pinned
:param int lifetime: pinning lifetime in seconds (default 24h)
:return: S_OK( token ) if status >= 0 (0 - staging is pending, 1 - file is pinned). EAGAIN is also considered pending
S_ERROR( errMsg ) in case of an error: status -1
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__pinSingleFile" )
log.debug( "Attempting to issue pinning request for single file: %s" % path )
try:
self.ctx.set_opt_boolean( "BDII", "ENABLE", True )
status, token = self.ctx.bring_online( path, lifetime, self.stageTimeout, True )
log.debug( "Pinning issued - Status: %s" % status )
if status >= 0:
return S_OK( token )
else:
return S_ERROR( 'An error occurred while issuing pinning.' )
except gfal2.GError as e:
errStr = "GFAL2_StorageBase.__pinSingleFile: Error occured while pinning file"
log.debug( errStr, "%s %s" % ( path, repr( e ) ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
finally:
self.ctx.set_opt_boolean( "BDII", "ENABLE", False )
def releaseFile( self, path ):
""" Release a pinned file
:param self: self reference
:param str path: PFN path { pfn : token } - pfn can be an empty string, in which case all files that have that same token get released.
Likewise, you can pass an empty token string and a directory as pfn, which then releases all the files in the directory
and its subdirectories
:return successful dict {url : token},
failed dict {url : message}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.releaseFile: Attempting to release %s file(s)." % len( urls ) )
failed = {}
successful = {}
for path, token in urls.iteritems():
res = self.__releaseSingleFile( path, token )
if not res['OK']:
failed[path] = res['Message']
else:
successful[path] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __releaseSingleFile( self, path, token ):
""" release a single pinned file
:param self: self reference
:param str path: path to the file to be released
:param str token: token belonging to the path
:returns: S_OK( token ) when releasing was successful, S_ERROR( errMessage ) in case of an error
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__releaseSingleFile" )
log.debug( "Attempting to release single file: %s" % path )
if not isinstance( token, basestring ):
token = str( token )
try:
self.ctx.set_opt_boolean( "BDII", "ENABLE", True )
status = self.ctx.release( path, token )
if status >= 0:
return S_OK( token )
else:
errStr = "Error occured: Return status < 0"
log.debug( errStr, "path %s token %s" % ( path, token ) )
return S_ERROR( errStr )
except gfal2.GError as e:
errStr = "Error occured while releasing file"
self.log.debug( errStr, "%s %s" % ( path, repr( e ) ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
finally:
self.ctx.set_opt_boolean( "BDII", "ENABLE", False )
def __getChecksum( self, path, checksumType = None ):
""" Calculate the checksum (ADLER32 by default) of a file on the storage
:param self: self reference
:param str path: path to single file on storage (srm://...)
:returns: S_OK( checksum ) if checksum could be calculated
S_ERROR( errMsg ) if something failed
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__getChecksum" )
log.debug( 'Trying to calculate checksum of file %s' % path )
if not checksumType:
errStr = "No checksum type set by the storage element. Can't retrieve checksum"
log.debug( errStr, path )
return S_ERROR( errStr )
res = self.__isSingleFile( path )
if not res['OK']:
return res
try:
log.debug( "using %s checksum" % checksumType )
fileChecksum = self.ctx.checksum( path, checksumType )
return S_OK( fileChecksum )
except gfal2.GError as e:
errStr = 'Failed to calculate checksum.'
log.debug( errStr, repr( e ) )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
def __parseStatInfoFromApiOutput( self, statInfo ):
""" Fill the metaDict with the information obtained with gfal2.stat()
returns metaDict with following keys:
st_dev: ID of device containing file
st_ino: file serial number
st_mode: mode of file
st_nlink: number of links to the file
st_uid: user ID of file
st_gid: group ID of file
st_size: file size in bytes
st_atime: time of last access
st_mtime: time of last modification
st_ctime: time of last status change
File (bool): whether object is a file or not
Directory (bool): whether object is a directory or not
"""
metaDict = {}
# to identify whether statInfo are from file or directory
metaDict['File'] = S_ISREG( statInfo.st_mode )
metaDict['Directory'] = S_ISDIR( statInfo.st_mode )
if metaDict['File'] :
metaDict['FileSerialNumber'] = statInfo.st_ino
metaDict['Mode'] = statInfo.st_mode & ( S_IRWXU | S_IRWXG | S_IRWXO )
metaDict['Links'] = statInfo.st_nlink
metaDict['UserID'] = statInfo.st_uid
metaDict['GroupID'] = statInfo.st_gid
metaDict['Size'] = long( statInfo.st_size )
metaDict['LastAccess'] = self.__convertTime( statInfo.st_atime ) if statInfo.st_atime else 'Never'
metaDict['ModTime'] = self.__convertTime( statInfo.st_mtime ) if statInfo.st_mtime else 'Never'
metaDict['StatusChange'] = self.__convertTime( statInfo.st_ctime ) if statInfo.st_ctime else 'Never'
metaDict['Executable'] = bool( statInfo.st_mode & S_IXUSR )
metaDict['Readable'] = bool( statInfo.st_mode & S_IRUSR )
metaDict['Writeable'] = bool( statInfo.st_mode & S_IWUSR )
elif metaDict['Directory']:
metaDict['Mode'] = statInfo.st_mode & ( S_IRWXU | S_IRWXG | S_IRWXO )
return metaDict
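# Illustrative output for a regular file (values are made up; Mode holds the
# permission bits as an integer):
#   { 'File' : True, 'Directory' : False, 'Size' : 1048576L,
#     'Mode' : 0644, 'Links' : 1, 'UserID' : 1000, 'GroupID' : 1000,
#     'ModTime' : '2014-10-29 14:32:10', 'Executable' : False,
#     'Readable' : True, 'Writeable' : True }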
@staticmethod
def __convertTime( time ):
""" Converts unix time to proper time format
:param self: self reference
:param time: unix time
:return Date in following format: 2014-10-29 14:32:10
"""
return datetime.datetime.fromtimestamp( time ).strftime( '%Y-%m-%d %H:%M:%S' )
### methods for manipulating directories ###
def createDirectory( self, path ):
""" Create directory on the storage
:param self: self reference
:param str path: path to be created on the storage (pfn : srm://...)
:returns: Successful dict {path : True }
Failed dict {path : error message }
S_ERROR in case of argument problems
"""
urls = checkArgumentFormat( path )
if not urls['OK']:
return urls
urls = urls['Value']
successful = {}
failed = {}
self.log.debug( "createDirectory: Attempting to create %s directories." % len( urls ) )
for url in urls:
res = self.__createSingleDirectory( url )
if res['OK']:
successful[url] = True
else:
failed[url] = res['Message']
return S_OK( { 'Failed' : failed, 'Successful' : successful } )
def __createSingleDirectory( self, path ):
""" Create directory :path: on the storage
if no exception is caught the creation was successful. Also if the
directory already exists we return S_OK().
:param self: self reference
:param str path: path to be created (srm://...)
:returns: S_OK() if creation was successful or directory already exists
S_ERROR() in case of an error during creation
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__createSingleDirectory" )
try:
log.debug( "Creating %s" % path )
status = self.ctx.mkdir_rec( path, 0755 )  # octal mode; a plain 755 would be interpreted as a decimal mode
if status >= 0:
log.debug( 'Successfully created directory' )
return S_OK()
else:
errStr = 'Failed to create directory. Negative return status.'
log.debug( errStr, status )
return S_ERROR( errStr )
except gfal2.GError as e:
# error: directory already exists
if e.code == errno.EEXIST: # or e.code == errno.EACCES:
log.debug( "Directory already exists" )
return S_OK()
# any other error: failed to create directory
else:
errStr = "Failed to create directory."
log.debug( errStr, repr( e ) )
return S_ERROR( e.code, repr( e ) )
def isDirectory( self, path ):
""" check if the path provided is a directory or not
:param self: self reference
:param str: path or list of paths to be checked ( 'srm://...')
:returns: dict 'Failed' : failed, 'Successful' : succesful
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.isDirectory: checking whether %s path(s) are directory(ies)." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__isSingleDirectory( url )
if res['OK']:
successful[url] = res['Value']
else:
failed[url] = res['Message']
resDict = { 'Failed' : failed, 'Successful' : successful }
return S_OK( resDict )
def __isSingleDirectory( self, path ):
""" Checking if :path: exists and is a directory
:param self: self reference
:param str path: single path on the storage (srm://...)
:returns: S_OK ( boolean) if it is a directory or not
S_ERROR ( errStr ) when there was a problem getting the info
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__isSingleDirectory" )
log.debug( "Determining whether %s is a directory or not." % path )
try:
statInfo = self.ctx.stat( path )
# instead of return S_OK( S_ISDIR( statInfo.st_mode ) ) we use if/else. So we can use the log.
if S_ISDIR( statInfo.st_mode ):
log.debug( "Path is a directory" )
return S_OK ( True )
else:
log.debug( "Path is not a directory" )
return S_OK( False )
except gfal2.GError as e:
errStr = "Failed to determine if path %s is a directory." % path
log.debug( errStr, repr( e ) )
return S_ERROR( e.code, repr( e ) )
def listDirectory( self, path ):
""" List the content of the path provided
:param str path: single or list of paths (srm://...)
:return: failed dict {path : message }
successful dict { path : {'SubDirs' : subDirs, 'Files' : files} }.
              The keys are the paths, the values are the dictionaries 'SubDirs' and 'Files'.
Each are dictionaries with path as key and metadata as values
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.listDirectory: Attempting to list %s directories" % len( urls ) )
res = self.isDirectory( urls )
if not res['OK']:
return res
successful = {}
failed = res['Value']['Failed']
directories = []
for url, isDirectory in res['Value']['Successful'].iteritems():
if isDirectory:
directories.append( url )
else:
errStr = "GFAL2_StorageBase.listDirectory: path is not a directory"
gLogger.error( errStr, url )
failed[url] = errStr
for directory in directories:
res = self.__listSingleDirectory( directory )
if not res['OK']:
failed[directory] = res['Message']
else:
successful[directory] = res['Value']
resDict = { 'Failed' : failed, 'Successful' : successful }
return S_OK( resDict )
def __listSingleDirectory( self, path, internalCall = False ):
""" List the content of the single directory provided
:param self: self reference
:param str path: single path on storage (srm://...)
:param bool internalCall: if we call this method from another internal method we want
to work with the full pfn. Used for __getSingleDirectory and
__removeSingleDirectory
:returns: S_ERROR( errStr ) if there is an error
S_OK( dictionary ): Key: SubDirs and Files
The values of the Files are dictionaries with filename as key and metadata as value
The values of SubDirs are just the dirnames as key and True as value
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__listSingleDirectory" )
log.debug( "Attempting to list content of %s" % path )
try:
listing = self.ctx.listdir( path )
except gfal2.GError as e:
errStr = 'Could not list directory content.'
log.debug( errStr, e.message )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
files = {}
subDirs = {}
res = pfnparse( path, srmSpecific = self.srmSpecificParse )
if not res['OK']:
return res
pathDict = res['Value']
for entry in listing:
nextEntry = dict( pathDict )
nextEntry['FileName'] = os.path.join( pathDict['FileName'], entry )
res = pfnunparse( nextEntry, srmSpecific = self.srmSpecificParse )
if not res['OK']:
log.debug( "Cannot generate url for next entry", res )
continue
nextUrl = res['Value']
res = self.__getSingleMetadata( nextUrl )
if res['OK']:
metadataDict = res['Value']
if internalCall:
subPathLFN = nextUrl
else:
# If it is not an internal call, we return the LFN
# We cannot use a simple replace because of the double slash
# that might be at the start
basePath = os.path.normpath( self.protocolParameters['Path'] )
startBase = nextEntry['Path'].find( basePath )
lfnStart = nextEntry['Path'][startBase + len( basePath ):]
if not lfnStart:
lfnStart = '/'
subPathLFN = os.path.join( lfnStart , nextEntry['FileName'] )
if metadataDict['Directory']:
subDirs[subPathLFN] = metadataDict
elif metadataDict['File']:
files[subPathLFN] = metadataDict
else:
log.debug( "Found item which is neither file nor directory", nextUrl )
else:
log.debug( "Could not stat content", "%s %s" % ( nextUrl, res['Message'] ) )
return S_OK( {'SubDirs' : subDirs, 'Files' : files} )
def getDirectory( self, path, localPath = False ):
""" get a directory from the SE to a local path with all its files and subdirectories
:param str path: path (or list of paths) on the storage (srm://...)
:param str localPath: local path where the content of the remote directory will be saved,
if not defined it takes current working directory.
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files': amount of files downloaded, 'Size' : amount of data downloaded}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
log = self.log.getSubLogger( "GFAL2_StorageBase.getDirectory" )
log.debug( "Attempting to get local copies of %s directories. %s" % ( len( urls ), urls ) )
failed = {}
successful = {}
for src_dir in urls:
res = pfnparse( src_dir, srmSpecific = self.srmSpecificParse )
if not res['OK']:
log.debug( "cannot parse src_url", res )
continue
srcUrlDict = res['Value']
dirName = srcUrlDict['FileName']
dest_dir = os.path.join( localPath if localPath else os.getcwd(), dirName )
res = self.__getSingleDirectory( src_dir, dest_dir )
if res['OK']:
if res['Value']['AllGot']:
log.debug( "Successfully got local copy of %s" % src_dir )
successful[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
log.debug( "Failed to get entire directory.", src_dir )
failed[src_dir] = {'Files':res['Value']['Files'], 'Size':res['Value']['Size']}
else:
log.debug( "Completely failed to get local copy of directory.", src_dir )
failed[src_dir] = {'Files':0, 'Size':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __getSingleDirectory( self, src_dir, dest_dir ):
"""Download a single directory recursively
:param self: self reference
:param src_dir : remote directory to download (srm://...)
:param dest_dir: local destination path
:returns: S_ERROR if there is a fatal error
S_OK if we could download something :
'AllGot': boolean of whether we could download everything
'Files': amount of files received
'Size': amount of data received
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__getSingleDirectory" )
log.debug( "Attempting to download directory %s at %s" % ( src_dir, dest_dir ) )
filesReceived = 0
sizeReceived = 0
res = self.__isSingleDirectory( src_dir )
if not res['OK']:
log.debug( "Failed to find the source directory: %s %s" % ( res['Message'], src_dir ) )
return res
# res['Value'] is False if it's not a directory
if not res['Value']:
errStr = 'The path provided is not a directory'
log.debug( errStr, src_dir )
return S_ERROR( errno.ENOTDIR, errStr )
if not os.path.exists( dest_dir ):
try:
os.makedirs( dest_dir )
      except OSError as error:
errStr = 'Error trying to create destination directory %s' % error
log.exception( errStr, lException = error )
return S_ERROR( errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( src_dir, internalCall = True )
if not res['OK']:
return res
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
# Get all the files in the directory
receivedAllFiles = True
log.debug( 'Trying to download the %s files' % len( sFilesDict ) )
for sFile in sFilesDict:
# Getting the last filename
res = pfnparse( sFile, srmSpecific = self.srmSpecificParse )
if not res['OK']:
log.debug( "Cannot unparse target file. Skipping", res )
receivedAllFiles = False
continue
filename = res['Value']['FileName']
# Returns S_OK(fileSize) if successful
res = self._getSingleFile( sFile, os.path.join( dest_dir, filename ), disableChecksum = self.disableTransferChecksum )
if res['OK']:
filesReceived += 1
sizeReceived += res['Value']
else:
receivedAllFiles = False
    # recursion to get contents of subdirectories
receivedAllDirs = True
log.debug( 'Trying to recursively download the %s directories' % len( subDirsDict ) )
for subDir in subDirsDict:
# Getting the last filename
res = pfnparse( subDir, srmSpecific = self.srmSpecificParse )
if not res['OK']:
log.debug( "Cannot unparse target dir. Skipping", res )
receivedAllDirs = False
continue
subDirName = res['Value']['FileName']
localPath = os.path.join( dest_dir, subDirName )
res = self.__getSingleDirectory( subDir, localPath )
if not res['OK']:
receivedAllDirs = False
else:
if not res['Value']['AllGot']:
receivedAllDirs = False
filesReceived += res['Value']['Files']
sizeReceived += res['Value']['Size']
allGot = receivedAllDirs and receivedAllFiles
resDict = { 'AllGot' : allGot, 'Files' : filesReceived, 'Size' : sizeReceived }
return S_OK( resDict )
def putDirectory( self, path ):
""" Puts one or more local directories to the physical storage together with all its files
:param self: self reference
:param str path: dictionary { srm://... (destination) : localdir (source dir) }
:return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files' : amount of files uploaded, 'Size' : amount of data upload }
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
log = self.log.getSubLogger( "GFAL2_StorageBase.putDirectory" )
log.debug( 'Attempting to put %s directories to remote storage' % len( urls ) )
successful = {}
failed = {}
for destDir, sourceDir in urls.iteritems():
if not sourceDir:
errStr = 'No source directory set, make sure the input format is correct { dest. dir : source dir }'
return S_ERROR( errStr )
res = self.__putSingleDirectory( sourceDir, destDir )
if res['OK']:
if res['Value']['AllPut']:
log.debug( "Successfully put directory to remote storage: %s" % destDir )
successful[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
log.debug( "Failed to put entire directory to remote storage.", destDir )
failed[destDir] = { 'Files' : res['Value']['Files'], 'Size' : res['Value']['Size']}
else:
log.debug( "Completely failed to put directory to remote storage.", destDir )
failed[destDir] = { "Files" : 0, "Size" : 0 }
return S_OK( { "Failed" : failed, "Successful" : successful } )
def __putSingleDirectory( self, src_directory, dest_directory ):
""" puts one local directory to the physical storage together with all its files and subdirectories
:param self: self reference
:param src_directory : the local directory to copy
:param dest_directory: pfn (srm://...) where to copy
:returns: S_ERROR if there is a fatal error
S_OK if we could upload something :
'AllPut': boolean of whether we could upload everything
'Files': amount of files uploaded
'Size': amount of data uploaded
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__putSingleDirectory" )
log.debug( 'Trying to upload %s to %s' % ( src_directory, dest_directory ) )
filesPut = 0
sizePut = 0
if not os.path.isdir( src_directory ):
errStr = 'The supplied source directory does not exist or is not a directory.'
log.debug( errStr, src_directory )
return S_ERROR( errno.ENOENT, errStr )
contents = os.listdir( src_directory )
allSuccessful = True
directoryFiles = {}
res = pfnparse( dest_directory, srmSpecific = self.srmSpecificParse )
if not res['OK']:
return res
destDirParse = res['Value']
for fileName in contents:
localPath = os.path.join( src_directory, fileName )
nextUrlDict = dict( destDirParse )
nextUrlDict['FileName'] = os.path.join( destDirParse['FileName'], fileName )
res = pfnunparse( nextUrlDict, srmSpecific = self.srmSpecificParse )
if not res['OK']:
log.debug( "Cannot unparse next url dict. Skipping", res )
allSuccessful = False
continue
remoteUrl = res['Value']
# if localPath is not a directory put it to the files dict that needs to be uploaded
if not os.path.isdir( localPath ):
directoryFiles[remoteUrl] = localPath
# localPath is another folder, start recursion
else:
res = self.__putSingleDirectory( localPath, remoteUrl )
if not res['OK']:
log.debug( 'Failed to put directory to storage. Skipping', res['Message'] )
allSuccessful = False
else:
if not res['Value']['AllPut']:
allSuccessful = False
filesPut += res['Value']['Files']
sizePut += res['Value']['Size']
if directoryFiles:
res = self.putFile( directoryFiles )
if not res['OK']:
log.debug( 'Failed to put files to storage.', res['Message'] )
allSuccessful = False
else:
for fileSize in res['Value']['Successful'].itervalues():
filesPut += 1
sizePut += fileSize
if res['Value']['Failed']:
allSuccessful = False
return S_OK( { 'AllPut' : allSuccessful, 'Files' : filesPut, 'Size' : sizePut} )
def removeDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path: single or list of path (srm://..)
:param recursive: if True, we recursively delete the subdir
    :return: successful and failed dictionaries. The keys are the paths,
the values are dictionary {'Files': amount of files deleted, 'Size': amount of data deleted}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
log = self.log.getSubLogger( "GFAL2_StorageBase.removeDirectory" )
log.debug( "Attempting to remove %s directories." % len( urls ) )
successful = {}
failed = {}
for url in urls:
res = self.__removeSingleDirectory( url, recursive )
if res['OK']:
if res['Value']['AllRemoved']:
log.debug( "Successfully removed %s" % url )
successful[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
log.debug( "Failed to remove entire directory.", path )
failed[url] = {'FilesRemoved':res['Value']['FilesRemoved'], 'SizeRemoved':res['Value']['SizeRemoved']}
else:
log.debug( "Completely failed to remove directory.", url )
failed[url] = res['Message'] # {'FilesRemoved':0, 'SizeRemoved':0}
return S_OK( {'Failed' : failed, 'Successful' : successful } )
def __removeSingleDirectory( self, path, recursive = False ):
"""Remove a directory on the physical storage together with all its files and
subdirectories.
:param path: pfn (srm://...) of a directory to remove
:param recursive : if True, we recursively delete the subdir
:returns: S_ERROR if there is a fatal error
              S_OK (statistics dictionary ) if we could delete something :
'AllRemoved': boolean of whether we could delete everything
'FilesRemoved': amount of files deleted
'SizeRemoved': amount of data deleted
"""
log = self.log.getSubLogger( "GFAL2_StorageBase.__removeSingleDirectory" )
filesRemoved = 0
sizeRemoved = 0
# Check the remote directory exists
res = self.__isSingleDirectory( path )
if not res['OK']:
return res
# res['Value'] is True if it is a directory
if not res['Value']:
errStr = "The supplied path is not a directory."
log.debug( errStr, path )
return S_ERROR( errno.ENOTDIR, errStr )
# Get the remote directory contents
res = self.__listSingleDirectory( path, internalCall = True )
if not res['OK']:
return res
sFilesDict = res['Value']['Files']
subDirsDict = res['Value']['SubDirs']
removedAllFiles = True
removedAllDirs = True
# if recursive, we call ourselves on all the subdirs
if recursive:
# Recursively remove the sub directories
log.debug( "Trying to recursively remove %s folder." % len( subDirsDict ) )
for subDirUrl in subDirsDict:
res = self.__removeSingleDirectory( subDirUrl, recursive )
if not res['OK']:
log.debug( "Recursive removal failed", res )
removedAllDirs = False
else:
if not res['Value']['AllRemoved']:
removedAllDirs = False
filesRemoved += res['Value']['FilesRemoved']
sizeRemoved += res['Value']['SizeRemoved']
# Remove all the files in the directory
log.debug( "Trying to remove %s files." % len( sFilesDict ) )
for sFile in sFilesDict:
      # Returns S_OK( fileSize ) if it worked
res = self.__removeSingleFile( sFile )
if res['OK']:
filesRemoved += 1
sizeRemoved += sFilesDict[sFile]['Size']
else:
removedAllFiles = False
# Check whether all the operations were successful
allRemoved = removedAllDirs and removedAllFiles
# Now we try to remove the directory itself
# We do it only if :
# If we wanted to remove recursively and everything was deleted
# We didn't want to remove recursively but we deleted all the files and there are no subfolders
if ( recursive and allRemoved ) or ( not recursive and removedAllFiles and not subDirsDict ):
try:
status = self.ctx.rmdir( path )
if status < 0:
errStr = "Error occured while removing directory. Status: %s" % status
log.debug( errStr )
allRemoved = False
except gfal2.GError as e:
# How would that be possible...
if e.code == errno.ENOENT:
          errStr = 'File does not exist'
log.debug( errStr )
else:
errStr = 'Failed to remove directory %s' % path
log.debug( errStr )
allRemoved = False
resDict = {'AllRemoved': allRemoved, 'FilesRemoved': filesRemoved, 'SizeRemoved': sizeRemoved}
return S_OK( resDict )
def getDirectorySize( self, path ):
""" Get the size of the directory on the storage
.. warning:: it is not recursive
:param self: self reference
:param str path: path or list of paths on storage (srm://...)
:returns: list of successful and failed dictionaries, both indexed by the path
* In the failed, the value is the error message
* In the successful the values are dictionaries:
* Files : amount of files in the dir
* Size : summed up size of all files
* subDirs : amount of sub dirs
* S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( 'GFAL2_StorageBase.getDirectorySize: Attempting to get size of %s directories' % len( urls ) )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectorySize( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful} )
def __getSingleDirectorySize( self, path ):
""" Get the size of the directory on the storage
CAUTION : the size is not recursive, and does not go into subfolders
:param self: self reference
:param path: path (single) on storage (srm://...)
:return: S_ERROR in case of problem
S_OK (Dictionary) Files : amount of files in the directory
Size : summed up size of files
subDirs : amount of sub directories
"""
self.log.debug( "GFAL2_StorageBase.__getSingleDirectorySize: Attempting to get the size of directory %s" % path )
res = self.__listSingleDirectory( path )
if not res['OK']:
return res
directorySize = 0
directoryFiles = 0
    # itervalues returns an iterator over the dictionary's values
for fileDict in res['Value']['Files'].itervalues():
directorySize += fileDict['Size']
directoryFiles += 1
self.log.debug( "GFAL2_StorageBase.__getSingleDirectorySize: Successfully obtained size of %s." % path )
subDirectories = len( res['Value']['SubDirs'] )
return S_OK( { 'Files' : directoryFiles, 'Size' : directorySize, 'SubDirs' : subDirectories } )
def getDirectoryMetadata( self, path ):
""" Get metadata for the directory(ies) provided
:param self: self reference
:param str path: path (or list of paths) on storage (srm://...)
:returns: Successful dict {path : metadata}
Failed dict {path : errStr}
S_ERROR in case of argument problems
"""
res = checkArgumentFormat( path )
if not res['OK']:
return res
urls = res['Value']
self.log.debug( "GFAL2_StorageBase.getDirectoryMetadata: Attempting to fetch metadata." )
failed = {}
successful = {}
for url in urls:
res = self.__getSingleDirectoryMetadata( url )
if not res['OK']:
failed[url] = res['Message']
else:
successful[url] = res['Value']
return S_OK( { 'Failed' : failed, 'Successful' : successful} )
def __getSingleDirectoryMetadata( self, path ):
""" Fetch the metadata of the provided path
:param self: self reference
:param str path: path (only 1) on the storage (srm://...)
:returns: S_OK( metadataDict ) if we could get the metadata
S_ERROR( errStr )if there was a problem getting the metadata or path isn't a directory
"""
self.log.debug( "GFAL2_StorageBase.__getSingleDirectoryMetadata: Fetching metadata of directory %s." % path )
res = self.__getSingleMetadata( path )
if not res['OK']:
return res
metadataDict = res['Value']
if not metadataDict['Directory']:
errStr = "GFAL2_StorageBase.__getSingleDirectoryMetadata: Provided path is not a directory."
self.log.debug( errStr, path )
return S_ERROR( errno.ENOTDIR, errStr )
return S_OK( metadataDict )
### methods for manipulating the client ###
#
# def isPfnForProtocol( self, *parms, **kws ):
# """ check if PFN :pfn: is valid for :self.protocol:
# """
# return S_ERROR( "GFAL2_StorageBase.isPfnForProtocol: Implement me!" )
##################################################################
#
# ALL INHERITED FROM StorageBase.py
#
##################################################################
# def isOK( self ):
# return self.isok
#
# def changeDirectory( self, newdir ):
# """ Change the current directory
# """
# self.cwd = newdir
# return S_OK()
#
# def getCurrentDirectory( self ):
# """ Get the current directory
# """
# return S_OK( self.cwd )
#
# def getName( self ):
# """ The name with which the storage was instantiated
# """
# return S_OK( self.name )
#
# def setParameters( self, parameters ):
# """ Set extra storage parameters, non-mandatory method
# """
# return S_OK()
##################################################################
def _getExtendedAttributes( self, path, attributes = None ):
""" Get all the available extended attributes of path
:param self: self reference
    :param str path: path of which we want extended attributes
:param str list attributes: list of extended attributes we want to receive
:return: S_OK( attributeDict ) if successful. Where the keys of the dict are the attributes and values the respective values
"""
log = self.log.getSubLogger( "GFAL2_StorageBase._getExtendedAttributes" )
log.debug( "Checking %s attributes for %s" % ( attributes, path ) )
attributeDict = {}
# get all the extended attributes from path
try:
if not attributes:
attributes = self.ctx.listxattr( path )
# get all the respective values of the extended attributes of path
for attribute in attributes:
log.debug( "Fetching %s" % attribute )
attributeDict[attribute] = self.ctx.getxattr( path, attribute )
return S_OK( attributeDict )
# simple error messages, the method that is calling them adds the source of error.
except gfal2.GError as e:
errStr = 'Something went wrong while checking for extended attributes.'
log.debug( errStr, e.message )
return S_ERROR( e.code, "%s %s" % ( errStr, repr( e ) ) )
| gpl-3.0 | 5,363,154,736,559,909,000 | 34.199688 | 137 | 0.615861 | false |
kennethreitz-archive/macspoof | macspoof/interfaces.py | 1 | 1440 | import netifaces
import os
BLACK_ID = ('lo0', 'gif0')
BLACK_MAC = ('', '::1')
class Interface(object):
"""Network Interface Object"""
def __init__(self, id):
super(Interface, self).__init__()
self.id = id
_meta = netifaces.ifaddresses(self.id)
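        # the numeric keys below are netifaces address-family constants -- assumed Darwin/BSD values: 18 == AF_LINK (hardware/MAC), 2 == AF_INET, 30 == AF_INET6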
self.mac = _meta[18][0]['addr']
try:
            self.broadcast = _meta[2][0]['broadcast']
self.netmask = _meta[2][0]['netmask']
self.addr = _meta[2][0]['addr']
        except KeyError:
pass
try:
self.ip6_netmask = _meta[30][0]['netmask']
self.ip6_addr = _meta[30][0]['addr']
        except KeyError:
pass
def __repr__(self):
return ('<interface: %s>' % self.id)
def __str__(self):
return self.id
def spoof(self, mac, air=False):
"""Spoofs given MAC address"""
if air:
os.system(
'sudo '
'/System/Library/PrivateFrameworks'
'/Apple80211.framework/Versions'
'/Current/Resources/airport -z'
)
_status = os.system('sudo ifconfig %s ether %s' % (self.id, mac))
return 'Interface %s (%s) => (%s)' % (self.id, self.mac, mac)
def fetch():
"""Returns a list of interfaces objects."""
_interfaces = [Interface(iface) for iface in netifaces.interfaces()]
for iface in _interfaces:
if (iface.id in BLACK_ID) or (iface.mac in BLACK_MAC) or (len(iface.mac) < 5):
_interfaces.remove(iface)
return _interfaces
def list():
"""Lists available interface names."""
return netifaces.interfaces()
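# hypothetical usage sketch (the MAC value is made up):
#   for iface in fetch():
#       print iface.id, iface.mac
#   print fetch()[0].spoof('00:de:ad:be:ef:00', air=True)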
| mit | -7,330,067,767,098,584,000 | 19.28169 | 80 | 0.609028 | false |
bbulkow/MagnusFlora | flask/led_task.py | 1 | 2997 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# led_task.py
#
# This module provides a Clery task interface to set specified led states
# via the Fadecandy server
#
# the functions in this module should interact directly with Fadecandy
#
# the Celery tasks defined in this module should be intended to be run as the
# result of a http request, and may use state shared in redis to enable
# external control of running tasks.
#
import time
from celery import Celery
import redis
import opc
REDIS = redis.StrictRedis(host='localhost', port=6379, db=0)
app = Celery('flower_web', backend ='redis://localhost:6379/1', broker='redis://localhost:6379/0')
app.conf.CELERY_ACCEPT_CONTENT = ['pickle', 'json']
app.conf.TASK_SERIALIZER = ['pickle']
class FCInterface():
'''manages the configuration for a FadeCandy interface'''
def __init__(self, entity_name, led_count, opc_address):
self._name = entity_name
self._count = led_count
self.state_key = entity_name+'/led_state'
self.chase_key = entity_name+'/led_run'
self.opc = opc_address
self.DEBUG = True
self.set_state('initialized', True)
@property
def state(self):
return REDIS.hgetall(self.state_key)
def set_state(self, key, value):
REDIS.hmset(self.state_key, {key:value})
# the tasks are defined outside of the class, due to a limitation in Celery
# each task takes an "objIn" argument, which behaves like the 'self' argument
# for instance methods. The serializer argument specifies how the objIn is
# sent into this method from other processes
@app.task(serializer='pickle')
def dochase(objIn, numChase):
'''an example long-running task with external control.
Light each LED in sequence, and repeat.'''
objIn.set_state("chase","running")
REDIS.set(objIn.chase_key, 1)
    while REDIS.get(objIn.chase_key) != b'0':  # under Python 3 redis returns bytes, not str
        for i in range(objIn._count):  # xrange does not exist in Python 3
pixels = [ (150,50,50) ] * objIn._count
# pixels[i] = (5, 5, 155)
# pool = cycle(pixels)
            for j in range(numChase):
                if i + j < objIn._count:
                    pixels[i + j] = (5, 5, 155)  # light a window of LEDs starting at i
if objIn.DEBUG:
print(i+j)
else:
objIn.opc.put_pixels(pixels)
time.sleep(0.3)
@app.task(serializer='pickle')
def stop_chase(objIn):
'''an example short running task, which modifies shared state'''
objIn.set_state("chase", "stopped")
REDIS.set(objIn.chase_key, 0)
@app.task(serializer='pickle')
def get_state(objIn):
'''an example task with a return value'''
return objIn.state
# individual entities must be defined to be passed into the tasks
# it makes sense to do this in the same file that the tasks are defined in
# but we could break this out into a new file when we have a lot of entities
PETAL0 = FCInterface('PETAL0', 24, opc.Client('192.168.4.15:7890'))
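# hypothetical usage sketch -- with a worker running (celery -A led_task worker):
#   dochase.delay(PETAL0, 3)        # start a 3-LED chase as a background task
#   stop_chase.delay(PETAL0)        # flip the redis flag so the chase loop exits
#   get_state.delay(PETAL0).get()   # fetch the shared state dict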
| mit | 6,758,847,182,765,939,000 | 33.848837 | 98 | 0.650984 | false |
SUSE/azure-sdk-for-python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/protection_policy_operation_results_operations.py | 2 | 4784 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class ProtectionPolicyOperationResultsOperations(object):
"""ProtectionPolicyOperationResultsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-12-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-12-01"
self.config = config
def get(
self, vault_name, resource_group_name, policy_name, operation_id, custom_headers=None, raw=False, **operation_config):
"""Provides the result of an operation.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the
recovery services vault is present.
:type resource_group_name: str
:param policy_name: Backup policy name whose operation's result needs
to be fetched.
:type policy_name: str
:param operation_id: Operation ID which represents the operation whose
result needs to be fetched.
:type operation_id: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`ProtectionPolicyResource
<azure.mgmt.recoveryservicesbackup.models.ProtectionPolicyResource>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/Subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}/operationResults/{operationId}'
path_format_arguments = {
'vaultName': self._serialize.url("vault_name", vault_name, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'policyName': self._serialize.url("policy_name", policy_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectionPolicyResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
| mit | 3,101,230,408,185,767,400 | 43.71028 | 197 | 0.6551 | false |
chjp2046/proxygen | proxygen/lib/utils/gen_trace_event_constants.py | 22 | 6840 | #!/bin/env python
# @lint-avoid-python-3-compatibility-imports
import os
import re
import sys
import optparse
def main(argv):
# args parser
parser = optparse.OptionParser()
parser.add_option("--install_dir", dest="install_dir", type="string",
default=None, help="Absolute path to generate files")
parser.add_option("--fbcode_dir", dest="fbcode_dir", type="string",
default=None, help="Absolute path to fbcode directory")
parser.add_option("--input_files", dest="input_files", type="string",
default=None, help="Relative path of input file")
parser.add_option("--output_scope", dest="output_scope", type="string",
default=None, help="namespace / package of output file")
parser.add_option("--output_type", dest="output_type", type="choice",
choices=["java", "cpp"], default=None,
help="File type to generate")
parser.add_option("--header_path", dest="header_path", type="string",
default=None, help="Relative path to cpp header")
options, _ = parser.parse_args()
assert options.install_dir is not None, "Missing arg: --install_dir"
assert options.fbcode_dir is not None, "Missing arg: --fbcode_dir"
assert options.input_files is not None, "Missing arg: --input_files"
assert options.output_scope is not None, "Missing arg: --output_scope"
assert options.output_type is not None, "Missing arg: --output_type"
file_names = options.input_files.split(",")
for file_name in file_names:
# strip the file extension and use the file name for class name
class_name = os.path.basename(file_name).split(".")[0]
# parse items from source
items = []
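        # each input line is assumed to look like: EVENT_NAME, "readable name" (inferred from the regex below); non-matching lines are skipped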
with open(file_name, 'r') as inf:
for line in inf:
sp = re.match(r'(.*), \"(.*)\"', line, re.I)
if sp:
items.append((sp.group(1), sp.group(2)))
if options.output_type == "java":
gen_java(items,
class_name,
options.install_dir,
options.output_scope)
elif options.output_type == "cpp":
assert options.header_path is not None, "Missing arg: --header_path"
gen_cpp_header(items,
class_name,
options.install_dir,
options.output_scope)
gen_cpp_source(items,
class_name,
options.install_dir,
options.header_path,
options.output_scope)
"""
Generate java interface class
"""
def gen_java(items, class_name, install_dir, output_scope):
packages = output_scope.split(".")
file_name = "%s.java" % class_name
file_path = os.path.join(*([install_dir, "src"] + packages))
output_file = os.path.join(file_path, file_name)
if not os.path.exists(file_path):
os.makedirs(file_path)
with open(output_file, 'w+') as outf:
outf.write('// Copyright 2004-present Facebook. All Rights Reserved.\n')
outf.write('// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **\n\n')
outf.write('package %s;\n\n' % ".".join(packages))
outf.write('public interface %s {\n' % class_name)
for item in items:
outf.write(' public static final String %s = "%s";\n' %
(item[0], item[1]))
outf.write('}\n')
"""
Generate cpp enum class and provide convert function from / to string
"""
def gen_cpp_header(items, class_name, install_dir, output_scope):
namespaces = output_scope.split("::")
file_name = "%s.h" % class_name
output_file = os.path.join(install_dir, file_name)
with open(output_file, 'w+') as outf:
outf.write('// Copyright 2004-present Facebook. All Rights Reserved.\n')
outf.write('// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **\n\n')
outf.write('#pragma once\n\n')
outf.write('#include <string>\n\n')
for ns in namespaces:
outf.write('namespace %s { ' % ns)
outf.write('\n\n')
# generate enum class
outf.write('enum class %s {\n' % class_name)
for item in items:
outf.write(' %s,\n' % item[0])
outf.write('};\n\n')
# enum to string convert function
outf.write('extern const std::string& get%sString(%s);\n'
% (class_name, class_name))
outf.write('extern %s get%sFromString(const std::string&);\n'
% (class_name, class_name))
for ns in namespaces:
outf.write('}')
outf.write('\n\n')
"""
Generate cpp const string and implement convert function
"""
def gen_cpp_source(items, class_name, install_dir, header_path, output_scope):
namespaces = output_scope.split("::")
file_name = "%s.cpp" % class_name
output_file = os.path.join(install_dir, file_name)
with open(output_file, 'w+') as outf:
outf.write('// Copyright 2004-present Facebook. All Rights Reserved.\n')
outf.write('// ** AUTOGENERATED FILE. DO NOT HAND-EDIT **\n\n')
outf.write('#include "%s/%s.h"\n\n' % (header_path, class_name))
outf.write('#include <stdexcept>\n\n')
for ns in namespaces:
outf.write('namespace %s { ' % ns)
outf.write('\n\n')
# const string names
for item in items:
outf.write('static const std::string k%s%s = "%s";\n' %
(class_name, item[0], item[1]))
# generate enum to string convert function
outf.write('const std::string& get%sString(%s type) {\n' %
(class_name, class_name))
outf.write(' static const std::string k%sInvalidType = "";\n' %
class_name)
outf.write('\n switch (type) {\n')
for item in items:
outf.write(' case %s::%s : return k%s%s;\n' %
(class_name, item[0], class_name, item[0]))
outf.write(' }\n')
outf.write(' return k%sInvalidType;\n' % class_name)
outf.write('};\n\n')
outf.write(' %s get%sFromString(const std::string& str) {\n'
% (class_name, class_name))
for item in items:
outf.write(' if (str == k%s%s) return %s::%s;\n' %
(class_name, item[0], class_name, item[0]))
outf.write(' throw std::invalid_argument'
' ("No matching %s from string");\n'
% (class_name))
outf.write('};\n')
outf.write('\n\n')
for ns in namespaces:
outf.write('}')
outf.write('\n\n')
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | -1,223,221,361,531,553,800 | 38.085714 | 80 | 0.54883 | false |
hansmeets/bio2rdf-scripts | linkedSPLs/LinkedSPLs-update/load-dailymed-spls/dailymed_rss.py | 1 | 4530 | '''
Created 03/15/2012
@author: gag30
@summary: Download and extract all of the xml files for drugs updated on Dailymed within the past 7 days.
Extracted xml files are saved to ./spls and ./spls/updates, both of which are created if they
don't exist.
'''
import feedparser
from lxml.html import fromstring
import os
import shutil
import string
import sys
import time
import urllib
import urllib2
from zipfile import ZipFile, is_zipfile
import pdb
## Remove the zip files that were downloaded from Dailymed/any
## contents of tmpdir
def clean(tmpdir):
files = [tmpdir + f for f in os.listdir(tmpdir)]
for f in files:
try:
os.remove(f)
        except OSError as err:
print "Couldn't delete " + f + ": " + err
try:
os.rmdir(tmpdir)
except OSError, err:
print "Couldn't delete " + tmpdir + ": " + err
## After the zip files have been downloaded and extracted to
## the temp folder, copy them to ./updateDir
## (./spls/updates by default)
def copy_xml(tmpdir, spldir):
tmpFiles = [tmpdir + f for f in os.listdir(tmpdir)]
updateDir = os.path.join(spldir, "updates")
for f in os.listdir(updateDir):
os.remove(os.path.join(updateDir, f))
for tmpFile in tmpFiles:
if tmpFile.endswith(".xml"):
shutil.copy(tmpFile, updateDir)
## Display download progress, updated on a single line
def dl_progress(current, total):
percent = int(float(current) / float(total) * 100)
message = " ".join([str(percent) + "%", "downloaded (" + str(current), "of", str(total) + ")"])
if percent == 100:
message += "...done\n"
sys.stdout.write("\r\x1b[K" + message)
sys.stdout.flush()
## No longer used
def get_download_name(title):
name = ""
for char in title:
if char in string.uppercase:
name += char
elif char == " ":
name += "%20"
else:
return name.strip("%20")
## Parse html for a link to xml file for a single drug
def get_xml_url(url):
usock = urllib2.urlopen(url)
html = usock.read()
usock.close()
baseurl = "http://dailymed.nlm.nih.gov"
root = fromstring(html)
for div in root.iter("div"):
if div.get("id") == "options":
for link in div.iter("a"):
href = link.get("href")
if "getFile.cfm?id" in href and "type=zip" in href:
return baseurl + href
return None
## Try to get the xml file url from url num
## times before failing
def get_xml_url_retry(url, num):
cnt = 0
while cnt < num:
cnt += 1
xmlUrl = get_xml_url(url)
if xmlUrl:
return xmlUrl
time.sleep(1)
return None
## Create a directory if one doesn't exist, else continue
def make_dir(name):
try:
os.mkdir(name)
except OSError:
pass
## Extract xml files from downloaded zip files
## to a temp dir
def unzip(tmpdir):
files = [tmpdir + f for f in os.listdir(tmpdir)]
for f in files:
try:
zipfile = ZipFile(f)
contents = zipfile.infolist()
for c in contents:
if c.filename[-4:] == ".xml":
zipfile.extract(c, tmpdir)
except:
print "Downloaded file {0} does not appear to be a zip file!".format(f)
sys.exit(1)
## Get the Dailymed rss feed for drugs updated within the past 7 days.
## Download the files for each, extract the spl and copy it to a
## master directory of spls and directory for spls contained in the
## current update. Delete downloaded files when finished.
def run():
TMPDIR = "tmp_spls/"
SPLDIR = "spls/"
rssparser = feedparser.parse('http://dailymed.nlm.nih.gov/dailymed/rss.cfm')
make_dir(TMPDIR)
make_dir(SPLDIR)
make_dir(os.path.join(SPLDIR, "updates"))
for ctr, entry in enumerate(rssparser['entries']):
#downloadURL = get_xml_url_retry(entry['link'], 3)
#downloadURL = downloadURL[:downloadURL.index("name=")]
setid = entry['id'].split('setid=')[1]
downloadURL = "http://dailymed.nlm.nih.gov/dailymed/downloadzipfile.cfm?setId={0}".format(setid)
#dailymedid = downloadURL.split("id=")[1].split("&")[0]
filename = os.path.join(TMPDIR, setid + ".zip")
urllib.urlretrieve(downloadURL, filename)
dl_progress(ctr+1, len(rssparser['entries']))
unzip(TMPDIR)
copy_xml(TMPDIR, SPLDIR)
clean(TMPDIR)
if __name__=="__main__":
run()
| mit | 1,745,702,138,065,895,700 | 30.027397 | 105 | 0.608609 | false |
nicholasserra/sentry | src/sentry/api/endpoints/group_events_latest.py | 3 | 1307 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api import client
from sentry.api.base import DocSection
from sentry.api.bases.group import GroupEndpoint
from sentry.models import Group
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('GetLatestGroupSample')
def get_latest_group_sample_scenario(runner):
project = runner.default_project
group = Group.objects.filter(project=project).first()
runner.request(
method='GET',
path='/issues/%s/events/latest/' % group.id,
)
class GroupEventsLatestEndpoint(GroupEndpoint):
doc_section = DocSection.EVENTS
@attach_scenarios([get_latest_group_sample_scenario])
def get(self, request, group):
"""
Latest Sample
`````````````
Retrieves the details of the latest sample for an aggregate.
:pparam string group_id: the ID of the group to get the latest sample of.
"""
event = group.get_latest_event()
if not event:
return Response({'detail': 'No events found for group'}, status=404)
try:
return client.get('/events/{}/'.format(event.id), request=request)
except client.ApiError as e:
return Response(e.body, status=e.status_code)
| bsd-3-clause | 7,867,201,354,274,145,000 | 30.119048 | 81 | 0.672533 | false |
lptorres/noah-inasafe | web_api/safe/messaging/item/emphasized_text.py | 1 | 1990 | """
InaSAFE Disaster risk assessment tool developed by AusAid - **Paragraph.**
Contact : [email protected]
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '28/05/2013'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from text import Text
#FIXME (MB) remove when all to_* methods are implemented
#pylint: disable=W0223
class EmphasizedText(Text):
"""A class to model emphasized text in the messaging system """
def __init__(self, text, **kwargs):
"""Creates a Emphasized Text Text object
Args:
String message, a string to add to the message
Returns:
None
Raises:
Errors are propagated
We pass the kwargs on to the base class so an exception is raised
if invalid keywords were passed. See:
http://stackoverflow.com/questions/13124961/
how-to-pass-arguments-efficiently-kwargs-in-python
"""
super(EmphasizedText, self).__init__(**kwargs)
self.text = text
def to_html(self):
"""Render as html
Args:
None
Returns:
Str the html representation
Raises:
Errors are propagated
"""
return '<em%s>%s%s</em>' % (
self.html_attributes(), self.html_icon(), self.text)
def to_text(self):
"""Render as plain text
Args:
None
Returns:
Str the plain text representation
Raises:
Errors are propagated
"""
return '_%s_' % self.text
| gpl-3.0 | 5,129,373,317,291,118,000 | 24.533333 | 78 | 0.573367 | false |
signalnine/zxcvbn | data-scripts/build_frequency_lists.py | 5 | 7267 |
import os
import time
import codecs
import urllib
import urllib2
from pprint import pprint
SLEEP_TIME = 20 # seconds
def get_ranked_english():
'''
    wiktionary has a list of ~40k English words, ranked by frequency of occurrence in TV
and movie transcripts. more details at:
http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/TV/2006/explanation
the list is separated into pages of 1000 or 2000 terms each.
* the first 10k words are separated into pages of 1000 terms each.
* the remainder is separated into pages of 2000 terms each:
'''
URL_TMPL = 'http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/TV/2006/%s'
urls = []
for i in xrange(10):
freq_range = "%d-%d" % (i * 1000 + 1, (i+1) * 1000)
urls.append(URL_TMPL % freq_range)
for i in xrange(0,15):
freq_range = "%d-%d" % (10000 + 2 * i * 1000 + 1, 10000 + (2 * i + 2) * 1000)
urls.append(URL_TMPL % freq_range)
urls.append(URL_TMPL % '40001-41284')
ranked_terms = [] # ordered by rank, in decreasing frequency.
for url in urls:
html, is_cached = wiki_download(url)
if not is_cached:
time.sleep(SLEEP_TIME)
new_terms = parse_wiki_terms(html)
ranked_terms.extend(new_terms)
return ranked_terms
def wiki_download(url):
'''
scrape friendly: sleep 20 seconds between each request, cache each result.
'''
DOWNLOAD_TMPL = '../data/tv_and_movie_freqlist%s.html'
freq_range = url[url.rindex('/')+1:]
tmp_path = DOWNLOAD_TMPL % freq_range
if os.path.exists(tmp_path):
print 'cached.......', url
with codecs.open(tmp_path, 'r', 'utf8') as f:
return f.read(), True
with codecs.open(tmp_path, 'w', 'utf8') as f:
print 'downloading...', url
req = urllib2.Request(url, headers={
'User-Agent': 'zxcvbn'
})
response = urllib2.urlopen(req)
result = response.read().decode('utf8')
f.write(result)
return result, False
def parse_wiki_terms(doc):
'''who needs an html parser. fragile hax, but checks the result at the end'''
results = []
last3 = ['', '', '']
header = True
for line in doc.split('\n'):
last3.pop(0)
last3.append(line.strip())
if all(s.startswith('<td>') and not s == '<td></td>' for s in last3):
if header:
header = False
continue
last3 = [s.replace('<td>', '').replace('</td>', '').strip() for s in last3]
rank, term, count = last3
rank = int(rank.split()[0])
term = term.replace('</a>', '')
term = term[term.index('>')+1:].lower()
results.append(term)
assert len(results) in [1000, 2000, 1284] # early docs have 1k entries, later 2k, last 1284
return results
def get_ranked_census_names():
'''
takes name lists from the the 2000 us census, prepares as a json array in order of frequency
(most common names first).
more info:
http://www.census.gov/genealogy/www/data/2000surnames/index.html
files in data are downloaded copies of:
http://www.census.gov/genealogy/names/dist.all.last
http://www.census.gov/genealogy/names/dist.male.first
http://www.census.gov/genealogy/names/dist.female.first
'''
FILE_TMPL = '../data/us_census_2000_%s.txt'
# ie7 can't handle huge lists. cut surname list off at a certain percentile.
SURNAME_CUTOFF_PERCENTILE = 85
lists = []
for list_name in ['surnames', 'male_first', 'female_first']:
path = FILE_TMPL % list_name
lst = []
for line in codecs.open(path, 'r', 'utf8'):
if line.strip():
if list_name == 'surnames' and float(line.split()[2]) > SURNAME_CUTOFF_PERCENTILE:
break
name = line.split()[0].lower()
lst.append(name)
lists.append(lst)
return lists
def get_ranked_common_passwords():
lst = []
for line in codecs.open('../data/common_passwords.txt', 'r', 'utf8'):
if line.strip():
lst.append(line.strip())
return lst
def to_ranked_dict(lst):
return dict((word, i) for i, word in enumerate(lst))
def filter_short(terms):
'''
only keep if brute-force possibilities are greater than this word's rank in the dictionary
'''
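    # e.g. a 2-letter word allows only 26**2 == 676 brute-force guesses, so it
    # is dropped once its rank index reaches 676: guessing it directly would be
    # no harder than finding it that deep in the dictionary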
return [term for i, term in enumerate(terms) if 26**(len(term)) > i]
def filter_dup(lst, lists):
'''
filters lst to only include terms that don't have lower rank in another list
'''
max_rank = len(lst) + 1
dct = to_ranked_dict(lst)
dicts = [to_ranked_dict(l) for l in lists]
return [word for word in lst if all(dct[word] < dct2.get(word, max_rank) for dct2 in dicts)]
def filter_ascii(lst):
'''
removes words with accent chars etc.
(most accented words in the english lookup exist in the same table unaccented.)
'''
return [word for word in lst if all(ord(c) < 128 for c in word)]
def to_kv(lst, lst_name):
for word in lst:
assert ',' not in word and '"' not in word, "hax, switch to csv if this starts failing"
val = '"%s".split(",")' % ','.join(lst)
return '%s: %s' % (lst_name, val)
def main():
english = get_ranked_english()
surnames, male_names, female_names = get_ranked_census_names()
passwords = get_ranked_common_passwords()
[english,
surnames, male_names, female_names,
passwords] = [filter_ascii(filter_short(lst)) for lst in (english,
surnames, male_names, female_names,
passwords)]
# make dictionaries disjoint so that d1 & d2 == set() for any two dictionaries
all_dicts = set(tuple(l) for l in [english, surnames, male_names, female_names, passwords])
passwords = filter_dup(passwords, all_dicts - set([tuple(passwords)]))
male_names = filter_dup(male_names, all_dicts - set([tuple(male_names)]))
female_names = filter_dup(female_names, all_dicts - set([tuple(female_names)]))
surnames = filter_dup(surnames, all_dicts - set([tuple(surnames)]))
english = filter_dup(english, all_dicts - set([tuple(english)]))
with open('../frequency_lists.coffee', 'w') as f: # words are all ascii at this point
lsts = locals()
f.write('# generated by scripts/build_frequency_lists.py\n')
f.write('frequency_lists = \n ')
lines = []
for lst_name in 'male_names female_names surnames passwords english'.split():
lst = lsts[lst_name]
lines.append(to_kv(lst, lst_name))
f.write('\n '.join(lines))
f.write('\n')
f.write('module.exports = frequency_lists\n')
print '\nall done! totals:\n'
print 'passwords....', len(passwords)
print 'male.........', len(male_names)
print 'female.......', len(female_names)
print 'surnames.....', len(surnames)
print 'english......', len(english)
print
if __name__ == '__main__':
if os.path.basename(os.getcwd()) != 'data-scripts':
print 'run this from the data-scripts directory'
exit(1)
main()
| mit | -3,165,214,987,900,263,000 | 35.517588 | 98 | 0.592404 | false |
mapattacker/cheatsheets | python/nltk.py | 1 | 10103 | import nltk
# BASICS
#---------------------------------------
# types of features in text classification
# 1) Words
# stopwords
# normalisation (lowercase or not)
# stemming/lemmatising or not
# Caps or not
# POS in a sentence
# Grammatical structure
# Grouping of words with the same meaning (semantics); other groupings too, e.g. dates
# DEFAULT CORPUSES
#---------------------------------------
# Open window to select download of corpora (collections of writings) and packages in nltk
nltk.download()
from nltk.book import * # list books
text1 # list book
sent1 # list one sentence of text1
# all english words
from nltk.corpus import words
correct_spellings = words.words()
# FREQUENCY DISTRIBUTION
#---------------------------------------
freq = nltk.FreqDist(g)
print freq
# it gives a dictionary
FreqDist({u'endorsed': 2,
u'Mortimer': 1,
u'foul': 2,
u'Heights': 5,
u'four': 20,
u'spiders': 1,
u'railing': 3,})
# So it works like a dictionary
freq.keys() # get words without frequency
freq['endorsed'] # >>> 2
top10 = freq.most_common(300) # top n most common, arranged descending order
# STEMMING
#---------------------------------------
# stemming involves finding the root word
input1 = "List listed lists listing listings"
words1 = input1.lower().split(' ')
porter = nltk.PorterStemmer()
[porter.stem(t) for t in words1]
# ['list', 'list', 'list', 'list', 'list']
# LEMMATIZATION
#---------------------------------------
# lemmatisation is a variant of stemming that always returns a real word as the root;
# plain stemming can produce roots that are not words.
udhr = nltk.corpus.udhr.words('English-Latin1')
# Using Stemming
porter = nltk.PorterStemmer()
print [porter.stem(t) for t in udhr[:20]]
[u'univers',
u'declar',
u'of',
u'human',
u'right',
u'preambl',
u'wherea',
u'recognit']
# Using Lemmatization
WNlemma = nltk.WordNetLemmatizer()
print [WNlemma.lemmatize(t) for t in udhr[:20]]
['Universal',
'Declaration',
'of',
'Human',
'Rights',
'Preamble',
'Whereas',
'recognition']
# TOKENIZATION
#---------------------------------------
# splitting text into words or sentences
# Word Tokens
text11 = "Children shouldn't drink a sugary drink before bed."
print text11.split(' ')
['Children', "shouldn't", 'drink', 'a', 'sugary', 'drink', 'before', 'bed.']
print nltk.word_tokenize(text11)
['Children', 'should', "n't", 'drink', 'a', 'sugary', 'drink', 'before', 'bed', '.']
# Sentence Tokens
text12 = "This is the first sentence. A gallon of milk in the U.S. \
costs $2.99. Is this the third sentence? Yes, it is!"
print nltk.sent_tokenize(text12)
['This is the first sentence.',
'A gallon of milk in the U.S. costs $2.99.',
'Is this the third sentence?',
'Yes, it is!']
# PARTS OF SPEECH (POS)
#---------------------------------------
# grammar terms
# get definition and examples of terms
nltk.help.upenn_tagset('MD')
text11 = "Children shouldn't drink a sugary drink before bed."
text11 = nltk.word_tokenize(text11)
print nltk.pos_tag(text11)
[('Children', 'NNP'),
('should', 'MD'),
("n't", 'RB'),
('drink', 'VB'),
('a', 'DT'),
('sugary', 'JJ'),
('drink', 'NN'),
('before', 'IN'),
('bed', 'NN'),
('.', '.')]
# isalpha()
#---------------------------------------
# Returns a boolean: whether the string contains alphabetic characters only
str = "this"; # No space & digit in this string
print str.isalpha() # True
str = "this is string example....wow!!!";
print str.isalpha() # False
# SPELL CHECKER ALGORITHMS
#---------------------------------------
# about ngrams
print set(nltk.ngrams('hello', n=3)) #trigram cos n=3
# set([('l', 'l', 'o'), ('e', 'l', 'l'), ('h', 'e', 'l')])
# Jaccard Distance, more: https://engineerbyday.wordpress.com/2012/01/30/how-spell-checkers-work-part-1/#Jaccard
nltk.jaccard_distance(set(nltk.ngrams(word1, n=4)),
set(nltk.ngrams(word2, n=4))) #shorter the distance, closer the match (0 to 1)
# Edit Distance
nltk.edit_distance(entries[0], a) #shorter distance, closer the match (0 to len(word))
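# a minimal hedged sketch of a spell recommender built on the distances above;
# the misspelled entries are made up, correct_spellings comes from
# nltk.corpus.words imported earlier
def recommend(entries, n=3):
    suggestions = []
    for entry in entries:
        # only compare against words sharing the first letter, to cut the search space
        candidates = [w for w in correct_spellings if w and w[0] == entry[0]]
        distances = [(nltk.jaccard_distance(set(nltk.ngrams(entry, n)),
                                            set(nltk.ngrams(w, n))), w)
                     for w in candidates]
        suggestions.append(min(distances)[1])  # smallest distance = closest match
    return suggestions
print recommend(['cormulent', 'incendenece'])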
# CLASSIFICATION
#---------------------------------------
from nltk.classify import NaiveBayesClassifier
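# a minimal hedged sketch: NLTK classifiers expect dicts of features rather than
# raw text; the word_feats helper and the tiny training set are made-up examples
def word_feats(words):
    return dict((w, True) for w in words)
train = [(word_feats(['great', 'fun', 'movie']), 'pos'),
         (word_feats(['awful', 'boring', 'plot']), 'neg')]
classifier = NaiveBayesClassifier.train(train)
print classifier.classify(word_feats(['fun', 'plot']))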
# nltk naive bayes has a useful function to show the most informative features
classifier.show_most_informative_features()
# Using Count Vectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
# Fitting the CountVectorizer tokenizes each document by finding all sequences of characters
# of at least two letters or numbers separated by word boundaries.
# Converts everything to lowercase and builds a vocabulary using these tokens.
# Converts each feature word into integer
vect = CountVectorizer().fit(X_train)
print vect.get_feature_names() # give a list of feature names
X_train_vectorized = vect.transform(X_train)
print vect.vocabulary_ # gives a dict of feature names with frequency
print vect.vocabulary_.items() # gives pairs of key values in tuples instead, within a list
model = LogisticRegression()
model.fit(X_train_vectorized, y_train)
predictions = model.predict(vect.transform(X_test))
print('AUC: ', roc_auc_score(y_test, predictions))
# TF-IDF (Term frequency-inverse document frequency)
# High weight is given to terms that appear often in a particular document,
# but don't appear often in the corpus (all documents).
# Features with low tf–idf are either commonly used across all documents
# or rarely used and only occur in long documents.
# TF-IDF can reduce the number of features required to train a model
from sklearn.feature_extraction.text import TfidfVectorizer
# min_df, a minimum document frequency of < 5
# extracting 1-grams and 2-grams
vect = TfidfVectorizer(min_df=5, ngram_range=(1,2)).fit(X_train)
# Use CountVectorizor to find three letter tokens, remove stop_words,
# remove tokens that don't appear in at least 20 documents,
# remove tokens that appear in more than 20% of the documents
vect = CountVectorizer(min_df=20, max_df=0.2, stop_words='english',
token_pattern='(?u)\\b\\w\\w\\w+\\b') # (?u) refers to unicode
# SEMANTICS
#---------------------------------------
# semantic text similarity using WordNet
# http://www.nltk.org/howto/wordnet.html
# Path Similarity (highest 0.5, lowest near but not 0)
import nltk
from nltk.corpus import wordnet as wn
deer = wn.synset('deer.n.01') # wordnet contains synsets, which is a collection of synonyms
elk = wn.synset('elk.n.01')
horse = wn.synset('horse.n.01')
deer.path_similarity(elk) # 0.5
deer.path_similarity(horse) # 0.14
# synset vs synsets (end with s)
wn.synset('test.n.01') # gives one specific synset (n stands for noun, 01 = first sense)
wn.synsets('test') # gives a list of all synsets for the word
# Lin Similarity
# Lowest Common Subsumer (find closest ancestor)
# Collocations & Distributions Similarity
# two words that occur frequently in similar context are more likely to be semantically related
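# hedged sketch of the two ideas above; lin_similarity needs an information-content
# corpus (requires the 'wordnet_ic' nltk data package)
from nltk.corpus import wordnet_ic
brown_ic = wordnet_ic.ic('ic-brown.dat')
deer.lin_similarity(elk, brown_ic)    # closer to 1 for semantically related words
deer.lin_similarity(horse, brown_ic)  # lower
deer.lowest_common_hypernyms(elk)     # the closest shared ancestor synset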
# TOPIC MODELING
#---------------------------------------
# Coarse level analysis of what's in a text collection
# A document is a mixture of topics
# A text clustering problem
# Different models available
# Topic output are just word distributions: interpretation is subjective
# Given: Corpus, Number of Topics
# Not Given: Topic Names, Topic Distribution for each document
# Preprocessing
# Tokenize, normalize
# Stop words removal (common works in a domain)
# Stemming
# Build Document Term Matrix
#   Tabulate, for each document, which words occur in it and how often (the document-term matrix)
# Build LDA Model
# Latent Dirichlet Allocation
# A type of generative model
# Choose length of document
#     Choose the mixture of topics for the document
#     Use each topic's multinomial distribution to output words to fill that
#       topic's quota: if 40% of a document's words come from topic A, topic
#       A's multinomial distribution generates that 40% of the words.
# LDA is a powerful text clustering tool that is commonly used as the first
# step to understand what a corpus is about.
# LDA can also be used as a feature selection technique for text
# classification and other tasks.
# Workflow with gensim, given a set of pre-processed text documents in the
# variable doc_set (import gensim, and from it the corpora and models modules):
#   1. Create a dictionary: a mapping between word IDs and words.
#   2. Create a corpus: convert every document in doc_set to a bag of words
#      using the dictionary; this is the step that builds the document-term matrix.
#   3. Feed the corpus to gensim.models.LdaModel, specifying the number of
#      topics to learn (four here) and the id2word mapping (the dictionary
#      created in step 1); also set how many passes it should make over the
#      data (other parameters are worth reading up on).
#   4. Print the topics, e.g. the top five words of each of the four topics.
# A trained ldamodel can also infer topic distributions of documents: apply it
# to a new document to get that document's distribution across the four topics.
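# A minimal sketch of that workflow (doc_set assumed to be a list of token
# lists after the preprocessing above):
from gensim import corpora, models
dictionary = corpora.Dictionary(doc_set)               # id <-> word mapping
corpus = [dictionary.doc2bow(doc) for doc in doc_set]  # document-term matrix
ldamodel = models.ldamodel.LdaModel(corpus, num_topics=4,
                                    id2word=dictionary, passes=50)
print(ldamodel.print_topics(num_topics=4, num_words=5))
new_doc_bow = dictionary.doc2bow(doc_set[0])
print(ldamodel[new_doc_bow])  # topic distribution for one document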
| mit | -8,646,577,840,105,274,000 | 33.831034 | 158 | 0.670033 | false |
githubutilities/LeetCode | Python/kth-smallest-element-in-a-bst.py | 3 | 1033 | # Time: O(max(h, k))
# Space: O(h)
# Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
#
# Note:
# You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
#
# Follow up:
# What if the BST is modified (insert/delete operations) often and
# you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @param {integer} k
# @return {integer}
def kthSmallest(self, root, k):
s, cur, rank = [], root, 0
while s or cur:
if cur:
s.append(cur)
cur = cur.left
else:
cur = s.pop()
rank += 1
if rank == k:
return cur.val
cur = cur.right
return float("-inf")
| mit | 8,156,352,121,990,785,000 | 25.384615 | 98 | 0.539359 | false |
zennobjects/kivy | kivy/core/audio/audio_pygame.py | 4 | 3145 | '''
AudioPygame: implementation of Sound with Pygame
'''
__all__ = ('SoundPygame', )
from kivy.clock import Clock
from kivy.utils import platform
from kivy.core.audio import Sound, SoundLoader
_platform = platform
try:
if _platform == 'android':
try:
import android.mixer as mixer
except ImportError:
# old python-for-android version
import android_mixer as mixer
else:
from pygame import mixer
except:
raise
# init pygame sound
mixer.pre_init(44100, -16, 2, 1024)
mixer.init()
mixer.set_num_channels(32)
class SoundPygame(Sound):
    # XXX we don't set __slots__ here, to automatically add
    # a dictionary. We need that to be able to use weakref for
    # SoundPygame object. Otherwise, it fails with:
# TypeError: cannot create weak reference to 'SoundPygame' object
# We use our clock in play() method.
# __slots__ = ('_data', '_channel')
@staticmethod
def extensions():
if _platform == 'android':
return ('wav', 'ogg', 'mp3')
return ('wav', 'ogg')
def __init__(self, **kwargs):
self._data = None
self._channel = None
super(SoundPygame, self).__init__(**kwargs)
def _check_play(self, dt):
if self._channel is None:
return False
if self._channel.get_busy():
return
if self.loop:
def do_loop(dt):
self.play()
Clock.schedule_once(do_loop)
else:
self.stop()
return False
def play(self):
if not self._data:
return
self._data.set_volume(self.volume)
self._channel = self._data.play()
# schedule event to check if the sound is still playing or not
Clock.schedule_interval(self._check_play, 0.1)
super(SoundPygame, self).play()
def stop(self):
if not self._data:
return
self._data.stop()
# ensure we don't have anymore the callback
Clock.unschedule(self._check_play)
self._channel = None
super(SoundPygame, self).stop()
def load(self):
self.unload()
if self.filename is None:
return
self._data = mixer.Sound(self.filename)
def unload(self):
self.stop()
self._data = None
def seek(self, position):
if not self._data:
return
if _platform == 'android' and self._channel:
self._channel.seek(position)
def get_pos(self):
if self._data is not None:
if _platform == 'android' and self._channel:
return self._channel.get_pos()
return mixer.music.get_pos()
return 0
def on_volume(self, instance, volume):
if self._data is not None:
self._data.set_volume(volume)
def _get_length(self):
if _platform == 'android' and self._channel:
return self._channel.get_length()
if self._data is not None:
return self._data.get_length()
return super(SoundPygame, self)._get_length()
SoundLoader.register(SoundPygame)
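# Minimal usage sketch (file path is hypothetical; loading goes through
# SoundLoader, which picks the class registered above for supported
# extensions):
#
#   from kivy.core.audio import SoundLoader
#   sound = SoundLoader.load('mytrack.wav')
#   if sound:
#       sound.play()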
| mit | 7,294,687,923,665,151,000 | 26.587719 | 70 | 0.575517 | false |
Bytewerk/Fernwartung | Fernwartung.py | 1 | 1401 | #!/usr/bin/env python3
# Requirements:
# - zenity
# - lxqt-sudo, gksudo or similar (see config)
# - sshd and possibly port forwarding must be already configured
#
# CONFIG
#
config_graphical_sudo_bin = "lxqt-sudo"
config_zenity_bin = "zenity"
config_sshd_start = ["systemctl", "start", "sshd"]
config_sshd_stop = ["systemctl", "stop", "sshd"]
config_lang = "de"
#
# CONFIG END
#
# Import required libraries
import sys
import translation
import subprocess
import os
sys.path.insert(0, "ipgetter")
import ipgetter
# Set the translation language
t = translation.t(config_lang)
# Helper Function: Display a message or error with zenity
def msg(text, is_error=False):
arg_type = "--info"
if is_error: arg_type = "--error"
subprocess.call([config_zenity_bin, arg_type, "--title",
"Fernwartung", "--text", text])
# Require root access
if not os.geteuid() == 0:
try:
subprocess.call([config_graphical_sudo_bin,__file__])
except:
msg(t.get("sudoerr")+"\n("+config_graphical_sudo_bin+")", True)
sys.exit()
# Get the IP
msg(t.get("connect"))
ip = ipgetter.myip()
if ip == "":
msg(t.get("ipfail"), True)
sys.exit()
# Start SSHD
try:
subprocess.call(config_sshd_start)
except:
msg(t.get("sshstartfail"), True)
	sys.exit()
# Keep running until the dialog closes
msg(t.get("running") + "IP: " +ip)
# Stop SSHD
try:
subprocess.call(config_sshd_stop)
except:
msg(t.get("sshstopfail"), True)
| unlicense | 334,722,960,870,031,900 | 19.304348 | 65 | 0.685225 | false |
rgayon/plaso | tests/parsers/sqlite_plugins/chrome_history.py | 1 | 12609 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Google Chrome History database plugin."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import chrome as _ # pylint: disable=unused-import
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import chrome_history
from tests.parsers.sqlite_plugins import test_lib
class GoogleChrome8HistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 8 history SQLite database plugin."""
def testProcess(self):
"""Tests the Process function on a Chrome History database file."""
plugin = chrome_history.GoogleChrome8HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 71 events (69 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 71)
events = list(storage_writer.GetEvents())
# Check the first page visited entry.
event = events[0]
self.CheckTimestamp(event.timestamp, '2011-04-07 12:03:11.000000')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = 'http://start.ubuntu.com/10.04/Google/'
self.assertEqual(event_data.url, expected_url)
self.assertEqual(event_data.title, 'Ubuntu Start Page')
expected_message = (
'{0:s} '
'(Ubuntu Start Page) [count: 0] '
'Visit Source: [SOURCE_FIREFOX_IMPORTED] Type: [LINK - User clicked '
'a link] (URL not typed directly - no typed count)').format(
expected_url)
expected_short_message = '{0:s} (Ubuntu Start Page)'.format(expected_url)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# Check the first file downloaded entry.
event = events[69]
self.CheckTimestamp(event.timestamp, '2011-05-23 08:35:30.000000')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'http://fatloss4idiotsx.com/download/funcats/'
'funcats_scr.exe')
self.assertEqual(event_data.url, expected_url)
expected_full_path = '/home/john/Downloads/funcats_scr.exe'
self.assertEqual(event_data.full_path, expected_full_path)
expected_message = (
'{0:s} ({1:s}). '
'Received: 1132155 bytes out of: '
'1132155 bytes.').format(expected_url, expected_full_path)
expected_short_message = '{0:s} downloaded (1132155 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
class GoogleChrome27HistoryPluginTest(test_lib.SQLitePluginTestCase):
"""Tests for the Google Chrome 27 history SQLite database plugin."""
def testProcess57(self):
"""Tests the Process function on a Google Chrome 57 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-57.0.2987.133'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:09:53.885478')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
self.assertEqual(event_data.url, expected_url)
self.assertEqual(event_data.title, '')
expected_message = (
'{0:s} '
'[count: 0] '
'Type: [START_PAGE - The start page of the browser] '
'(URL not typed directly - no typed count)').format(expected_url)
expected_short_message = '{0:s}...'.format(expected_url[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# Check the file downloaded event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:09:53.900399')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')
self.assertEqual(event_data.url, expected_url)
expected_full_path = '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi'
self.assertEqual(event_data.full_path, expected_full_path)
expected_message = (
'{0:s} ({1:s}). '
'Received: 3080192 bytes out of: 3080192 bytes.').format(
expected_url, expected_full_path)
expected_short_message = '{0:s} downloaded (3080192 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
def testProcess58(self):
"""Tests the Process function on a Google Chrome 58 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-58.0.3029.96'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:09:27.315765')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
self.assertEqual(event_data.url, expected_url)
self.assertEqual(event_data.title, '')
expected_message = (
'{0:s} '
'[count: 0] '
'Type: [START_PAGE - The start page of the browser] '
'(URL not typed directly - no typed count)').format(expected_url)
expected_short_message = '{0:s}...'.format(expected_url[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# Check the file downloaded event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:09:27.200398')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')
self.assertEqual(event_data.url, expected_url)
expected_full_path = '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi'
self.assertEqual(event_data.full_path, expected_full_path)
expected_message = (
'{0:s} ({1:s}). '
'Received: 3080192 bytes out of: 3080192 bytes.').format(
expected_url, expected_full_path)
expected_short_message = '{0:s} downloaded (3080192 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
def testProcess59(self):
"""Tests the Process function on a Google Chrome 59 History database."""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-59.0.3071.86'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:08:52.037692')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
self.assertEqual(event_data.url, expected_url)
self.assertEqual(event_data.title, '')
expected_message = (
'{0:s} '
'[count: 0] '
'Type: [START_PAGE - The start page of the browser] '
'(URL not typed directly - no typed count)').format(expected_url)
expected_short_message = '{0:s}...'.format(expected_url[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# Check the file downloaded event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:08:51.811123')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')
self.assertEqual(event_data.url, expected_url)
expected_full_path = '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi'
self.assertEqual(event_data.full_path, expected_full_path)
expected_message = (
'{0:s} ({1:s}). '
'Received: 3080192 bytes out of: 3080192 bytes.').format(
expected_url, expected_full_path)
expected_short_message = '{0:s} downloaded (3080192 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
def testProcess59ExtraColumn(self):
"""Tests the Process function on a Google Chrome 59 History database,
manually modified to have an unexpected column.
"""
plugin = chrome_history.GoogleChrome27HistoryPlugin()
storage_writer = self._ParseDatabaseFileWithPlugin(
['History-59_added-fake-column'], plugin)
self.assertEqual(storage_writer.number_of_warnings, 0)
# The History file contains 2 events (1 page visits, 1 file downloads).
self.assertEqual(storage_writer.number_of_events, 2)
events = list(storage_writer.GetEvents())
# Check the page visit event.
event = events[0]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:08:52.037692')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_LAST_VISITED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/dfirlabs/chrome-specimens/master/'
'generate-specimens.sh')
self.assertEqual(event_data.url, expected_url)
self.assertEqual(event_data.title, '')
expected_message = (
'{0:s} '
'[count: 0] '
'Type: [START_PAGE - The start page of the browser] '
'(URL not typed directly - no typed count)').format(expected_url)
expected_short_message = '{0:s}...'.format(expected_url[:77])
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
# Check the file downloaded event.
event = events[1]
self.CheckTimestamp(event.timestamp, '2018-01-21 14:08:51.811123')
self.assertEqual(
event.timestamp_desc, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
event_data = self._GetEventDataOfEvent(storage_writer, event)
expected_url = (
'https://raw.githubusercontent.com/log2timeline/l2tbinaries/master/'
'win32/plaso-20171231.1.win32.msi')
self.assertEqual(event_data.url, expected_url)
expected_full_path = '/home/ubuntu/Downloads/plaso-20171231.1.win32.msi'
self.assertEqual(event_data.full_path, expected_full_path)
expected_message = (
'{0:s} ({1:s}). '
'Received: 3080192 bytes out of: 3080192 bytes.').format(
expected_url, expected_full_path)
expected_short_message = '{0:s} downloaded (3080192 bytes)'.format(
expected_full_path)
self._TestGetMessageStrings(
event_data, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,171,431,143,122,919,200 | 37.093656 | 77 | 0.685701 | false |
osgcc/ryzom | nel/tools/build_gamedata/processes/anim/1_export.py | 3 | 4632 | #!/usr/bin/python
#
# #################################################################
# ## WARNING : this is a generated file, don't change it !
# #################################################################
#
# \file 1_export.py
# \brief Export anim
# \date 2011-09-21-20-51-GMT
# \author Jan Boon (Kaetemi)
# Python port of game data build pipeline.
# Export anim
#
# NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
# Copyright (C) 2010 Winch Gate Property Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import time, sys, os, shutil, subprocess, distutils.dir_util
sys.path.append("../../configuration")
if os.path.isfile("log.log"):
os.remove("log.log")
if os.path.isfile("temp_log.log"):
os.remove("temp_log.log")
log = open("temp_log.log", "w")
from scripts import *
from buildsite import *
from process import *
from tools import *
from directories import *
printLog(log, "")
printLog(log, "-------")
printLog(log, "--- Export anim")
printLog(log, "-------")
printLog(log, time.strftime("%Y-%m-%d %H:%MGMT", time.gmtime(time.time())))
printLog(log, "")
# Find tools
# ...
# Export anim 3dsmax
if MaxAvailable:
# Find tools
Max = findMax(log, MaxDirectory, MaxExecutable)
printLog(log, "")
printLog(log, ">>> Export anim 3dsmax <<<")
mkPath(log, ExportBuildDirectory + "/" + AnimExportDirectory)
mkPath(log, ExportBuildDirectory + "/" + AnimTagExportDirectory)
for dir in AnimSourceDirectories:
mkPath(log, DatabaseDirectory + "/" + dir)
if (needUpdateDirByTagLog(log, DatabaseDirectory + "/" + dir, ".max", ExportBuildDirectory + "/" + AnimTagExportDirectory, ".max.tag")):
scriptSrc = "maxscript/anim_export.ms"
scriptDst = MaxUserDirectory + "/scripts/anim_export.ms"
outputLogfile = ScriptDirectory + "/processes/anim/log.log"
outputDirectory = ExportBuildDirectory + "/" + AnimExportDirectory
tagDirectory = ExportBuildDirectory + "/" + AnimTagExportDirectory
maxSourceDir = DatabaseDirectory + "/" + dir
maxRunningTagFile = tagDirectory + "/max_running.tag"
tagList = findFiles(log, tagDirectory, "", ".max.tag")
tagLen = len(tagList)
if os.path.isfile(scriptDst):
os.remove(scriptDst)
tagDiff = 1
sSrc = open(scriptSrc, "r")
sDst = open(scriptDst, "w")
for line in sSrc:
newline = line.replace("%OutputLogfile%", outputLogfile)
newline = newline.replace("%MaxSourceDirectory%", maxSourceDir)
newline = newline.replace("%OutputDirectory%", outputDirectory)
newline = newline.replace("%TagDirectory%", tagDirectory)
sDst.write(newline)
sSrc.close()
sDst.close()
zeroRetryLimit = 3
while tagDiff > 0:
mrt = open(maxRunningTagFile, "w")
mrt.write("moe-moe-kyun")
mrt.close()
printLog(log, "MAXSCRIPT " + scriptDst)
subprocess.call([ Max, "-U", "MAXScript", "anim_export.ms", "-q", "-mi", "-vn" ])
if os.path.exists(outputLogfile):
try:
lSrc = open(outputLogfile, "r")
for line in lSrc:
lineStrip = line.strip()
if (len(lineStrip) > 0):
printLog(log, lineStrip)
lSrc.close()
os.remove(outputLogfile)
except Exception:
printLog(log, "ERROR Failed to read 3dsmax log")
else:
printLog(log, "WARNING No 3dsmax log")
tagList = findFiles(log, tagDirectory, "", ".max.tag")
newTagLen = len(tagList)
tagDiff = newTagLen - tagLen
tagLen = newTagLen
addTagDiff = 0
if os.path.exists(maxRunningTagFile):
printLog(log, "FAIL 3ds Max crashed and/or file export failed!")
if tagDiff == 0:
if zeroRetryLimit > 0:
zeroRetryLimit = zeroRetryLimit - 1
addTagDiff = 1
else:
printLog(log, "FAIL Retry limit reached!")
else:
addTagDiff = 1
os.remove(maxRunningTagFile)
printLog(log, "Exported " + str(tagDiff) + " .max files!")
tagDiff += addTagDiff
os.remove(scriptDst)
printLog(log, "")
log.close()
if os.path.isfile("log.log"):
os.remove("log.log")
shutil.move("temp_log.log", "log.log")
# end of file
| agpl-3.0 | 7,485,434,476,453,457,000 | 32.323741 | 138 | 0.657168 | false |
shsingh/ansible | lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py | 10 | 22917 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_storageaccount_info
version_added: "2.9"
short_description: Get storage account facts
description:
- Get facts for one storage account or all storage accounts within a resource group.
options:
name:
description:
- Only show results for a specific account.
resource_group:
description:
- Limit results to a resource group. Required when filtering by name.
aliases:
- resource_group_name
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
show_connection_string:
description:
- Show the connection string for each of the storageaccount's endpoints.
- For convenient usage, C(show_connection_string) will also show the access keys for each of the storageaccount's endpoints.
            - Note that listing all storage accounts takes far longer than querying a single one.
type: bool
version_added: "2.8"
show_blob_cors:
description:
- Show the blob CORS settings for each blob related to the storage account.
- Querying all storage accounts will take a long time.
type: bool
version_added: "2.8"
extends_documentation_fragment:
- azure
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
'''
EXAMPLES = '''
- name: Get facts for one account
azure_rm_storageaccount_info:
resource_group: myResourceGroup
name: clh0002
- name: Get facts for all accounts in a resource group
azure_rm_storageaccount_info:
resource_group: myResourceGroup
- name: Get facts for all accounts by tags
azure_rm_storageaccount_info:
tags:
- testing
- foo:bar
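# Hypothetical extra example: also fetch the endpoint connection strings
# (per the option docs above, this is slow when listing many accounts)
- name: Get facts for one account including connection strings
  azure_rm_storageaccount_info:
    resource_group: myResourceGroup
    name: clh0002
    show_connection_string: true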
'''
RETURN = '''
azure_storageaccounts:
description:
- List of storage account dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/myResourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001",
"location": "eastus2",
"name": "testaccount001",
"properties": {
"accountType": "Standard_LRS",
"creationTime": "2016-03-28T02:46:58.290113Z",
"primaryEndpoints": {
"blob": "https://testaccount001.blob.core.windows.net/",
"file": "https://testaccount001.file.core.windows.net/",
"queue": "https://testaccount001.queue.core.windows.net/",
"table": "https://testaccount001.table.core.windows.net/"
},
"primaryLocation": "eastus2",
"provisioningState": "Succeeded",
"statusOfPrimary": "Available"
},
"tags": {},
"type": "Microsoft.Storage/storageAccounts"
}]
storageaccounts:
description:
- List of storage account dicts in resource module's parameter format.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/t
estaccount001"
name:
description:
- Name of the storage account to update or create.
returned: always
type: str
sample: testaccount001
location:
description:
- Valid Azure location. Defaults to location of the resource group.
returned: always
type: str
sample: eastus
account_type:
description:
- Type of storage account.
- C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
- Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
returned: always
type: str
sample: Standard_ZRS
custom_domain:
description:
- User domain assigned to the storage account.
- Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
returned: always
type: complex
contains:
name:
description:
- CNAME source.
returned: always
type: str
sample: testaccount
use_sub_domain:
description:
- Whether to use sub domain.
returned: always
type: bool
sample: true
kind:
description:
- The kind of storage.
returned: always
type: str
sample: Storage
access_tier:
description:
- The access tier for this storage account.
returned: always
type: str
sample: Hot
https_only:
description:
- Allows https traffic only to storage service when set to C(true).
returned: always
type: bool
sample: false
provisioning_state:
description:
- The status of the storage account at the time the operation was called.
- Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
returned: always
type: str
sample: Succeeded
secondary_location:
description:
- The location of the geo-replicated secondary for the storage account.
- Only available if the I(account_type=Standard_GRS) or I(account_type=Standard_RAGRS).
returned: always
type: str
sample: westus
status_of_primary:
description:
- Status of the primary location of the storage account; either C(available) or C(unavailable).
returned: always
type: str
sample: available
status_of_secondary:
description:
- Status of the secondary location of the storage account; either C(available) or C(unavailable).
returned: always
type: str
sample: available
primary_location:
description:
- The location of the primary data center for the storage account.
returned: always
type: str
sample: eastus
primary_endpoints:
description:
- URLs to retrieve a public I(blob), I(queue), or I(table) object.
- Note that C(Standard_ZRS) and C(Premium_LRS) accounts only return the blob endpoint.
returned: always
type: complex
contains:
blob:
description:
- The primary blob endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The primary blob endpoint.
returned: always
type: str
sample: "https://testaccount001.blob.core.windows.net/"
connectionstring:
description:
- Connectionstring of the blob endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X"
queue:
description:
- The primary queue endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The primary queue endpoint.
returned: always
type: str
sample: "https://testaccount001.queue.core.windows.net/"
connectionstring:
description:
- Connectionstring of the queue endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X"
table:
description:
- The primary table endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The primary table endpoint.
returned: always
type: str
sample: "https://testaccount001.table.core.windows.net/"
connectionstring:
description:
- Connectionstring of the table endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X"
key:
description:
- The account key for the primary_endpoints
returned: always
type: str
sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
secondary_endpoints:
description:
- The URLs to retrieve a public I(blob), I(queue), or I(table) object from the secondary location.
- Only available if the SKU I(name=Standard_RAGRS).
returned: always
type: complex
contains:
blob:
description:
- The secondary blob endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The secondary blob endpoint.
returned: always
type: str
sample: "https://testaccount001.blob.core.windows.net/"
connectionstring:
description:
- Connectionstring of the blob endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X"
queue:
description:
- The secondary queue endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The secondary queue endpoint.
returned: always
type: str
sample: "https://testaccount001.queue.core.windows.net/"
connectionstring:
description:
- Connectionstring of the queue endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X"
table:
description:
- The secondary table endpoint and connection string.
returned: always
type: complex
contains:
endpoint:
description:
- The secondary table endpoint.
returned: always
type: str
sample: "https://testaccount001.table.core.windows.net/"
connectionstring:
description:
- Connectionstring of the table endpoint.
returned: always
type: str
sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X"
key:
description:
                        - The account key for the secondary_endpoints
                    returned: always
                    type: str
                    sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
tags:
description:
- Resource tags.
returned: always
type: dict
sample: { "tag1": "abc" }
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils._text import to_native
AZURE_OBJECT_CLASS = 'StorageAccount'
class AzureRMStorageAccountInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str', aliases=['resource_group_name']),
tags=dict(type='list'),
show_connection_string=dict(type='bool'),
show_blob_cors=dict(type='bool')
)
self.results = dict(
changed=False,
storageaccounts=[]
)
self.name = None
self.resource_group = None
self.tags = None
self.show_connection_string = None
self.show_blob_cors = None
super(AzureRMStorageAccountInfo, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
is_old_facts = self.module._name == 'azure_rm_storageaccount_facts'
if is_old_facts:
self.module.deprecate("The 'azure_rm_storageaccount_facts' module has been renamed to 'azure_rm_storageaccount_info'", version='2.13')
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
results = []
if self.name:
results = self.get_account()
elif self.resource_group:
results = self.list_resource_group()
else:
results = self.list_all()
filtered = self.filter_tag(results)
if is_old_facts:
self.results['ansible_facts'] = {
'azure_storageaccounts': self.serialize(filtered),
'storageaccounts': self.format_to_dict(filtered),
}
self.results['storageaccounts'] = self.format_to_dict(filtered)
return self.results
def get_account(self):
self.log('Get properties for account {0}'.format(self.name))
account = None
try:
account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
return [account]
except CloudError:
pass
return []
def list_resource_group(self):
self.log('List items')
try:
response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group)
except Exception as exc:
self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
return response
def list_all(self):
self.log('List all items')
try:
response = self.storage_client.storage_accounts.list()
except Exception as exc:
self.fail("Error listing all items - {0}".format(str(exc)))
return response
def filter_tag(self, raw):
return [item for item in raw if self.has_tags(item.tags, self.tags)]
def serialize(self, raw):
return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raw]
def format_to_dict(self, raw):
return [self.account_obj_to_dict(item) for item in raw]
def account_obj_to_dict(self, account_obj, blob_service_props=None):
account_dict = dict(
id=account_obj.id,
name=account_obj.name,
location=account_obj.location,
access_tier=(account_obj.access_tier.value
if account_obj.access_tier is not None else None),
account_type=account_obj.sku.name.value,
kind=account_obj.kind.value if account_obj.kind else None,
provisioning_state=account_obj.provisioning_state.value,
secondary_location=account_obj.secondary_location,
status_of_primary=(account_obj.status_of_primary.value
if account_obj.status_of_primary is not None else None),
status_of_secondary=(account_obj.status_of_secondary.value
if account_obj.status_of_secondary is not None else None),
primary_location=account_obj.primary_location,
https_only=account_obj.enable_https_traffic_only
)
id_dict = self.parse_resource_to_dict(account_obj.id)
account_dict['resource_group'] = id_dict.get('resource_group')
account_key = self.get_connectionstring(account_dict['resource_group'], account_dict['name'])
account_dict['custom_domain'] = None
if account_obj.custom_domain:
account_dict['custom_domain'] = dict(
name=account_obj.custom_domain.name,
use_sub_domain=account_obj.custom_domain.use_sub_domain
)
account_dict['primary_endpoints'] = None
if account_obj.primary_endpoints:
account_dict['primary_endpoints'] = dict(
blob=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.blob, 'blob'),
queue=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.queue, 'queue'),
table=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.table, 'table')
)
if account_key[0]:
account_dict['primary_endpoints']['key'] = '{0}'.format(account_key[0])
account_dict['secondary_endpoints'] = None
if account_obj.secondary_endpoints:
account_dict['secondary_endpoints'] = dict(
blob=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.blob, 'blob'),
queue=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.queue, 'queue'),
table=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.primary_endpoints.table, 'table'),
)
if account_key[1]:
account_dict['secondary_endpoints']['key'] = '{0}'.format(account_key[1])
account_dict['tags'] = None
if account_obj.tags:
account_dict['tags'] = account_obj.tags
blob_service_props = self.get_blob_service_props(account_dict['resource_group'], account_dict['name'])
if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
account_dict['blob_cors'] = [dict(
allowed_origins=to_native(x.allowed_origins),
allowed_methods=to_native(x.allowed_methods),
max_age_in_seconds=x.max_age_in_seconds,
exposed_headers=to_native(x.exposed_headers),
allowed_headers=to_native(x.allowed_headers)
) for x in blob_service_props.cors.cors_rules]
return account_dict
def format_endpoint_dict(self, name, key, endpoint, storagetype, protocol='https'):
result = dict(endpoint=endpoint)
if key:
result['connectionstring'] = 'DefaultEndpointsProtocol={0};EndpointSuffix={1};AccountName={2};AccountKey={3};{4}Endpoint={5}'.format(
protocol,
self._cloud_environment.suffixes.storage_endpoint,
name,
key,
str.title(storagetype),
endpoint)
return result
def get_blob_service_props(self, resource_group, name):
if not self.show_blob_cors:
return None
try:
blob_service_props = self.storage_client.blob_services.get_service_properties(resource_group, name)
return blob_service_props
except Exception:
pass
return None
def get_connectionstring(self, resource_group, name):
keys = ['', '']
if not self.show_connection_string:
return keys
try:
cred = self.storage_client.storage_accounts.list_keys(resource_group, name)
# get the following try catch from CLI
try:
keys = [cred.keys[0].value, cred.keys[1].value]
except AttributeError:
keys = [cred.key1, cred.key2]
except Exception:
pass
return keys
def main():
AzureRMStorageAccountInfo()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,622,439,457,724,269,000 | 40.143627 | 152 | 0.535585 | false |
mjoblin/netdumplings | netdumplings/dumplingchefs/dnslookupchef.py | 1 | 2558 | import time
from netdumplings import DumplingChef
class DNSLookupChef(DumplingChef):
"""
Makes dumplings which describe DNS activity. Sends per-packet dumplings
for individual DNS (Domain Name System) lookups; and poke-interval
dumplings which describe the hosts lookups seen so far with per-host lookup
counts and timestamp of last lookup.
Dumpling payload examples:
Per DNS lookup: ::
{
"lookup": {
"hostname": "srirachamadness.com",
"when": 1499040017.811247
}
}
Per poke-interval: ::
{
"lookups_seen": {
"srirachamadness.com": {
"count": 28,
"latest": 1499040017.811247
},
"www.fleegle.com": {
"count": 1,
"latest": 1499045642.57563
},
"floople.com": {
"count": 7,
"latest": 1499043343.104648
}
}
}
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lookups_seen = {}
def packet_handler(self, packet):
"""
Processes a packet from nd-sniff. Makes a dumpling summarizing the
        contents of each valid DNS lookup.
:param packet: Packet from nd-sniff.
"""
if not packet.haslayer('DNS'):
return
dns_query = packet.getlayer('DNS')
query = dns_query.fields['qd']
if query is None:
return
hostname = query.qname.decode('utf-8')
if hostname.endswith('.'):
hostname = hostname[:-1]
now_millis = time.time()
try:
self.lookups_seen[hostname]['count'] += 1
self.lookups_seen[hostname]['latest'] = now_millis
except KeyError:
self.lookups_seen[hostname] = {
'count': 1,
'latest': now_millis,
}
payload = {
'lookup': {
'hostname': hostname,
'when': now_millis
}
}
return payload
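    # Sketch: hand-feeding a scapy DNS query into this handler for a quick
    # check (hypothetical; in normal use packets arrive via nd-sniff and the
    # chef is instantiated by the netdumplings machinery):
    #
    #   from scapy.all import DNS, DNSQR, IP, UDP
    #   pkt = IP()/UDP()/DNS(qd=DNSQR(qname='example.com'))
    #   payload = chef.packet_handler(pkt)  # chef: a DNSLookupChef instance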
def interval_handler(self, interval=None):
"""
Makes a dumpling at regular intervals which summarizes all the host
lookups seen so far along with the count and latest lookup time for
each host.
"""
payload = {
'lookups_seen': self.lookups_seen
}
return payload
| mit | -6,226,850,122,016,848,000 | 25.102041 | 79 | 0.497263 | false |
voxpupuli/puppetboard | puppetboard/default_settings.py | 4 | 1712 | import os
PUPPETDB_HOST = 'localhost'
PUPPETDB_PORT = 8080
PUPPETDB_PROTO = None
PUPPETDB_SSL_VERIFY = True
PUPPETDB_KEY = None
PUPPETDB_CERT = None
PUPPETDB_TIMEOUT = 20
DEFAULT_ENVIRONMENT = 'production'
SECRET_KEY = os.urandom(24)
DEV_LISTEN_HOST = '127.0.0.1'
DEV_LISTEN_PORT = 5000
DEV_COFFEE_LOCATION = 'coffee'
UNRESPONSIVE_HOURS = 2
ENABLE_QUERY = True
# Uncomment to restrict the enabled PuppetDB endpoints in the query page.
# ENABLED_QUERY_ENDPOINTS = ['facts', 'nodes']
LOCALISE_TIMESTAMP = True
LOGLEVEL = 'info'
NORMAL_TABLE_COUNT = 100
LITTLE_TABLE_COUNT = 10
TABLE_COUNT_SELECTOR = [10, 20, 50, 100, 500]
DISPLAYED_METRICS = ['resources.total',
'events.failure',
'events.success',
'resources.skipped',
'events.noop']
OFFLINE_MODE = False
ENABLE_CATALOG = False
OVERVIEW_FILTER = None
PAGE_TITLE = "Puppetboard"
GRAPH_TYPE = 'pie'
GRAPH_FACTS = ['architecture',
'clientversion',
'domain',
'lsbcodename',
'lsbdistcodename',
'lsbdistid',
'lsbdistrelease',
'lsbmajdistrelease',
'netmask',
'osfamily',
'puppetversion',
'processorcount']
INVENTORY_FACTS = [('Hostname', 'fqdn'),
('IP Address', 'ipaddress'),
('OS', 'lsbdistdescription'),
('Architecture', 'hardwaremodel'),
('Kernel Version', 'kernelrelease'),
('Puppet Version', 'puppetversion'), ]
REFRESH_RATE = 30
DAILY_REPORTS_CHART_ENABLED = True
DAILY_REPORTS_CHART_DAYS = 8
WITH_EVENT_NUMBERS = True
| apache-2.0 | -6,717,358,751,225,282,000 | 30.127273 | 73 | 0.584696 | false |
hsavolai/vmlab | src/kiwi/accessor.py | 3 | 18200 | #
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2002-2005 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Andreas Kostyrka <[email protected]>
# Christian Reis <[email protected]>
# Johan Dahlin <[email protected]>
#
"""The accessor module offers two important front-end functions:
kgetattr and ksetattr. These functions allow retrieving attribute values
from objects much in the same way as getattr/setattr allow, but with two
important differences:
- They follow a dot hierarchy to retrieve or modify any value
reachable from the object.
- They cache the method used to access a certain attribute and reuse
it the next time the value is retrieved.
"""
import string
import types
from kiwi.log import Logger
log = Logger('kiwi.accessor')
def get_default_getter(model, attr_name, cache):
"""Obtains from model a callable through which attr_name can be
retrieved. This callable is an accessor named get_foo, where
foo is the value of attr_name, or getattr(model, foo) if the
accessor does not exist. If the callable exists, it is returned;
if getattr() is to be used a tuple in the format (model,
attr_name) is returned."""
func = getattr(model, "get_%s" % attr_name, None)
if callable(func):
log.info('kgetattr based get_%s method is deprecated, '
'replace it with a property' % attr_name)
return func
else:
return (model, attr_name)
def get_default_setter(model, attr_name, cache):
"""Obtains from model a callable through which attr_name can be
set. This callable is an accessor named set_foo, where
foo is the value of attr_name, or setattr(model, foo, value) if the
accessor does not exist. If the callable exists, it is returned;
if setattr() is to be used a tuple in the format (model,
attr_name) is returned."""
func = getattr(model, "set_%s" % attr_name, None)
if callable(func):
log.info('ksetattr based set_%s method is deprecated, '
'replace it with a property' % attr_name)
return func
else:
return (model, attr_name)
# The _*_cache dictionaries cache the objects, attributes and callables
# (called `accessor tuples' here) we retrieve values from. If possible,
# we use weakrefs to avoid holding hard references to objects, allowing
# them to be garbage collected. Certain objects (ZODB.Persistent for
# one) cannot be weakref()ed and *will* leak - be sure to call
# clear_attr_cache() if you need them released.
#
# Key structure:
# (objref_or_weakref, attrname)
#
# Value structure (accessor tuples):
#
# kgetattr: (access_code, data1, data2)
# ksetattr: (access_code, data1, data2, value_mode)
#
# Access codes:
#
# 0: data1() unbound methods and functions (data2 is None)
# 1: data2(data1()) bound methods and weakref
# 2: getattr(data1(), data2) using straight getattr and weakref
# 3: data2(data1) bound methods (no weakref)
# 4: getattr(data1, data2) using straight getattr (no weakref)
import weakref
_kgetattr_cache = {}
_kgetattr_wref = {}
_ksetattr_cache = {}
_ksetattr_wref = {}
class CacheControl(object):
__slots__ = ['key', 'cacheable']
def __init__(self, key):
self.key = key
self.cacheable = 1
def disable(self):
self.cacheable = 0
def invalidate(self):
key = self.key
if _kgetattr_cache.has_key(key):
del _kgetattr_cache[key]
if _ksetattr_cache.has_key(key):
del _ksetattr_cache[key]
class _AttrUnset:
# indicates an unset value since None needs to be used
pass
class DefaultValue(Exception):
"""
This can be raised in kgetattr accessors to indicate that the default
value should be used
"""
def kgetattr_guard(wref):
try:
key = _kgetattr_wref[id(wref)][0]
del _kgetattr_wref[id(wref)]
del _kgetattr_cache[key]
except KeyError:
# This path is used only when the program terminates.
pass
def ksetattr_guard(wref):
try:
key = _ksetattr_wref[id(wref)][0]
del _ksetattr_wref[id(wref)]
del _ksetattr_cache[key]
except KeyError:
# This path is used only when the program terminates.
pass
# 1. Break up attr_name into parts
# 2. Loop around main lookup code for each part:
# 2.1. Try and get accessor tuple out of cache
# 2.2. If not there, generate tuple from callable and store it
# 2.3. Use accessor tuple to grab value
# 2.4. Value wasn't found, return default or raise ValueError
# Use value as obj in next iteration
# 3. Return value
def kgetattr(model,
attr_name,
default=_AttrUnset,
flat=0,
# bind to local variables for speed:
ref=weakref.ref,
TupleType=types.TupleType,
MethodType=types.MethodType,
split=string.split,
kgetattr_guard=kgetattr_guard,
getattr=getattr,
dummycache=CacheControl((None,None)),
# constants:
# access opcodes:
LAMBDA_ACCESS = 0,
METHOD_ACCESS = 1,
TUPLE_ACCESS = 2,
NWR_METHOD_ACCESS = 3,
NWR_TUPLE_ACCESS = 4,
# FAST tuples do not store the object, as the input object
# is also the accesses object.
FAST_METHOD_ACCESS = 5,
FAST_TUPLE_ACCESS = 6,
):
"""Returns the value associated with the attribute in model
named by attr_name. If default is provided and model does not
have an attribute called attr_name, the default value is
returned. If flat=1 is specified, no dot path parsing will
be done."""
# 1. Break up attr_name into parts
if flat or "." not in attr_name:
names = [attr_name, ]
else:
try:
names = attr_name.split(".")
except AttributeError:
names = split(attr_name, ".")
# 2. Loop around main lookup code for each part:
obj = model
for name in names:
key = (id(obj), name)
# First time round, obj is the model. Every subsequent loop, obj
# is the subattribute value indicated by the current part in
# [names]. The last loop grabs the target value and returns it.
try:
# 2.1 Fetch the opcode tuple from the cache.
objref, icode, data1, data2 = _kgetattr_cache[key]
except KeyError:
# 2.2. If not there, generate tuple from callable and store it
try:
get_getter = obj.__class__.get_getter
cache = CacheControl(key)
except AttributeError:
# This is needed so that the check below if the result is
# cacheable can be done. The inbuilt get_getter always
# allows caching.
cache = dummycache
get_getter = None
func = getattr(obj, "get_%s" % name, None)
if callable(func):
log.info('kgetattr based get_%s method is deprecated, '
'replace it with a property' % name)
icode = FAST_METHOD_ACCESS
data1 = func.im_func
data2 = None
else:
icode = FAST_TUPLE_ACCESS
data1 = None
data2 = name
if get_getter is not None:
try:
func = get_getter(obj, name, cache)
except DefaultValue:
if default == _AttrUnset:
raise
return default
if isinstance(func, TupleType):
data1, data2 = func
if data1 == obj:
data1 = None
icode = FAST_TUPLE_ACCESS
else:
try:
data1 = ref(data1, kgetattr_guard)
_kgetattr_wref[id(data1)] = (key, data1)
icode = TUPLE_ACCESS
except TypeError:
icode = NWR_TUPLE_ACCESS
elif isinstance(func, MethodType):
data1 = func.im_func
data2 = func.im_self
if data2 == obj:
data2 = None
icode = FAST_METHOD_ACCESS
else:
try:
data2 = ref(func.im_self, kgetattr_guard)
_kgetattr_wref[id(data2)] = (key, data2)
icode = METHOD_ACCESS
except TypeError:
data2 = func.im_self
icode = NWR_METHOD_ACCESS
else:
icode = LAMBDA_ACCESS
data1 = func
data2 = None
if cache.cacheable:
# Store access opcode:
# objref or obj are used as a protection against id-aliasing
# as we use just a plain id(obj) in the cache entry key.
#
# We either have to use a weakref, so we get to know when the
# object dies. We just remove the cache entry containing the
# weakref, _kgetattr_wref is used to associate which key has
# to be killed for a given weakref.
try:
objref = ref(obj, kgetattr_guard)
_kgetattr_wref[id(objref)] = (key, objref)
_kgetattr_cache[key] = (objref, icode, data1, data2)
except TypeError:
# it's not weakrefable (probably ZODB!)
# store a hard reference.
_kgetattr_cache[key] = (obj, icode, data1, data2)
else:
if _kgetattr_cache.has_key(key):
del _kgetattr_cache[key]
# 2.3. Use accessor tuple to grab value
try:
if icode == FAST_METHOD_ACCESS:
obj = data1(obj)
elif icode == FAST_TUPLE_ACCESS:
obj = getattr(obj, data2, default)
if obj is _AttrUnset:
raise AttributeError(
"%r object has no attribute %r" % (obj, data2))
elif icode == TUPLE_ACCESS:
o = data1()
obj = getattr(o, data2, default)
if obj is _AttrUnset:
raise AttributeError(
"%r object has no attribute %r" % (o, data2))
elif icode == NWR_TUPLE_ACCESS:
obj = getattr(data1, data2)
elif icode == NWR_METHOD_ACCESS:
obj = data1(data2)
elif icode == METHOD_ACCESS:
obj = data1(data2())
elif icode == LAMBDA_ACCESS:
obj = data1()
else:
raise AssertionError("Unknown tuple type in _kgetattr_cache")
# 2.4. Value wasn't found, return default or raise ValueError
except DefaultValue:
if default == _AttrUnset:
raise
return default
# At the end of the iteration, the value retrieved becomes the new obj
# 3. Return value
return obj
# A general algo for ksetattr:
#
# 1. Use attr_name to kgetattr the target object, and get the real attribute
# 2. Try and get accessor tuple from cache
# 3. If not there, generate accessor tuple and store it
# 4. Set value to target object's attribute
def ksetattr(model,
attr_name,
value,
flat=0,
# bind to local variables for speed:
ref=weakref.ref,
TupleType=types.TupleType,
MethodType=types.MethodType,
ksetattr_guard=ksetattr_guard,
getattr=getattr,
dummycache=CacheControl((None,None)),
# constants:
LAMBDA_ACCESS = 0,
METHOD_ACCESS = 1,
TUPLE_ACCESS = 2,
NWR_METHOD_ACCESS = 3,
NWR_TUPLE_ACCESS = 4,
FAST_METHOD_ACCESS = 5,
FAST_TUPLE_ACCESS = 6,
):
"""Set the value associated with the attribute in model
named by attr_name. If flat=1 is specified, no dot path parsing will
be done."""
# 1. kgetattr the target object, and get the real attribute
# This is the only section which is special about ksetattr. When you
# set foo.bar.baz to "x", what you really want to do is get hold of
# foo.bar and use an accessor (set_baz/setattr) on it. This bit gets
# the attribute name and the model we want.
if not flat:
lastdot = string.rfind(attr_name, ".")
if lastdot != -1:
model = kgetattr(model, attr_name[:lastdot])
attr_name = attr_name[lastdot+1:]
# At this point we only have a flat attribute and the right model.
key = (id(model), attr_name)
try:
# 2. Try and get accessor tuple from cache
objref, icode, data1, data2 = _ksetattr_cache[key]
except KeyError:
# 3. If not there, generate accessor tuple and store it
# cache = CacheControl(key)
try:
get_setter = model.__class__.get_setter
cache = CacheControl(key)
except AttributeError:
# No get_setter found:
get_setter = None
# This is needed so the entry storing code can check if it's ok
# to cache.
cache = dummycache
func = getattr(model, "set_%s" % attr_name, None)
if callable(func):
log.info('ksetattr based set_%s method is deprecated, '
'replace it with a property' % attr_name)
icode = FAST_METHOD_ACCESS
data1 = func.im_func
data2 = None
else:
icode = FAST_TUPLE_ACCESS
data1 = None
data2 = attr_name
if get_setter is not None:
func = get_setter(model, attr_name, cache)
if isinstance(func, TupleType):
data1, data2 = func
if data1 == model:
data1 = None
icode = FAST_TUPLE_ACCESS
else:
try:
data1 = ref(data1, ksetattr_guard)
_ksetattr_wref[id(data1)] = (key, data1)
icode = TUPLE_ACCESS
except TypeError:
icode = NWR_TUPLE_ACCESS
elif isinstance(func, MethodType):
data1 = func.im_func
data2 = func.im_self
if data2 == model:
data2 = None
icode = FAST_METHOD_ACCESS
else:
try:
data2 = ref(data2, ksetattr_guard)
_ksetattr_wref[id(data2)] = (key, data2)
icode = METHOD_ACCESS
except TypeError:
data2 = func.im_self
icode = NWR_METHOD_ACCESS
else:
icode = LAMBDA_ACCESS
data1 = func
data2 = None
if cache.cacheable:
# store the access opcode.
# for the use of model/objref as first value in the opcode tuple
# see the kgetattr comments.
try:
objref = ref(model, ksetattr_guard)
_ksetattr_wref[id(objref)] = (key, objref)
_ksetattr_cache[key] = (objref, icode, data1, data2)
except TypeError:
# it's not weakref-able, store a hard reference.
_ksetattr_cache[key] = (model, icode, data1, data2)
else:
if _ksetattr_cache.has_key(key):
                del _ksetattr_cache[key]
if icode == FAST_TUPLE_ACCESS:
setattr(model, data2, value)
elif icode == FAST_METHOD_ACCESS:
data1(model, value)
elif icode == TUPLE_ACCESS:
setattr(data1(), data2, value)
elif icode == NWR_TUPLE_ACCESS:
setattr(data1, data2, value)
elif icode == NWR_METHOD_ACCESS:
data1(data2, value)
elif icode == METHOD_ACCESS:
data1(data2(), value)
elif icode == LAMBDA_ACCESS:
data1(value)
else:
raise AssertionError("Unknown tuple type in _ksetattr_cache")
def enable_attr_cache():
"""Enables the use of the kgetattr cache when using Python
versions that do not support weakrefs (1.5.x and earlier). Be
warned, using the cache in these versions causes leaked
references to accessor methods and models!"""
global _kgetattr_cache, _ksetattr_cache, _kgetattr_wref, _ksetattr_wref
_kgetattr_cache = {}
_ksetattr_cache = {}
_kgetattr_wref = {}
_ksetattr_wref = {}
def clear_attr_cache():
"""Clears the kgetattr cache. It must be called repeatedly to
avoid memory leaks in Python 2.0 and earlier."""
global _kgetattr_cache, _ksetattr_cache, _kgetattr_wref, _ksetattr_wref
_kgetattr_cache = {}
_ksetattr_cache = {}
_kgetattr_wref = {}
_ksetattr_wref = {}
| gpl-3.0 | 1,366,487,333,307,515,400 | 36.371663 | 78 | 0.554451 | false |
whip112/Whip112 | vendor/packages/translate/misc/optrecurse.py | 24 | 32471 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import fnmatch
import logging
import optparse
import os.path
import re
import sys
import traceback
from cStringIO import StringIO
from translate import __version__
from translate.misc import progressbar
class ManPageOption(optparse.Option, object):
ACTIONS = optparse.Option.ACTIONS + ("manpage",)
def take_action(self, action, dest, opt, value, values, parser):
"""take_action that can handle manpage as well as standard actions"""
if action == "manpage":
parser.print_manpage()
sys.exit(0)
return super(ManPageOption, self).take_action(action, dest, opt, value,
values, parser)
class ManHelpFormatter(optparse.HelpFormatter):
def __init__(self,
indent_increment=0,
max_help_position=0,
width=80,
short_first=1):
optparse.HelpFormatter.__init__(
self, indent_increment, max_help_position, width, short_first)
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
metavar = '\\fI%s\\fP' % metavar
short_opts = [sopt + metavar for sopt in option._short_opts]
long_opts = [lopt + "\\fR=\\fP" + metavar for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return '\\fB%s\\fP' % ("\\fR, \\fP".join(opts))
class RecursiveOptionParser(optparse.OptionParser, object):
"""A specialized Option Parser for recursing through directories."""
def __init__(self, formats, usetemplates=False, allowmissingtemplate=False,
description=None):
"""Construct the specialized Option Parser.
:type formats: Dictionary
:param formats: See :meth:`~.RecursiveOptionParser.setformats`
for an explanation of the formats parameter.
"""
optparse.OptionParser.__init__(self, version="%prog " + __version__.sver,
description=description)
self.setmanpageoption()
self.setprogressoptions()
self.seterrorleveloptions()
self.setformats(formats, usetemplates)
self.passthrough = []
self.allowmissingtemplate = allowmissingtemplate
logging.basicConfig(format="%(name)s: %(levelname)s: %(message)s")
def get_prog_name(self):
return os.path.basename(sys.argv[0])
def setmanpageoption(self):
"""creates a manpage option that allows the optionparser to generate a
manpage"""
manpageoption = ManPageOption(None, "--manpage", dest="manpage",
default=False, action="manpage",
help="output a manpage based on the help")
self.define_option(manpageoption)
def format_manpage(self):
"""returns a formatted manpage"""
result = []
prog = self.get_prog_name()
formatprog = lambda x: x.replace("%prog", prog)
formatToolkit = lambda x: x.replace("%prog", "Translate Toolkit")
result.append('.\\" Autogenerated manpage\n')
result.append('.TH %s 1 "%s" "" "%s"\n' % (prog,
formatToolkit(self.version),
formatToolkit(self.version)))
result.append('.SH NAME\n')
result.append('%s \\- %s\n' % (self.get_prog_name(),
self.description.split('\n\n')[0]))
result.append('.SH SYNOPSIS\n')
result.append('.PP\n')
usage = "\\fB%prog "
usage += " ".join([self.getusageman(option) for option in self.option_list])
usage += "\\fP"
result.append('%s\n' % formatprog(usage))
description_lines = self.description.split('\n\n')[1:]
if description_lines:
result.append('.SH DESCRIPTION\n')
            result.append('\n\n'.join([re.sub(r'\.\. note::', 'Note:', l)
                                       for l in description_lines]))
result.append('.SH OPTIONS\n')
ManHelpFormatter().store_option_strings(self)
result.append('.PP\n')
for option in self.option_list:
result.append('.TP\n')
            result.append('%s\n' % str(option).replace('-', r'\-'))
            result.append('%s\n' % option.help.replace('-', r'\-'))
return "".join(result)
def print_manpage(self, file=None):
"""outputs a manpage for the program using the help information"""
if file is None:
file = sys.stdout
file.write(self.format_manpage())
def set_usage(self, usage=None):
"""sets the usage string - if usage not given, uses getusagestring for
each option"""
if usage is None:
self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list])
else:
super(RecursiveOptionParser, self).set_usage(usage)
def warning(self, msg, options=None, exc_info=None):
"""Print a warning message incorporating 'msg' to stderr and exit."""
if options:
if options.errorlevel == "traceback":
errorinfo = "\n".join(traceback.format_exception(exc_info[0],
exc_info[1], exc_info[2]))
elif options.errorlevel == "exception":
errorinfo = "\n".join(traceback.format_exception_only(exc_info[0], exc_info[1]))
elif options.errorlevel == "message":
errorinfo = str(exc_info[1])
else:
errorinfo = ""
if errorinfo:
msg += ": " + errorinfo
logging.getLogger(self.get_prog_name()).warning(msg)
def getusagestring(self, option):
"""returns the usage string for the given option"""
optionstring = "|".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "[%s]" % optionstring
if option.metavar:
optionstring += " " + option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "[%s]" % optionstring
def getusageman(self, option):
"""returns the usage string for the given option"""
optionstring = "\\fR|\\fP".join(option._short_opts + option._long_opts)
if getattr(option, "optionalswitch", False):
optionstring = "\\fR[\\fP%s\\fR]\\fP" % optionstring
if option.metavar:
optionstring += " \\fI%s\\fP" % option.metavar
if getattr(option, "required", False):
return optionstring
else:
return "\\fR[\\fP%s\\fR]\\fP" % optionstring
def define_option(self, option):
"""Defines the given option, replacing an existing one of the same short
        name if necessary..."""
for short_opt in option._short_opts:
if self.has_option(short_opt):
self.remove_option(short_opt)
for long_opt in option._long_opts:
if self.has_option(long_opt):
self.remove_option(long_opt)
self.add_option(option)
def setformats(self, formats, usetemplates):
"""Sets the format options using the given format dictionary.
:type formats: Dictionary
:param formats: The dictionary *keys* should be:
- Single strings (or 1-tuples) containing an
input format (if not *usetemplates*)
- Tuples containing an input format and
template format (if *usetemplates*)
- Formats can be *None* to indicate what to do
with standard input
The dictionary *values* should be tuples of
outputformat (string) and processor method.
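
        An illustrative registration (``convertpo``, ``convertcsv`` and
        ``convertstdin`` are hypothetical processor callables, not part of
        this module)::

            {("po", "pot"): ("po", convertpo),
             "csv": ("po", convertcsv),
             None: ("po", convertstdin)}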
"""
inputformats = []
outputformats = []
templateformats = []
self.outputoptions = {}
self.usetemplates = usetemplates
for formatgroup, outputoptions in formats.iteritems():
if isinstance(formatgroup, (str, unicode)) or formatgroup is None:
formatgroup = (formatgroup, )
if not isinstance(formatgroup, tuple):
raise ValueError("formatgroups must be tuples or None/str/unicode")
if len(formatgroup) < 1 or len(formatgroup) > 2:
raise ValueError("formatgroups must be tuples of length 1 or 2")
if len(formatgroup) == 1:
formatgroup += (None, )
inputformat, templateformat = formatgroup
if not isinstance(outputoptions, tuple) or len(outputoptions) != 2:
raise ValueError("output options must be tuples of length 2")
outputformat, processor = outputoptions
            if inputformat not in inputformats:
                inputformats.append(inputformat)
            if outputformat not in outputformats:
                outputformats.append(outputformat)
            if templateformat not in templateformats:
                templateformats.append(templateformat)
self.outputoptions[(inputformat, templateformat)] = (outputformat, processor)
self.inputformats = inputformats
inputformathelp = self.getformathelp(inputformats)
inputoption = optparse.Option("-i", "--input", dest="input",
default=None, metavar="INPUT",
help="read from INPUT in %s" % (inputformathelp))
inputoption.optionalswitch = True
inputoption.required = True
self.define_option(inputoption)
excludeoption = optparse.Option("-x", "--exclude", dest="exclude",
action="append", type="string", metavar="EXCLUDE",
default=["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"],
help="exclude names matching EXCLUDE from input paths")
self.define_option(excludeoption)
outputformathelp = self.getformathelp(outputformats)
outputoption = optparse.Option("-o", "--output", dest="output",
default=None, metavar="OUTPUT",
help="write to OUTPUT in %s" % (outputformathelp))
outputoption.optionalswitch = True
outputoption.required = True
self.define_option(outputoption)
if self.usetemplates:
self.templateformats = templateformats
templateformathelp = self.getformathelp(self.templateformats)
templateoption = optparse.Option("-t", "--template",
dest="template", default=None, metavar="TEMPLATE",
help="read from TEMPLATE in %s" % (templateformathelp))
self.define_option(templateoption)
def setprogressoptions(self):
"""Sets the progress options."""
self.progresstypes = {
"none": progressbar.NoProgressBar,
"bar": progressbar.HashProgressBar,
"dots": progressbar.DotsProgressBar,
"names": progressbar.MessageProgressBar,
"verbose": progressbar.VerboseProgressBar,
}
progressoption = optparse.Option(None, "--progress", dest="progress",
default="bar",
choices=self.progresstypes.keys(), metavar="PROGRESS",
help="show progress as: %s" % (", ".join(self.progresstypes)))
self.define_option(progressoption)
def seterrorleveloptions(self):
"""Sets the errorlevel options."""
self.errorleveltypes = ["none", "message", "exception", "traceback"]
errorleveloption = optparse.Option(None, "--errorlevel",
dest="errorlevel", default="message",
choices=self.errorleveltypes, metavar="ERRORLEVEL",
help="show errorlevel as: %s" %
(", ".join(self.errorleveltypes)))
self.define_option(errorleveloption)
def getformathelp(self, formats):
"""Make a nice help string for describing formats..."""
formats = sorted(formats)
if None in formats:
formats = filter(lambda format: format is not None, formats)
if len(formats) == 0:
return ""
elif len(formats) == 1:
return "%s format" % (", ".join(formats))
else:
return "%s formats" % (", ".join(formats))
def isrecursive(self, fileoption, filepurpose='input'):
"""Checks if fileoption is a recursive file."""
if fileoption is None:
return False
elif isinstance(fileoption, list):
return True
else:
return os.path.isdir(fileoption)
def parse_args(self, args=None, values=None):
"""Parses the command line options, handling implicit input/output
args."""
(options, args) = super(RecursiveOptionParser, self).parse_args(args, values)
        # apply some intelligence as to what reasonable people might give on
        # the command line
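        # e.g. "prog in.po out.po"      -> input="in.po", output="out.po"
        #      "prog a.po b.po outdir"  -> input=["a.po", "b.po"], output="outdir"
        #      "prog -"                 -> input taken from stdin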
if args and not options.input:
if len(args) > 1:
options.input = args[:-1]
args = args[-1:]
else:
options.input = args[0]
args = []
if args and not options.output:
options.output = args[-1]
args = args[:-1]
if args:
self.error("You have used an invalid combination of --input, --output and freestanding args")
if isinstance(options.input, list) and len(options.input) == 1:
options.input = options.input[0]
if options.input is None:
self.error("You need to give an inputfile or use - for stdin ; use --help for full usage instructions")
elif options.input == '-':
options.input = None
return (options, args)
def getpassthroughoptions(self, options):
"""Get the options required to pass to the filtermethod..."""
passthroughoptions = {}
for optionname in dir(options):
if optionname in self.passthrough:
passthroughoptions[optionname] = getattr(options, optionname)
return passthroughoptions
def getoutputoptions(self, options, inputpath, templatepath):
"""Works out which output format and processor method to use..."""
if inputpath:
inputbase, inputext = self.splitinputext(inputpath)
else:
inputext = None
if templatepath:
templatebase, templateext = self.splittemplateext(templatepath)
else:
templateext = None
if (inputext, templateext) in options.outputoptions:
return options.outputoptions[inputext, templateext]
elif (inputext, "*") in options.outputoptions:
outputformat, fileprocessor = options.outputoptions[inputext, "*"]
elif ("*", templateext) in options.outputoptions:
outputformat, fileprocessor = options.outputoptions["*", templateext]
elif ("*", "*") in options.outputoptions:
outputformat, fileprocessor = options.outputoptions["*", "*"]
elif (inputext, None) in options.outputoptions:
return options.outputoptions[inputext, None]
elif (None, templateext) in options.outputoptions:
return options.outputoptions[None, templateext]
elif ("*", None) in options.outputoptions:
outputformat, fileprocessor = options.outputoptions["*", None]
elif (None, "*") in options.outputoptions:
outputformat, fileprocessor = options.outputoptions[None, "*"]
else:
if self.usetemplates:
if inputext is None:
raise ValueError("don't know what to do with input format (no file extension), no template file")
elif templateext is None:
raise ValueError("don't know what to do with input format %s, no template file" %
(os.extsep + inputext))
else:
raise ValueError("don't know what to do with input format %s, template format %s" %
(os.extsep + inputext, os.extsep + templateext))
else:
raise ValueError("don't know what to do with input format %s" %
(os.extsep + inputext))
if outputformat == "*":
if inputext:
outputformat = inputext
elif templateext:
outputformat = templateext
elif ("*", "*") in options.outputoptions:
outputformat = None
else:
if self.usetemplates:
raise ValueError("don't know what to do with input format (no file extension), no template file")
else:
raise ValueError("don't know what to do with input format (no file extension)")
return outputformat, fileprocessor
def initprogressbar(self, allfiles, options):
"""Sets up a progress bar appropriate to the options and files."""
if options.progress in ('bar', 'verbose'):
self.progressbar = \
self.progresstypes[options.progress](0, len(allfiles))
# should use .getChild("progress") but that is only in 2.7
logger = logging.getLogger(self.get_prog_name() + ".progress")
logger.setLevel(logging.INFO)
logger.propagate = False
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter())
logger.addHandler(handler)
logger.info("processing %d files...", len(allfiles))
else:
self.progressbar = self.progresstypes[options.progress]()
def getfullinputpath(self, options, inputpath):
"""Gets the absolute path to an input file."""
if options.input:
return os.path.join(options.input, inputpath)
else:
return inputpath
def getfulloutputpath(self, options, outputpath):
"""Gets the absolute path to an output file."""
if options.recursiveoutput and options.output:
return os.path.join(options.output, outputpath)
else:
return outputpath
def getfulltemplatepath(self, options, templatepath):
"""Gets the absolute path to a template file."""
if not options.recursivetemplate:
return templatepath
elif (templatepath is not None and
self.usetemplates and options.template):
return os.path.join(options.template, templatepath)
else:
return None
def run(self):
"""Parses the arguments, and runs recursiveprocess with the resulting
options..."""
(options, args) = self.parse_args()
# this is so derived classes can modify the inputformats etc based on
# the options
options.inputformats = self.inputformats
options.outputoptions = self.outputoptions
self.recursiveprocess(options)
def recursiveprocess(self, options):
"""Recurse through directories and process files."""
if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
if not self.isrecursive(options.output, 'output'):
if not options.output:
self.error(optparse.OptionValueError("No output directory given"))
try:
self.warning("Output directory does not exist. Attempting to create")
os.mkdir(options.output)
                except OSError:
self.error(optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
if isinstance(options.input, list):
inputfiles = self.recurseinputfilelist(options)
else:
inputfiles = self.recurseinputfiles(options)
else:
if options.input:
inputfiles = [os.path.basename(options.input)]
options.input = os.path.dirname(options.input)
else:
inputfiles = [options.input]
options.recursiveoutput = (self.isrecursive(options.output, 'output') and
getattr(options, "allowrecursiveoutput", True))
options.recursivetemplate = (self.usetemplates and
self.isrecursive(options.template, 'template') and
getattr(options, "allowrecursivetemplate", True))
self.initprogressbar(inputfiles, options)
for inputpath in inputfiles:
try:
templatepath = self.gettemplatename(options, inputpath)
# If we have a recursive template, but the template doesn't
# have this input file, let's drop it.
if (options.recursivetemplate and templatepath is None and
not self.allowmissingtemplate):
self.warning("No template at %s. Skipping %s." %
(templatepath, inputpath))
continue
outputformat, fileprocessor = self.getoutputoptions(options, inputpath, templatepath)
fullinputpath = self.getfullinputpath(options, inputpath)
fulltemplatepath = self.getfulltemplatepath(options,
templatepath)
outputpath = self.getoutputname(options, inputpath, outputformat)
fulloutputpath = self.getfulloutputpath(options, outputpath)
if options.recursiveoutput and outputpath:
self.checkoutputsubdir(options, os.path.dirname(outputpath))
except Exception as error:
if isinstance(error, KeyboardInterrupt):
raise
self.warning("Couldn't handle input file %s" %
inputpath, options, sys.exc_info())
continue
try:
success = self.processfile(fileprocessor, options,
fullinputpath, fulloutputpath,
fulltemplatepath)
except Exception as error:
if isinstance(error, KeyboardInterrupt):
raise
self.warning("Error processing: input %s, output %s, template %s" %
(fullinputpath, fulloutputpath,
fulltemplatepath), options, sys.exc_info())
success = False
self.reportprogress(inputpath, success)
del self.progressbar
def openinputfile(self, options, fullinputpath):
"""Opens the input file."""
if fullinputpath is None:
return sys.stdin
return open(fullinputpath, 'r')
def openoutputfile(self, options, fulloutputpath):
"""Opens the output file."""
if fulloutputpath is None:
return sys.stdout
return open(fulloutputpath, 'w')
def opentempoutputfile(self, options, fulloutputpath):
"""Opens a temporary output file."""
return StringIO()
def finalizetempoutputfile(self, options, outputfile, fulloutputpath):
"""Write the temp outputfile to its final destination."""
outputfile.reset()
outputstring = outputfile.read()
outputfile = self.openoutputfile(options, fulloutputpath)
outputfile.write(outputstring)
outputfile.close()
def opentemplatefile(self, options, fulltemplatepath):
"""Opens the template file (if required)."""
if fulltemplatepath is not None:
if os.path.isfile(fulltemplatepath):
return open(fulltemplatepath, 'r')
else:
self.warning("missing template file %s" % fulltemplatepath)
return None
def processfile(self, fileprocessor, options, fullinputpath,
fulloutputpath, fulltemplatepath):
"""Process an individual file."""
inputfile = self.openinputfile(options, fullinputpath)
if (fulloutputpath and
fulloutputpath in (fullinputpath, fulltemplatepath)):
outputfile = self.opentempoutputfile(options, fulloutputpath)
tempoutput = True
else:
outputfile = self.openoutputfile(options, fulloutputpath)
tempoutput = False
templatefile = self.opentemplatefile(options, fulltemplatepath)
passthroughoptions = self.getpassthroughoptions(options)
if fileprocessor(inputfile, outputfile, templatefile,
**passthroughoptions):
if tempoutput:
self.warning("writing to temporary output...")
self.finalizetempoutputfile(options, outputfile,
fulloutputpath)
return True
else:
# remove the file if it is a file (could be stdout etc)
if fulloutputpath and os.path.isfile(fulloutputpath):
outputfile.close()
os.unlink(fulloutputpath)
return False
def reportprogress(self, filename, success):
"""Shows that we are progressing..."""
self.progressbar.amount += 1
self.progressbar.show(filename)
def mkdir(self, parent, subdir):
"""Makes a subdirectory (recursively if neccessary)."""
if not os.path.isdir(parent):
raise ValueError("cannot make child directory %r if parent %r does not exist" %
(subdir, parent))
currentpath = parent
subparts = subdir.split(os.sep)
for part in subparts:
currentpath = os.path.join(currentpath, part)
if not os.path.isdir(currentpath):
os.mkdir(currentpath)
def checkoutputsubdir(self, options, subdir):
"""Checks to see if subdir under options.output needs to be created,
        creates if necessary."""
fullpath = os.path.join(options.output, subdir)
if not os.path.isdir(fullpath):
self.mkdir(options.output, subdir)
def isexcluded(self, options, inputpath):
"""Checks if this path has been excluded."""
basename = os.path.basename(inputpath)
for excludename in options.exclude:
if fnmatch.fnmatch(basename, excludename):
return True
return False
def recurseinputfilelist(self, options):
"""Use a list of files, and find a common base directory for them."""
# find a common base directory for the files to do everything
# relative to
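        # (os.path.commonprefix compares strings character by character, so
        # the dirname() call trims any partial path component it leaves.)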
commondir = os.path.dirname(os.path.commonprefix(options.input))
inputfiles = []
for inputfile in options.input:
if self.isexcluded(options, inputfile):
continue
if inputfile.startswith(commondir + os.sep):
inputfiles.append(inputfile.replace(commondir + os.sep, "", 1))
else:
inputfiles.append(inputfile.replace(commondir, "", 1))
options.input = commondir
return inputfiles
def recurseinputfiles(self, options):
"""Recurse through directories and return files to be processed."""
dirstack = ['']
join = os.path.join
inputfiles = []
while dirstack:
top = dirstack.pop(-1)
names = os.listdir(join(options.input, top))
dirs = []
for name in names:
inputpath = join(top, name)
if self.isexcluded(options, inputpath):
continue
fullinputpath = self.getfullinputpath(options, inputpath)
# handle directories...
if os.path.isdir(fullinputpath):
dirs.append(inputpath)
elif os.path.isfile(fullinputpath):
if not self.isvalidinputname(options, name):
# only handle names that match recognized input
# file extensions
continue
inputfiles.append(inputpath)
# make sure the directories are processed next time round.
dirs.reverse()
dirstack.extend(dirs)
return inputfiles
def splitext(self, pathname):
"""Splits *pathname* into name and ext, and removes the extsep.
:param pathname: A file path
:type pathname: string
:return: root, ext
:rtype: tuple
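
        For example (illustrative)::

            >>> parser.splitext("po/af/messages.po")
            ('po/af/messages', 'po')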
"""
root, ext = os.path.splitext(pathname)
ext = ext.replace(os.extsep, "", 1)
return (root, ext)
def splitinputext(self, inputpath):
"""Splits an *inputpath* into name and extension."""
return self.splitext(inputpath)
def splittemplateext(self, templatepath):
"""Splits a *templatepath* into name and extension."""
return self.splitext(templatepath)
def templateexists(self, options, templatepath):
"""Returns whether the given template exists..."""
fulltemplatepath = self.getfulltemplatepath(options, templatepath)
return os.path.isfile(fulltemplatepath)
def gettemplatename(self, options, inputname):
"""Gets an output filename based on the input filename."""
if not self.usetemplates:
return None
if not inputname or not options.recursivetemplate:
return options.template
inputbase, inputext = self.splitinputext(inputname)
if options.template:
for inputext1, templateext1 in options.outputoptions:
if inputext == inputext1:
if templateext1:
templatepath = inputbase + os.extsep + templateext1
if self.templateexists(options, templatepath):
return templatepath
if "*" in options.inputformats:
for inputext1, templateext1 in options.outputoptions:
if (inputext == inputext1) or (inputext1 == "*"):
if templateext1 == "*":
templatepath = inputname
if self.templateexists(options, templatepath):
return templatepath
elif templateext1:
templatepath = inputbase + os.extsep + templateext1
if self.templateexists(options, templatepath):
return templatepath
return None
def getoutputname(self, options, inputname, outputformat):
"""Gets an output filename based on the input filename."""
if not inputname or not options.recursiveoutput:
return options.output
inputbase, inputext = self.splitinputext(inputname)
outputname = inputbase
if outputformat:
outputname += os.extsep + outputformat
return outputname
def isvalidinputname(self, options, inputname):
"""Checks if this is a valid input filename."""
inputbase, inputext = self.splitinputext(inputname)
return ((inputext in options.inputformats) or
("*" in options.inputformats))
| mpl-2.0 | -1,564,789,509,277,742,800 | 43.664374 | 118 | 0.585753 | false |
emijrp/pywikibot-core | scripts/login.py | 2 | 4478 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script to log the bot in to a wiki account.
Suggestion is to make a special account to use for bot use only. Make
sure this bot account is well known on your home wiki before using.
Parameters:
-family:FF
-lang:LL Log in to the LL language of the FF family.
Example: -family:wiktionary -lang:fr will log you in at
fr.wiktionary.org.
-all Try to log in on all sites where a username is defined in
user-config.py.
-logout Log out of the current site. Combine with -all to log out of
all sites, or with -family and -lang to log out of a specific
site.
-force Ignores if the user is already logged in, and tries to log in.
-pass Useful in combination with -all when you have accounts for
several sites and use the same password for all of them.
Asks you for the password, then logs in on all given sites.
-pass:XXXX Uses XXXX as password. Be careful if you use this
parameter because your password will be shown on your
screen, and will probably be saved in your command line
history. This is NOT RECOMMENDED for use on computers
where others have either physical or remote access.
Use -pass instead.
-sysop Log in with your sysop account.
If not given as parameter, the script will ask for your username and
password (password entry will be hidden), log in to your home wiki using
this combination, and store the resulting cookies (containing your password
hash, so keep it secured!) in a file in the data subdirectory.
All scripts in this library will be looking for this cookie file and will
use the login information if it is present.
To log out, throw away the *.lwp file that is created in the data
subdirectory.
"""
#
# (C) Rob W.W. Hooft, 2003
# (C) Pywikibot team, 2003-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import pywikibot
from os.path import join
from pywikibot import config
from pywikibot.exceptions import SiteDefinitionError
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
password = None
sysop = False
logall = False
logout = False
for arg in pywikibot.handle_args(args):
if arg.startswith("-pass"):
if len(arg) == 5:
password = pywikibot.input(u'Password for all accounts (no characters will be shown):',
password=True)
else:
password = arg[6:]
elif arg == "-sysop":
sysop = True
elif arg == "-all":
logall = True
elif arg == "-force":
pywikibot.output(u"To force a re-login, please delete the revelant "
u"lines from '%s' (or the entire file) and try again." %
join(config.base_dir, 'pywikibot.lwp'))
elif arg == "-logout":
logout = True
else:
pywikibot.showHelp('login')
return
if logall:
if sysop:
namedict = config.sysopnames
else:
namedict = config.usernames
else:
site = pywikibot.Site()
namedict = {site.family.name: {site.code: None}}
for familyName in namedict:
for lang in namedict[familyName]:
try:
site = pywikibot.Site(code=lang, fam=familyName)
if logout:
site.logout()
else:
site.login(sysop)
user = site.user()
if user:
pywikibot.output(u"Logged in on %(site)s as %(user)s." % locals())
else:
if logout:
pywikibot.output(u"Logged out of %(site)s." % locals())
else:
pywikibot.output(u"Not logged in on %(site)s." % locals())
except SiteDefinitionError:
pywikibot.output(u'%s.%s is not a valid site, please remove it'
u' from your config' % (lang, familyName))
if __name__ == "__main__":
main()
| mit | -651,188,939,974,501,400 | 33.713178 | 103 | 0.577267 | false |
DPaaS-Raksha/horizon | openstack_dashboard/dashboards/admin/users/views.py | 7 | 4104 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from horizon import exceptions
from horizon import forms
from horizon import tables
from openstack_dashboard import api
from .forms import CreateUserForm, UpdateUserForm
from .tables import UsersTable
class IndexView(tables.DataTableView):
table_class = UsersTable
template_name = 'admin/users/index.html'
def get_data(self):
users = []
try:
users = api.keystone.user_list(self.request)
        except Exception:
exceptions.handle(self.request,
_('Unable to retrieve user list.'))
return users
class UpdateView(forms.ModalFormView):
form_class = UpdateUserForm
template_name = 'admin/users/update.html'
success_url = reverse_lazy('horizon:admin:users:index')
@method_decorator(sensitive_post_parameters('password',
'confirm_password'))
def dispatch(self, *args, **kwargs):
return super(UpdateView, self).dispatch(*args, **kwargs)
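    # get_object memoizes the keystone lookup on the view instance, so the
    # form's initial data and the template context trigger only one API call.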
def get_object(self):
if not hasattr(self, "_object"):
try:
self._object = api.keystone.user_get(self.request,
self.kwargs['user_id'],
admin=True)
            except Exception:
redirect = reverse("horizon:admin:users:index")
exceptions.handle(self.request,
_('Unable to update user.'),
redirect=redirect)
return self._object
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
context['user'] = self.get_object()
return context
def get_initial(self):
user = self.get_object()
return {'id': user.id,
'name': user.name,
'tenant_id': getattr(user, 'tenantId', None),
'email': user.email}
class CreateView(forms.ModalFormView):
form_class = CreateUserForm
template_name = 'admin/users/create.html'
success_url = reverse_lazy('horizon:admin:users:index')
@method_decorator(sensitive_post_parameters('password',
'confirm_password'))
def dispatch(self, *args, **kwargs):
return super(CreateView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(CreateView, self).get_form_kwargs()
try:
roles = api.keystone.role_list(self.request)
        except Exception:
redirect = reverse("horizon:admin:users:index")
exceptions.handle(self.request,
_("Unable to retrieve user roles."),
redirect=redirect)
roles.sort(key=operator.attrgetter("id"))
kwargs['roles'] = roles
return kwargs
def get_initial(self):
default_role = api.keystone.get_default_role(self.request)
return {'role_id': getattr(default_role, "id", None)}
| apache-2.0 | -3,582,051,108,274,903,000 | 35.642857 | 78 | 0.61501 | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/turtledemo/fractalcurves.py | 14 | 3473 | #!/usr/bin/env python3
""" turtle-example-suite:
tdemo_fractalCurves.py
This program draws two fractal-curve-designs:
(1) A hilbert curve (in a box)
(2) A combination of Koch-curves.
The CurvesTurtle class and the fractal-curve-
methods are taken from the PythonCard example
scripts for turtle-graphics.
"""
from turtle import *
from time import sleep, perf_counter as clock
class CurvesTurtle(Pen):
# example derived from
# Turtle Geometry: The Computer as a Medium for Exploring Mathematics
# by Harold Abelson and Andrea diSessa
# p. 96-98
def hilbert(self, size, level, parity):
if level == 0:
return
# rotate and draw first subcurve with opposite parity to big curve
self.left(parity * 90)
self.hilbert(size, level - 1, -parity)
# interface to and draw second subcurve with same parity as big curve
self.forward(size)
self.right(parity * 90)
self.hilbert(size, level - 1, parity)
# third subcurve
self.forward(size)
self.hilbert(size, level - 1, parity)
# fourth subcurve
self.right(parity * 90)
self.forward(size)
self.hilbert(size, level - 1, -parity)
# a final turn is needed to make the turtle
# end up facing outward from the large square
self.left(parity * 90)
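    # A level-n curve covers 4**n grid cells joined by 4**n - 1 segments;
    # the level-6 call in main() below therefore traces 4095 short strokes.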
# Visual Modeling with Logo: A Structural Approach to Seeing
# by James Clayson
# Koch curve, after Helge von Koch who introduced this geometric figure in 1904
# p. 146
def fractalgon(self, n, rad, lev, dir):
import math
# if dir = 1 turn outward
# if dir = -1 turn inward
edge = 2 * rad * math.sin(math.pi / n)
self.pu()
self.fd(rad)
self.pd()
self.rt(180 - (90 * (n - 2) / n))
for i in range(n):
self.fractal(edge, lev, dir)
self.rt(360 / n)
self.lt(180 - (90 * (n - 2) / n))
self.pu()
self.bk(rad)
self.pd()
# p. 146
def fractal(self, dist, depth, dir):
if depth < 1:
self.fd(dist)
return
self.fractal(dist / 3, depth - 1, dir)
self.lt(60 * dir)
self.fractal(dist / 3, depth - 1, dir)
self.rt(120 * dir)
self.fractal(dist / 3, depth - 1, dir)
self.lt(60 * dir)
self.fractal(dist / 3, depth - 1, dir)
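    # Each depth level replaces a segment with four segments one third as
    # long, so a depth-d Koch edge is drawn with 4**d strokes and has total
    # length dist * (4/3)**d.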
def main():
ft = CurvesTurtle()
ft.reset()
ft.speed(0)
ft.ht()
ft.getscreen().tracer(1,0)
ft.pu()
size = 6
ft.setpos(-33*size, -32*size)
ft.pd()
ta=clock()
ft.fillcolor("red")
ft.begin_fill()
ft.fd(size)
ft.hilbert(size, 6, 1)
# frame
ft.fd(size)
for i in range(3):
ft.lt(90)
ft.fd(size*(64+i%2))
ft.pu()
for i in range(2):
ft.fd(size)
ft.rt(90)
ft.pd()
for i in range(4):
ft.fd(size*(66+i%2))
ft.rt(90)
ft.end_fill()
tb=clock()
res = "Hilbert: %.2fsec. " % (tb-ta)
sleep(3)
ft.reset()
ft.speed(0)
ft.ht()
ft.getscreen().tracer(1,0)
ta=clock()
ft.color("black", "blue")
ft.begin_fill()
ft.fractalgon(3, 250, 4, 1)
ft.end_fill()
ft.begin_fill()
ft.color("red")
ft.fractalgon(3, 200, 4, -1)
ft.end_fill()
tb=clock()
res += "Koch: %.2fsec." % (tb-ta)
return res
if __name__ == '__main__':
msg = main()
print(msg)
mainloop()
| apache-2.0 | 1,199,790,521,574,341,000 | 24.166667 | 83 | 0.553124 | false |
codificat/sos | sos/plugins/smartcard.py | 5 | 1355 | # Copyright (C) 2007 Sadique Puthen <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin
class Smartcard(Plugin, RedHatPlugin):
"""PKCS#11 smart cards
"""
plugin_name = 'smartcard'
profiles = ('security', 'identity', 'hardware')
files = ('/etc/pam_pkcs11/pam_pkcs11.conf',)
packages = ('pam_pkcs11',)
def setup(self):
self.add_copy_spec([
"/etc/reader.conf",
"/etc/reader.conf.d/",
"/etc/pam_pkcs11/"])
self.add_cmd_output([
"pkcs11_inspect debug",
"pklogin_finder debug",
"ls -nl /usr/lib*/pam_pkcs11/"
])
# vim: et ts=4 sw=4
| gpl-2.0 | 867,349,903,535,783,000 | 32.04878 | 70 | 0.665683 | false |
gogo40/GoGo-Pokerbot | h_power_calc.py | 1 | 4342 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# GoGo - Pokerbot
# Copyright 2015 Péricles Lopes Machado
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# h power calculator
#
# A very simple heuristic to evaluate the hand quality
# Suits (the code calls these "ranks"):
# c = clubs
# s = spades
# h = hearts
# d = diamonds
# Values:
# 2 .. 9
# T = 10
# J, Q, K, A
# Unknown = __
import timeit
from pokereval import PokerEval
__author__ = 'gogo40'
pe = PokerEval()
ranks_ = ["h", "c", "d", "s"]
game_ = "holdem"
iterations_ = 5000000
dead_ = []
#######################################
# h_power calculator
def h_power_calc(result_):
total_ = 0.0
ev_ = []
for r in result_['eval']:
v = float(r['ev'])
ev_.append(v)
total_ = total_ + v
threshold_ = 100.0 / n_players_
id = 1
P = []
H = []
for e in ev_:
p = (100.0 * e) / total_
h = p / threshold_
print "p[%d] = %.02f%%" % (id, p)
print "H[%d] = %.02f" % (id, h)
P.append(p)
H.append(h)
id = id + 1
return (P, H, threshold_)
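# Worked example (illustrative): with 6 players each seat's fair share is
# 100 / 6 = 16.67%, so a hand holding 25% pot equity scores
# H = 25 / 16.67 = 1.5 -- any H above 1.0 beats an average random hand.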
#######################################
def read_raw_data(msg):
v = 0
while True:
try:
v = raw_input(msg)
break
        except Exception:
print "Invalid input!"
return v
#######################################
def read_data(msg):
v = 0
while True:
try:
v = input(msg)
break
        except Exception:
print "Invalid input!"
return v
############################################
def read_card(msg):
cn = -1
c = ""
while True:
try:
c = read_raw_data(msg)
cn = pe.string2card(c)
break
        except Exception:
            print "%s isn't a card!" % c
return c
############################################
print "deck = %s\n" % pe.card2string(pe.deck())
print "---------------------------------------"
n_players_ = read_data("N players> ")
big_blind_ = read_data("Big Blind> ")
print "---------------------------------------"
print "cards> 2, ... , 9, T, J, Q, K, A"
print "ranks> %s" % ranks_
player_hand_ = []
for i in range(1, 3):
player_hand_.append(read_card("Player card %d> " % (i)))
pockets_ = [player_hand_]
print player_hand_
print "---------------------------------------"
n_known_cards_ = read_data("Number of known hands> ")
for i in range(1, n_known_cards_ + 1):
c1 = read_card("\t(%d) CARD1>" % (i))
c2 = read_card("\t(%d) CARD2>" % (i))
print "\n"
pockets_.append([c1, c2])
for i in range(n_known_cards_ + 1, n_players_):
pockets_.append(["__", "__"])
print "Pockets> "
print pockets_
print "---------------------------------------"
print "cards> 2, ... , 9, T, J, Q, K, A"
print "ranks> %s" % ranks_
known_board_ = []
for n_known_cards_ in [0, 3, 1, 1]:
n = len(known_board_)
for i in range(1, n_known_cards_ + 1):
c = read_card("Board card %d> " % (n + i))
known_board_.append(c)
board_ = []
for v in known_board_:
board_.append(v)
for i in range(len(known_board_), 5):
board_.append("__")
print "Player hand> "
print player_hand_
print "Known Board> "
print known_board_
print "Board> "
print board_
print "---------------------------------------"
time_start = timeit.default_timer()
result_ = pe.poker_eval(game=game_, pockets=pockets_, dead=dead_, board=board_, iterations=iterations_)
time_end = timeit.default_timer()
(P, H, threshold) = h_power_calc(result_)
print "Player win probability: P = %.02f" % P[0]
print "Player power: H = %.02f" % H[0]
print "Threshold: threshold = %.02f" % threshold
print "Big blind: %.02f" % big_blind_
print "Bid: %.02f" % (big_blind_ * H[0])
print "Processing time elapsed: %.06f s" % (time_end - time_start)
| apache-2.0 | -8,282,153,635,375,138,000 | 21.609375 | 107 | 0.504262 | false |